diff --git a/Comp/r-east_gsd_tte.qmd b/Comp/r-east_gsd_tte.qmd index 04b693882..884c14616 100644 --- a/Comp/r-east_gsd_tte.qmd +++ b/Comp/r-east_gsd_tte.qmd @@ -2,8 +2,6 @@ title: "R vs EAST vs SAS: Group sequential design" editor_options: chunk_output_type: console -execute: - eval: false --- ## Introduction @@ -30,7 +28,6 @@ We assume that a GSD is utilized for progression-free survival (PFS) endpoint. I Further design assumptions are as follows: ```{r} -#| eval: true # PFS HR=0.6 hr1_pfs <- 0.6 # median PFS of 9.4 months in the control arm @@ -62,7 +59,6 @@ Note that, in EAST the number of target events is reported as an integer, howeve For ease of comparison the results from EAST are summarized below: ```{r} -#| eval: true #| echo: false #| warning: false library(flextable) @@ -106,7 +102,6 @@ pfs_east |> - gsDesign code to reproduce the above EAST results: ```{r} -#| eval: true #| warning: false library(gsDesign) @@ -136,7 +131,6 @@ pfs_gsDesign |> - gsDesign vs EAST comparison using absolute differences: ```{r} -#| eval: true #| echo: false digit_comp <- 4 pfs_gsDesign |> @@ -191,7 +185,6 @@ pfs_gsDesign |> - Note that, here `gsDesign2::gs_power_ahr()` is used given the number of target events for each analysis based on EAST results. ```{r} -#| eval: true #| echo: false #helper function to align the gsDesign2 summary with gsDesign summary as_gs <- function(xnph) { @@ -318,7 +311,6 @@ as_gs <- function(xnph) { ``` ```{r} -#| eval: true #| warning: false #| message: false library(gsDesign2) @@ -365,7 +357,6 @@ pfs_gsDesign2 |> - gsDesign2 vs EAST comparison using absolute differences: ```{r} -#| eval: true #| echo: false pfs_gsDesign2 |> as_gs() |> @@ -418,7 +409,6 @@ pfs_gsDesign2 |> - rpact code to reproduce the above EAST results appears below. 
```{r} -#| eval: true #| warning: false library(rpact) @@ -451,7 +441,6 @@ kable(summary(pfs_rpact)) ```{r} #| echo: false -#| eval: true pcross_h1_eff <- cumsum(pfs_rpact$rejectPerStage) pcross_h1_fut <- pfs_rpact$futilityPerStage[1] @@ -548,7 +537,7 @@ pfs_rpact_sum |> - SAS code to reproduce the above rpact results appears below. -```{sas} +```sas PROC SEQDESIGN BOUNDARYSCALE=MLE ERRSPEND; DESIGN NSTAGES=2 INFO=CUM(0.748936170212766 1.0) @@ -572,7 +561,6 @@ RUN; The following shows the events (D) and required sample sizes (N) for IA and FA. ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 100% @@ -585,7 +573,7 @@ Please note that the `BOUNDARYSCALE=MLE | SCORE | STDZ | PVALUE` options display SAS doesn't provide a boundary information with HR, so the HR boundaries is obtained from the MLE boundaries (as MLE $=\hat{\theta}=-log(\text{HR})$, see [SAS User's Guide: Test for Two Survival Distributions with a Log-Rank Test](https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.5/statug/statug_seqdesign_details52.htm#statug.seqdesign.cseqdlogrank)) via the following code. -```{sas} +```sas DATA BHR; SET BMLE; Bound_UA_HR=exp(-Bound_UA); @@ -599,7 +587,6 @@ RUN; The HR boundaries are shown below. ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 100% @@ -611,7 +598,6 @@ knitr::include_graphics( The results calculated by SAS are presneted in the table below. Please note that SAS doesn't report the probablities $P(Cross | HR=1)$ and $P(Cross | HR=0.6)$, resulting in empty cells for these results in the table. 
```{r} -#| eval: true #| echo: false #| warning: false library(flextable) @@ -648,7 +634,6 @@ pfs_sas |> - SAS vs rapct comparison using absolute differences: ```{r} -#| eval: true #| echo: false #| warning: false pfs_rpact_sum |> @@ -694,7 +679,6 @@ pfs_rpact_sum |> - SAS vs EAST comparison using absolute differences: ```{r} -#| eval: true #| echo: false pfs_sas_sum <- tibble::tibble( sas_eff = pfs_sas$eff_125, diff --git a/Comp/r-sas-python_survey-stats-summary.qmd b/Comp/r-sas-python_survey-stats-summary.qmd index 192b684d6..b69bee4af 100644 --- a/Comp/r-sas-python_survey-stats-summary.qmd +++ b/Comp/r-sas-python_survey-stats-summary.qmd @@ -1,8 +1,6 @@ --- title: "R vs SAS vs Python Survey Summary Statistics" bibliography: survey-stats-summary.bib -execute: - eval: false --- This document will compare the survey summary statistics functionality in SAS (available through SAS/STAT), R (available from the [`{survey}`](%5B%60%7Bsurvey%7D%60%5D(https://r-survey.r-forge.r-project.org/survey/html/api.html)) package), and Python (available from the [`samplics`](https://samplics-org.github.io/samplics/) package), highlighting differences in methods and results. Only the default Taylor series linearisation method for calculating variances is used in all languages. A more detailed comparison between R and SAS for specific methods and use-cases is available in [@2017_YRBS], [@so2020modelling], or [@adamico_2009]. For a general guide to survey statistics, which has companion guides for both R and SAS, see [@Lohr_2022]. 
@@ -31,7 +29,6 @@ For the full R, SAS, and Python code and results used for this comparison, see b ## R ```{r} -#| eval: true #| message: false #| warning: false library(survey) @@ -125,7 +122,7 @@ print(list( ## SAS -```{sas} +```sas * Mean, sum quantile of HI_CHOL; proc surveymeans data=nhanes mean sum clm quantile=(0.025 0.5 0.975); cluster SDMVPSU; @@ -277,7 +274,6 @@ run; ## Python ```{python} -#| eval: true import pandas as pd from samplics import TaylorEstimator from samplics.utils.types import PopParam @@ -363,7 +359,6 @@ print( `samplics` in Python does not have a method for calculating quantiles, and in R and SAS the available methods lead to different results. To demonstrate the differences in calculating quantiles, we will use the `apisrs` dataset from the `survey` package in R [@API_2000]. ```{r} -#| eval: true #| message: false library(survey) @@ -377,7 +372,7 @@ In SAS, PROC SURVEYMEANS will calculate quantiles of specific probabilities as y The method and results from SAS are as follows: -```{sas} +```sas proc surveymeans data=apisrs total=6194 quantile=(0.025 0.5 0.975); var growth; run; @@ -407,7 +402,6 @@ run; If in R we use the default `qrule="math"` (equivalent to `qrule="hf1"` and matches `type=1` in the `quantile` function for unweighted data) along with the default `interval.type="mean"`, we get the following results: ```{r} -#| eval: true srs_design <- survey::svydesign(data = apisrs, id = ~1, fpc = ~fpc, ) survey::svyquantile( @@ -422,7 +416,6 @@ survey::svyquantile( Here we can see that the quantiles, confidence intervals, and standard errors do not match SAS. From testing, none of the available `qrule` methods match SAS for the quantile values, so it is recommended to use the default values unless you have need of some of the other properties of different quantile definitions - see [`vignette("qrule", package="survey")`](https://cran.r-project.org/web/packages/survey/vignettes/qrule.pdf) for more detail. 
If an exact match to SAS is required, then the `svyquantile` function allows for passing a custom function to the `qrule` argument to define your own method for calculating quantiles. Below is an example that will match SAS: ```{r} -#| eval: true sas_qrule <- function(x, w, p) { # Custom qrule to match SAS, based on survey::oldsvyquantile's internal method if (any(is.na(x))) { @@ -459,7 +452,6 @@ sas_quants Note that although the quantiles and standard errors match, the confidence intervals still do not match SAS. For this another custom calculation is required, based on the formula used in SAS: ```{r} -#| eval: true sas_quantile_confint <- function(newsvyquantile, level = 0.05, df = Inf) { q <- coef(newsvyquantile) se <- survey::SE(newsvyquantile) @@ -503,7 +495,6 @@ In contrast, the `samplics` package in Python is still early in development, and ::: {.callout-note collapse="true" title="Session Info"} ```{r} -#| eval: true #| echo: false si <- sessioninfo::session_info("survey", dependencies = FALSE) # If reticulate is used, si will include python info. However, this doesn't diff --git a/Comp/r-sas-summary-stats.qmd b/Comp/r-sas-summary-stats.qmd index ebb2aa500..9c8315416 100644 --- a/Comp/r-sas-summary-stats.qmd +++ b/Comp/r-sas-summary-stats.qmd @@ -1,6 +1,5 @@ --- title: "Deriving Quantiles or Percentiles in R vs SAS" -eval: false --- ### Data @@ -15,7 +14,7 @@ c(10, 20, 30, 40, 150, 160, 170, 180, 190, 200) Assuming the data above is stored in the variable `aval` within the dataset `adlb`, the 25th and 40th percentiles could be calculated using the following code. -```{sas} +```sas proc univariate data=adlb; var aval; output out=stats pctlpts=25 40 pctlpre=p; @@ -38,6 +37,7 @@ The procedure has the option `PCTLDEF` which allows for five different percentil The 25th and 40th percentiles of `aval` can be calculated using the `quantile` function. 
```{r} +#| eval: false quantile(adlb$aval, probs = c(0.25, 0.4)) ``` @@ -45,7 +45,6 @@ This gives the following output. ```{r} #| echo: false -#| eval: true adlb <- data.frame(aval = c(10, 20, 30, 40, 150, 160, 170, 180, 190, 200)) quantile(adlb$aval, probs = c(0.25, 0.4)) ``` @@ -59,6 +58,7 @@ The default percentile definition used by the UNIVARIATE procedure in SAS finds It is possible to get the quantile function in R to use the same definition as the default used in SAS, by specifying `type=2`. ```{r} +#| eval: false alquantile(adlb$aval, probs = c(0.25, 0.4), type = 2) ``` @@ -66,7 +66,6 @@ This gives the following output. ```{r} #| echo: false -#| eval: true quantile(adlb$aval, probs = c(0.25, 0.4), type = 2) ``` diff --git a/Comp/r-sas_anova.qmd b/Comp/r-sas_anova.qmd index 599b52d95..c659f6d37 100644 --- a/Comp/r-sas_anova.qmd +++ b/Comp/r-sas_anova.qmd @@ -1,7 +1,5 @@ --- title: "R vs SAS Linear Models" -execute: - eval: false --- # R vs. SAS ANOVA @@ -31,7 +29,6 @@ The following table provides an overview of the support and results comparabilit In order to get the ANOVA model fit and sum of squares you can use the `anova` function in the `stats` package. ```{r} -#| eval: true library(emmeans) drug_trial <- read.csv("../data/drug_trial.csv") @@ -43,7 +40,6 @@ lm_model |> It is recommended to use the `emmeans` package to get the contrasts between R. ```{r} -#| eval: true lm_model |> emmeans("drug") |> contrast( @@ -56,7 +52,7 @@ lm_model |> In SAS, all contrasts must be manually defined, but the syntax is largely similar in both. 
-```{sas} +```sas proc glm data=work.mycsv; class drug; model post = pre drug / solution; @@ -66,7 +62,6 @@ run; ``` ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 75% @@ -119,7 +114,7 @@ Provided below is a detailed comparison of the results obtained from both SAS an Note, however, that there are some cases where the scale of the parameter estimates between SAS and R is off, though the test statistics and p-values are identical. In these cases, we can adjust the SAS code to include a divisor. As far as we can tell, this difference only occurs when using the predefined Base R contrast methods like `contr.helmert`. -```{sas} +```sas proc glm data=work.mycsv; class drug; model post = pre drug / solution; diff --git a/Comp/r-sas_chi-sq.qmd b/Comp/r-sas_chi-sq.qmd index bd926e289..2efad42c5 100644 --- a/Comp/r-sas_chi-sq.qmd +++ b/Comp/r-sas_chi-sq.qmd @@ -1,7 +1,5 @@ --- title: "R/SAS Chi-Squared and Fisher's Exact Comparision" -execute: - eval: false --- # Chi-Squared Test @@ -26,7 +24,6 @@ For an r x c table (where r is the number of rows and c the number of columns), For this example we will use data about cough symptoms and history of bronchitis. ```{r} -#| eval: true bronch <- matrix(c(26, 247, 44, 1002), ncol = 2) row.names(bronch) <- c("cough", "no cough") colnames(bronch) <- c("bronchitis", "no bronchitis") @@ -36,13 +33,12 @@ bronch To a chi-squared test in R you will use the following code. ```{r} -#| eval: true stats::chisq.test(bronch) ``` To run a chi-squared test in SAS you used the following code. -```{sas} +```sas proc freq data=proj1.bronchitis; tables Cough*Bronchitis / chisq; run; @@ -51,7 +47,6 @@ run; The result in the "Chi-Square" section of the results table in SAS will not match R, in this case it is 12.1804 with a p-value of 0.0005. This is because by default R does a Yates continuity adjustment for 2x2 tables. To change this set `correct` to false. 
```{r} -#| eval: true stats::chisq.test(bronch, correct = FALSE) ``` diff --git a/Comp/r-sas_friedman.qmd b/Comp/r-sas_friedman.qmd index c995cab25..cc78c365e 100644 --- a/Comp/r-sas_friedman.qmd +++ b/Comp/r-sas_friedman.qmd @@ -1,11 +1,8 @@ --- title: "R vs SAS Non-parametric Analysis - Friedman test" -execute: - eval: false --- ```{r} -#| eval: true #| label: review-setup #| message: false #| warning: false @@ -19,7 +16,6 @@ library(ggpubr) Friedman's test is used when you have one within-subjects independent variable with two or more levels and a dependent variable that is not interval and normally distributed (but at least ordinal). To build such unreplicated blocked data, we'll create a data frame called  `df_bp` from random number. In  `df_bp` : dependent variable `bp` is randomly generated; Block: `subjid` ; Group: `time_point`. ```{r} -#| eval: true set.seed(123) df_bp = data.frame(bp = runif(n = 50, 138, 200)) |> @@ -42,7 +38,6 @@ ggpubr::ggboxplot(df_bp, x = "time_point", y = "bp", add = "jitter") In R, **friedman_test** can be used to compare multiple means of rank in `bp` grouped by `time_point`, stratified by `subjid`. ```{r} -#| eval: true res.fried <- df_bp |> friedman_test(bp ~ time_point | subjid) res.fried @@ -52,7 +47,7 @@ res.fried In SAS, **CMH2** option of PROC FREQ is used to perform Friedman's test. -```{sas} +```sas proc freq data=data_bp; tables patient*dos*bp / cmh2 scores=rank noprint; @@ -87,7 +82,6 @@ SAS `PROC FREQ` documentation: mutate( @@ -105,7 +103,6 @@ glimpse(lung2) We use the `trial01` dataset provided with {beeca} R package. Initial data preparation involves setting the treatment indicator as a categorical variable and removing any incomplete cases. 
```{r} -#| eval: true data("trial01") trial01$trtp <- factor(trial01$trtp) ## set treatment to a factor @@ -145,7 +142,6 @@ Note, the default fitting method in `glm` is consistent with the default fitting - Default fitting method for `PROC LOGISTIC` procedure is Fisher's scoring method, which is reported as part of the SAS default output, and it is equivalent to "Iteratively reweighted least squares" method as reported in this [documentation](https://support.sas.com/documentation/cdl/en/statug/63347/HTML/default/viewer.htm#statug_logistic_sect033.htm). ```{r} -#| eval: true m1 <- stats::glm( wt_grp ~ age + sex + ph.ecog + meal.cal, data = lung2, @@ -159,13 +155,12 @@ summary(m1)$coefficients Note, function `confint.default` gives the Wald confidence limits, which is the default option in SAS `PROC LOGISTIC` procedure; whereas `confint` gives the profile-likelihood limits. Conditional odds ratio is calculated by taking the exponential of the model parameters. ```{r} -#| eval: true cbind(est = coef(m1), confint.default(m1)) ``` ### `PROC LOGISTIC` in SAS (without firth option) -```{sas} +```sas PROC LOGISTIC DATA=LUNG2; # import lung MODEL WT_GRP(EVENT="weight_gain") = AGE SEX PH_ECOG MEAL_CAL; ods output ESTIMATEs=estimates; @@ -219,7 +214,6 @@ Note that while Firth logistic regression is not required for our example datase - By default, `logistf` function in R computes the confidence interval estimates and hypothesis tests (including p-value) for each parameter based on profile likelihood, which is also reported in the output below. However, Wald method (confidence interval and tests) can be specified by specifying the `control` argument with [`pl = FALSE`](https://cran.r-project.org/web/packages/logistf/logistf.pdf). ```{r} -#| eval: true firth_mod <- logistf( wt_grp ~ age + sex + ph.ecog + meal.cal, data = lung2, @@ -239,7 +233,6 @@ summary(firth_mod)$coefficients Note, function `confint` gives the profile-likelihood limits. 
Given the parameters from Firth's bias-reduced logistic regression is estimated using penalized maximum likelihood, `confint` function is used. Conditional odds ratio is calculated by taking the exponential of the model parameters. ```{r} -#| eval: true cbind(est = coef(firth_mod), confint(firth_mod)) ``` @@ -249,7 +242,7 @@ cbind(est = coef(firth_mod), confint(firth_mod)) - `XCONV` specifies relative parameter convergence criterion, which should correspond to the `xconv` in `logistf` function in R. We specify `XCONV = 0.00000001` so it should be consistent with the R code above. -```{sas} +```sas PROC LOGISTIC DATA=LUNG2; MODEL WT_GRP(EVENT="weight gain") = AGE SEX PH_ECOG MEAL_CAL / firth clodds=PL clparm=PL xconv = 0.00000001; @@ -286,7 +279,6 @@ We compare two implementions of g-computation in SAS: We fit a logistic regression model with covariate adjustment to estimate the marginal treatment effect using the delta method for variance estimation: as outlined in Ge et al (2011). ```{r} -#| eval: true #| label: glm ## fit the model including model based variance estimation with delta method fit1 <- stats::glm(aval ~ trtp + bl_cov, family = "binomial", data = trial01) |> @@ -310,7 +302,7 @@ fit1$marginal_se We now use the SAS [`%Margins`](https://support.sas.com/kb/63/038.html) macro to perform the Ge et al. (2011) method on `trial01` to estimate the marginal risk difference and it's standard error. 
-```{sas} +```sas %Margins(data = myWork.trial01, class = trtp, classgref = first, /*Set reference to first level*/ @@ -337,7 +329,7 @@ run; ### `%LR` macro in SAS (Ge et al, 2011) -```{sas} +```sas %LR(data = myWork.trial01, /* input data set */ var1 = bl_cov, /* continuous covariates in the logistic regression */ var2 = trtp, /* categorical covariates in the logistic regression */ diff --git a/Comp/r-sas_mcnemar.qmd b/Comp/r-sas_mcnemar.qmd index 2626cf701..fe5f0cf36 100644 --- a/Comp/r-sas_mcnemar.qmd +++ b/Comp/r-sas_mcnemar.qmd @@ -1,7 +1,5 @@ --- title: "R v SAS McNemar's test" -execute: - eval: false --- ## Introduction @@ -20,7 +18,6 @@ The following table provides an overview of the support and results comparabilit In R,the {stats} or the {coin} package can be used to calculate McNemar. The {coin} package has the same defaults as SAS. But, using either of these packages, the first step is to calculate a frequency table, using the table function. ```{r} -#| eval: true library(coin) colds <- read.csv( @@ -35,7 +32,6 @@ coin::mh_test(freq_tbl) In order to get Cohen's Kappa an additional package is needed. ```{r} -#| eval: true library(vcd) cohen_kappa <- vcd::Kappa(freq_tbl) @@ -45,14 +41,13 @@ confint(cohen_kappa, level = 0.95) The FREQ procedure can be used in SAS with the AGREE option to run the McNemar test, with OR, and RISKDIFF options stated for production of odds ratios and risk difference. These options were added as `epibasix::mcNemar` outputs the odds ratio and risk difference with confidence limits as default. In contrast to R, SAS outputs the Kappa coefficients with confident limits as default. 
-```{sas} +```sas proc freq data=colds; tables age12*age14 / agree or riskdiff; run; ``` ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 40% @@ -69,7 +64,6 @@ There is another R package that is sometimes used to calculate McNemar's, called ::: {.callout-note collapse="true" title="Session Info"} ```{r} -#| eval: true #| echo: false si <- sessioninfo::session_info( c("coin", "stats"), #Vector of packages used diff --git a/Comp/r-sas_mmrm.qmd b/Comp/r-sas_mmrm.qmd index 70e553550..32e9b2a25 100644 --- a/Comp/r-sas_mmrm.qmd +++ b/Comp/r-sas_mmrm.qmd @@ -1,14 +1,14 @@ --- title: "R vs SAS MMRM" -message: false -warning: false -echo: true -eval: false +execute: + message: false + warning: false + echo: true + eval: false --- ```{r} #| include: false -#| eval: true knitr::opts_chunk$set( collapse = TRUE, comment = "#>", @@ -22,7 +22,6 @@ knitr::opts_chunk$set( #| label: review-setup #| include: false #| eval: true - library(dplyr) library(purrr) library(microbenchmark) @@ -144,7 +143,7 @@ Code for fitting MMRMs to the FEV data using each of the considered functions an ### `PROC GLIMMIX` -```{sas} +```sas PROC GLIMMIX DATA = fev_data; CLASS AVISIT(ref = 'VIS1') ARMCD(ref = 'PBO') USUBJID; MODEL FEV1 = AVISIT|ARMCD / ddfm=satterthwaite solution chisq; @@ -176,7 +175,7 @@ mmrm::mmrm( ### `PROC GLIMMIX` -```{sas} +```sas PROC GLIMMIX DATA = fev_data; CLASS AVISIT(ref = 'VIS1') ARMCD(ref = 'PBO') USUBJID; MODEL FEV1 = AVISIT|ARMCD / ddfm=satterthwaite solution chisq; @@ -187,6 +186,7 @@ RUN; ### `mmrm` ```{r} + mmrm::mmrm( formula = FEV1 ~ ARMCD * AVISIT + ar1h(VISITN | USUBJID), data = fev_data @@ -209,7 +209,7 @@ nlme::gls( ### `PROC GLIMMIX` -```{sas} +```sas PROC GLIMMIX DATA = fev_data; CLASS AVISIT(ref = 'VIS1') ARMCD(ref = 'PBO') USUBJID; MODEL FEV1 = ARMCD|AVISIT / ddfm=satterthwaite solution chisq; @@ -251,7 +251,7 @@ glmmTMB::glmmTMB( ### `PROC GLIMMIX` -```{sas} +```sas PROC GLIMMIX DATA = fev_data; CLASS AVISIT(ref = 'VIS1') ARMCD(ref = 
'PBO') USUBJID; MODEL FEV1 = AVISIT|ARMCD / ddfm=satterthwaite solution chisq; @@ -294,7 +294,7 @@ glmmTMB::glmmTMB( ### `PROC GLIMMIX` -```{sas} +```sas PROC GLIMMIX DATA = fev_data; CLASS AVISIT(ref = 'VIS1') ARMCD(ref = 'PBO') USUBJID; MODEL FEV1 = AVISIT|ARMCD / ddfm=satterthwaite solution chisq; @@ -326,7 +326,7 @@ nlme::gls( ### `PROC GLIMMIX` -```{sas} +```sas PROC GLIMMIX DATA = fev_data; CLASS AVISIT(ref = 'VIS1') ARMCD(ref = 'PBO') USUBJID; MODEL FEV1 = AVISIT|ARMCD / ddfm=satterthwaite solution chisq; @@ -370,7 +370,7 @@ glmmTMB::glmmTMB( ### `PROC GLIMMIX` -```{sas} +```sas PROC GLIMMIX DATA = fev_data; CLASS AVISIT(ref = 'VIS1') ARMCD(ref = 'PBO') USUBJID; MODEL FEV1 = AVISIT|ARMCD / ddfm=satterthwaite solution chisq; @@ -401,7 +401,7 @@ glmmTMB::glmmTMB( ### `PROC GLIMMIX` -```{sas} +```sas PROC GLIMMIX DATA = fev_data; CLASS AVISIT(ref = 'VIS1') ARMCD(ref = 'PBO') USUBJID; MODEL FEV1 = AVISIT|ARMCD / ddfm=satterthwaite solution chisq; @@ -422,7 +422,7 @@ mmrm::mmrm( ### `PROC GLIMMIX` -```{sas} +```sas PROC GLIMMIX DATA = fev_data; CLASS AVISIT(ref = 'VIS1') ARMCD(ref = 'PBO') USUBJID; MODEL FEV1 = ARMCD|AVISIT / ddfm=satterthwaite solution chisq; diff --git a/Comp/r-sas_negbin.qmd b/Comp/r-sas_negbin.qmd index a837828ba..feb2d91a6 100644 --- a/Comp/r-sas_negbin.qmd +++ b/Comp/r-sas_negbin.qmd @@ -2,8 +2,8 @@ title: "R vs SAS: Negative Binomial Regression" format: html toc: true -echo: true -eval: false +execute: + echo: true keep-hidden: true --- @@ -96,7 +96,6 @@ In order to run these analyses we need to load a few packages. 
```{r} #| output: false -#| eval: true #| message: false library(MASS) library(dplyr) @@ -108,7 +107,6 @@ We also define the `glm_nb_cov` function to obtain the SAS variance-covariance m ```{r} #| output: false #| message: false -#| eval: true ## Helper function to compute the variance from negative binomial regression ## This matches with variance estimated from SAS glm_nb_cov <- function(mod) { @@ -183,7 +181,6 @@ A dummy dataset is simulated, including The dummy dataset is saved as a csv file, and then the csv file is read into SAS. ```{r} -#| eval: true N = 100 # set seed for replication @@ -235,7 +232,7 @@ Exact match (at 0.001 level) can be obtained using `glm.nb` in R vs `PROC GENMOD After importing the dummy dataset we can run the negative binomial regression in SAS using \`PROC GENMOD. We estimate the model parameters and lsmeans for the treatment arms using both the default and OM weights. -```{sas} +```sas proc genmod data=df; class GRPC (ref='Plb') X2 (ref='A'); model y = GRPC x1 x2 / dist=negbin link=log offset=logtime; @@ -253,7 +250,6 @@ Below is a screenshot of output tables summarizing coefficient estimates and lsm Lets now try to reproduce the results in R using `MASS::glm.nb`. ```{r} -#| eval: true fit <- glm.nb(y ~ grpc + x1 + x2 + offset(logtime), data = df, x = TRUE) # model coefficients summary @@ -263,7 +259,6 @@ summary(fit)$coefficients We can see that while the estimates are exactly matching those in SAS, the standard errors are slightly smaller. This is a result of the difference in covariance estimation mentioned above. To obtain exactly the same results as in SAS we need to re-estimate the covariance matrix using the `glm_nb_cov` function we defined earlier. Note that to use this function with the fitted results we needed to specify `x = TRUE` in the `glm.nb` function so that the design matrix is available. 
```{r} -#| eval: true sigma_hat <- glm_nb_cov(fit) ## recalculate confidence intervals, and p-values @@ -293,7 +288,6 @@ new_summary Now the estimates, standard errors, 95% confidence interval limits and p-values are exactly matching those in SAS up to the 4th digit. We can also provide an estimate and CI for the dispersion parameter: ```{r} -#| eval: true # estimate and 95%-CI for k = 1/theta theta_est <- fit$theta theta_se <- sqrt(sigma_hat[6, 6]) @@ -311,7 +305,6 @@ We see that while the point estimate is the same as in SAS, the CI for the dispe Finally we can replicate the estimation of lsmeans in SAS via the emmeans package. Note that we need to supply the re-estimated covariance matrix, but only provide the rows and columns for the model coefficients without the dispersion parameter as emmeans does not need the latter. ```{r} -#| eval: true # lsmeans with weights = equal, equivalent to SAS default lsmean1 <- emmeans( fit, @@ -338,7 +331,6 @@ lsmean2 Estimates and CIs are exactly matching those in SAS for both of the options. Finally we can also obtain the z statistic and corresponding p-values: ```{r} -#| eval: true test(lsmean1) test(lsmean2) ``` diff --git a/Comp/r-sas_psmatch.qmd b/Comp/r-sas_psmatch.qmd index c65fde747..ad0c525d9 100644 --- a/Comp/r-sas_psmatch.qmd +++ b/Comp/r-sas_psmatch.qmd @@ -1,6 +1,6 @@ --- title: "Propensity Score Matching" -execute: +execute: eval: false --- @@ -164,7 +164,7 @@ Characteristic (N = 120) (N = 180) Mean Diff. 
### SAS -```{sas} +```sas proc psmatch data=data region=cs(extend=0); class trtp sex bmi_cat; psmodel trtp(Treated="trt")= sex weight age bmi_cat; diff --git a/Comp/r-sas_survival.qmd b/Comp/r-sas_survival.qmd index 5e7b34c6a..f337663b0 100644 --- a/Comp/r-sas_survival.qmd +++ b/Comp/r-sas_survival.qmd @@ -1,7 +1,5 @@ --- title: "R vs SAS - Kaplan Meier and Cox-proportion hazards modelling" -execute: - eval: false --- # Comparison of SAS vs R @@ -29,7 +27,6 @@ Results from the examples shown for R [here](https://psiaims.github.io/CAMIS/R/s Comparing the non-stratified model results side-by-side, the CIs for the quartile estimates and landmark estimates are different between R and SAS. HR and CI also have slight differences. ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 75% @@ -56,7 +53,7 @@ fit.cox <- survival::coxph( - SAS: change method for ties to use "efron" -```{sas} +```sas proc phreg data=dat; class afb; model lenfol*fstat(0) = afb/rl ties = efron; @@ -88,7 +85,7 @@ fit.km <- survival::survfit( - SAS: change to "log" -```{sas} +```sas proc lifetest data=dat conftype=log; time lenfoly*fstat(0); strata afb; @@ -102,7 +99,6 @@ From a [reference](https://myweb.uiowa.edu/pbreheny/7210/f15/notes/9-10.pdf): Th Now if we change the confidence interval type in SAS to "log" and tie handling to "efron", the results will be identical to the results in R. ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 75% @@ -112,7 +108,6 @@ knitr::include_graphics("../images/survival/r_sas_chg_default.png") Below is the side-by-side comparison for stratified analysis with default methods in SAS matched to R's, the results are also identical. 
```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 75% @@ -136,7 +131,7 @@ fit.cox <- survival::coxph(survival::Surv(LENFOLY, FSTAT) ~ AFB, data = dat) - SAS: adjust convergence criterion -```{sas} +```sas proc phreg data=dat; class afb; model lenfol*fstat(0) = afb / rl fconv = 1e-9; @@ -156,7 +151,6 @@ The convergence criterion details are described in their documentation: Now we look at other cases when the data has some special type which causes a mismatch between SAS and R. Suppose a dataset has 10 observations, and the first 5 are all events, and the last 5 are all censored. ```{r} -#| eval: true test <- tibble( time = c(54, 75, 77, 84, 87, 92, 103, 105, 112, 118), status = c(1, 1, 1, 1, 1, 0, 0, 0, 0, 0) @@ -188,7 +182,7 @@ summary(fit.km, times = c(80, 100, 120), extend = T) Below is the SAS code: -```{sas} +```sas proc lifetest data=dat outsurv=_SurvEst timelist= 80 100 120 reduceout stderr; time lenfoly*fstat(0); run; @@ -197,7 +191,6 @@ run; Below is the side-by-side comparison: ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 75% @@ -215,7 +208,6 @@ The kth quantile for a survival curve S(t) is the location at which a horizontal For example, using the data above, the survival probability is exactly 0.5 at time=87 and remains at 0.5 until the last censored observation at 118. ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 75% @@ -240,7 +232,6 @@ For the 120-day event-free estimate, SAS considers that 120 days is beyond the m If we change the last observation in the dataset to be an event (instead of censored), R and SAS will both give 0 for the event-free survival estimate, because it is for sure that all subjects did not survive beyond 120 days. 
```{r} -#| eval: true test <- tibble( time = c(54, 75, 77, 84, 87, 92, 103, 105, 112, 118), status = c(1, 1, 1, 1, 1, 0, 0, 0, 0, 1) @@ -250,7 +241,6 @@ test ``` ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 75% diff --git a/SAS/SAS_Friedmantest.qmd b/SAS/SAS_Friedmantest.qmd index bb18eadf7..9d5b8eb08 100644 --- a/SAS/SAS_Friedmantest.qmd +++ b/SAS/SAS_Friedmantest.qmd @@ -1,7 +1,5 @@ --- title: "Friedman Chi-Square test using SAS" -execute: - eval: false --- # Introduction @@ -20,7 +18,7 @@ Simulated dataset of 10 subjects(blocks) with continuous endpoints are generated ## Data source -```{sas} +```sas data one_way_repeat; do subject = 1 to 10; do timepoint = 1 to 4; @@ -50,7 +48,7 @@ When the data contains missing response, the procedure discards the correspondin ## Example Code for Friedman Chi-square test -```{sas} +```sas proc freq data=one_way_repeat; tables subject*timepoint*response / cmh2 scores=rank noprint; diff --git a/SAS/ancova.qmd b/SAS/ancova.qmd index 7e28ad903..d93ffa7e4 100644 --- a/SAS/ancova.qmd +++ b/SAS/ancova.qmd @@ -1,12 +1,9 @@ --- title: "Ancova" date: "2024-02-20" -execute: - eval: false --- ```{r} -#| eval: true #| label: setup #| include: false knitr::opts_chunk$set(echo = TRUE) @@ -20,7 +17,7 @@ In SAS, there are several ways to perform ANCOVA analysis. One common way is to The following data was used in this example. 
-```{sas} +```sas data DrugTest; input Drug $ PreTreatment PostTreatment @@; datalines; @@ -37,7 +34,7 @@ data DrugTest; The following code was used to test the effects of a drug pre and post treatment: -```{sas} +```sas proc glm data=DrugTest; class Drug; model PostTreatment = Drug PreTreatment / solution; @@ -50,7 +47,6 @@ run; Output: ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 50% diff --git a/SAS/anova.qmd b/SAS/anova.qmd index e383b3404..2181e2323 100644 --- a/SAS/anova.qmd +++ b/SAS/anova.qmd @@ -1,7 +1,5 @@ --- title: "ANOVA" -execute: - eval: false --- ### **Getting Started** @@ -12,7 +10,7 @@ To demonstrate the various types of sums of squares, we'll create a data frame c For this example, we're testing for a significant difference in `stem_length` using ANOVA. -```{sas} +```sas proc glm data = disease; class drug disease; model y=drug disease drug*disease; @@ -30,7 +28,7 @@ knitr::include_graphics("../images/linear/sas-f-table.png") SAS has four types of sums of squares calculations. To get these calculations, the sum of squares option needs to be added (`/ ss1 ss2 ss3 ss4`) to the model statement. 
-```{sas} +```sas proc glm; class drug disease; model y=drug disease drug*disease / ss1 ss2 ss3 ss4; @@ -40,7 +38,6 @@ run; #### Type I ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 75% @@ -50,7 +47,6 @@ knitr::include_graphics("../images/linear/sas-ss-type-1.png") #### Type II {.unnumbered} ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 75% @@ -60,7 +56,6 @@ knitr::include_graphics("../images/linear/sas-ss-type-2.png") #### Type III {.unnumbered} ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 75% @@ -70,7 +65,6 @@ knitr::include_graphics("../images/linear/sas-ss-type-3.png") #### Type IV {.unnumbered} ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 75% @@ -81,7 +75,7 @@ knitr::include_graphics("../images/linear/sas-ss-type-4.png") To get contrasts in SAS, we use the `estimate` statement. For looking at contrast we are going to fit a different model on new data, that doesn't include an interaction term as it is easier to calculate contrasts without an interaction term. For this dataset we have three different drugs A, C, and E. -```{sas} +```sas proc glm data=testdata; class drug; model post = drug pre / solution; @@ -91,7 +85,6 @@ run; ``` ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 75% diff --git a/SAS/association.qmd b/SAS/association.qmd index c5a917c6b..b552e94d3 100644 --- a/SAS/association.qmd +++ b/SAS/association.qmd @@ -1,7 +1,5 @@ --- title: "Association Analysis for Count Data Using SAS" -execute: - eval: false --- In SAS, association analysis methods for count data/contingency tables is typically performed using the `PROC FREQ` procedure. This procedure has options for Chi-Square and Fisher's Exact tests. @@ -10,7 +8,7 @@ In SAS, association analysis methods for count data/contingency tables is typica The following tabulation was used for the SAS Chi-Square and Fisher's testing. 
This tabulation was derived from the same `lung` dataset used for the R function testing. The dataset is defined as follows: -```{sas} +```sas data test_case; input treatment $ Count Weight $; @@ -26,7 +24,7 @@ data test_case; The following SAS code produces both the Chi-Square and Fisher's Exact tests of association. Note that the results contain many statistics not produced by the corresponding R function. The relevant sections of the output have been outlined in red. -```{sas} +```sas proc freq data = test_case; weight Count; tables treatment * Weight / chisq fisher; @@ -37,7 +35,6 @@ run; Output: ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 50% diff --git a/SAS/binomial_test.qmd b/SAS/binomial_test.qmd index deedf3b8e..e2d2642d9 100644 --- a/SAS/binomial_test.qmd +++ b/SAS/binomial_test.qmd @@ -1,14 +1,12 @@ --- title: "Binomial Test on Coin Flips and Clinical Data" -execute: - eval: false --- ## Simulating Coin Flips Set the seed for reproducibility and simulate 1000 coin flips using a Bernoulli distribution. -```{sas} +```sas /* Set the seed for reproducibility */ %let seed = 19; @@ -29,7 +27,7 @@ run; Use SQL to count how many heads and tails were observed in the simulation. -```{sas} +```sas proc sql; select sum(result = "H") as heads_count, @@ -44,7 +42,7 @@ quit; Print the counts using `%put` statements. -```{sas} +```sas %put Heads Count: &heads_count; %put Tails Count: &tails_count; %put Total Flips: &total_flips; @@ -58,7 +56,7 @@ $H_0 : p = 0.5$ ## 1. Exact Binomial Test -```{sas} +```sas proc freq data=coin_flips; tables result / binomial(p=0.5 ); exact binomial; @@ -67,7 +65,7 @@ run; ## 2. Wald Binomial Test(Asymptotic) -```{sas} +```sas proc freq data=coin_flips; tables result / binomial(p=0.5); run; @@ -75,7 +73,7 @@ run; ## 3. Mid-p adjusted Test -```{sas} +```sas proc freq data=coin_flips; tables result / binomial(p=0.5 level='H' cl=midp ); exact binomial / midp; @@ -84,7 +82,7 @@ run; ## 4. 
Wilson Score Test -```{sas} +```sas proc freq data=coin_flips; tables result / binomial(level='H' p=0.5 cl=score); run; @@ -96,7 +94,7 @@ We load a clinical dataset and test if the observed death proportion is signific ### Import Dataset -```{sas} +```sas proc import datafile='/home/u63532805/CAMIS/lung_cancer.csv' out=lung_cancer dbms=csv @@ -108,7 +106,7 @@ run; ### Create Binary Flag for Deaths -```{sas} +```sas data lung_cancer; set lung_cancer; death_flag = (status = 1); @@ -119,7 +117,7 @@ Perform four binomial tests (Exact test, Wald test, Mid-p adjusted test and Wils ## 1. Exact Binomial Test -```{sas} +```sas proc freq data=lung_cancer; tables death_flag / binomial(p=0.19 level='1'); exact binomial; @@ -129,7 +127,7 @@ run; ## 2. Wald Binomial Test (Asymptotic) -```{sas} +```sas proc freq data=lung_cancer; tables death_flag / binomial(p=0.19 level='1'); title "Asymptotic Binomial Test for Death Proportion"; @@ -138,7 +136,7 @@ run; ## 3. Mid-p adjusted Exact Binomial Test -```{sas} +```sas proc freq data=lung_cancer; tables death_flag / binomial(p=0.19 level='1'); exact binomial / midp; @@ -148,7 +146,7 @@ run; ## 4. Wilson Score Test. 
-```{sas} +```sas proc freq data=lung_cancer; tables death_flag / binomial(level='1' p=0.19 cl=score); title "Wilson Score Test for Death Proportion"; diff --git a/SAS/binomial_test_files/execute-results/html.json b/SAS/binomial_test_files/execute-results/html.json deleted file mode 100644 index 6980fed30..000000000 --- a/SAS/binomial_test_files/execute-results/html.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "hash": "7de0bd2e773e79cbf0a0a31c21b7fef5", - "result": { - "engine": "jupyter", - "markdown": "---\ntitle: \"Binomial Test on Coin Flips and Clinical Data\"\nexecute: \n eval: false\n---\n\n## Simulating Coin Flips\n\nSet the seed for reproducibility and simulate 1000 coin flips using a Bernoulli distribution.\n\n```{sas}\n/* Set the seed for reproducibility */\n%let seed = 19;\n\ndata coin_flips;\n call streaminit(&seed);\n do i = 1 to 1000;\n /* Simulate coin flips: 1 for Heads (H), 0 for Tails (T) */\n flip = rand(\"Bernoulli\", 0.5);\n/* flip = rand(\"BINOMIAL\", 0.5,1); */\n if flip = 1 then result = \"H\";\n else result = \"T\";\n output;\n end;\nrun;\n```\n\n## Counting Heads and Tails\n\nUse SQL to count how many heads and tails were observed in the simulation.\n\n```{sas}\nproc sql;\n select \n sum(result = \"H\") as heads_count,\n sum(result = \"T\") as tails_count,\n count(*) as total_flips\n into :heads_count, :tails_count, :total_flips\n from coin_flips;\nquit;\n```\n\n## Display the Results\n\nPrint the counts using `%put` statements.\n\n```{sas}\n%put Heads Count: &heads_count;\n%put Tails Count: &tails_count;\n%put Total Flips: &total_flips;\n```\n\n## Perform Binomial Test on Coin Flip Results\n\nUse `proc freq` to check if the observed results differ significantly from the expected probability of 0.5.\n\n```{sas}\nproc freq data=coin_flips;\n tables result / binomial(p=0.5);\nrun;\n```\n\n## Example: Binomial Test in Clinical Trial Data\n\nWe load a clinical dataset and test if the observed death proportion is significantly different from a 
hypothesized value (e.g., 19%).\n\n### Import Dataset\n\n```{sas}\nproc import datafile='/home/u63532805/CAMIS/lung_cancer.csv'\n out=lung_cancer\n dbms=csv\n replace;\n getnames=yes;\nrun;\n```\n\n### Create Binary Flag for Deaths\n\n```{sas}\ndata lung_cancer;\n set lung_cancer;\n death_flag = (status = 1);\nrun;\n```\n\n### Perform Exact Binomial Test\n\n```{sas}\nproc freq data=lung_cancer;\n tables death_flag / binomial(p=0.19 level='1');\n title \"Exact Binomial Test for Death Proportion\";\nrun;\n```\n\n## SAS Output\n\n### Coin Flip Summary\n\n| heads_count | tails_count | total_flips |\n|-------------|-------------|-------------|\n| 520 | 480 | 1000 |\n\n### Binomial Test on Coin Flips\n\n**The FREQ Procedure**\n\n| result | Frequency | Percent | Cumulative Frequency | Cumulative Percent |\n|--------|-----------|---------|----------------------|--------------------|\n| H | 520 | 52.00 | 520 | 52.00 |\n| T | 480 | 48.00 | 1000 | 100.00 |\n\n**Binomial Proportion for result = H**\n\n- Proportion: 0.5200\\\n- ASE: 0.0158\\\n- 95% Lower Conf Limit: 0.4890\\\n- 95% Upper Conf Limit: 0.5510\n\n**Exact Confidence Limits**\n\n- 95% Lower Conf Limit: 0.4885\\\n- 95% Upper Conf Limit: 0.5514\n\n**Test of H0: Proportion = 0.5**\n\n- ASE under H0: 0.0158\\\n- Z: 1.2649\\\n- One-sided Pr \\> Z: 0.1030\\\n- Two-sided Pr \\> \\|Z\\|: 0.2059\\\n- Sample Size: 1000\n\n### Exact Binomial Test for Death Proportion\n\n**The FREQ Procedure**\n\n| death_flag | Frequency | Percent | Cumulative Frequency | Cumulative Percent |\n|------------|-----------|---------|----------------------|--------------------|\n| 0 | 165 | 72.37 | 165 | 72.37 |\n| 1 | 63 | 27.63 | 228 | 100.00 |\n\n**Binomial Proportion for death_flag = 1**\n\n- Proportion: 0.2763\\\n- ASE: 0.0296\\\n- 95% Lower Conf Limit: 0.2183\\\n- 95% Upper Conf Limit: 0.3344\n\n**Exact Confidence Limits**\n\n- 95% Lower Conf Limit: 0.2193\\\n- 95% Upper Conf Limit: 0.3392\n\n**Test of H0: Proportion = 0.19**\n\n- ASE under H0: 
0.0260\\\n- Z: 3.3223\\\n- One-sided Pr \\> Z: 0.0004\\\n- Two-sided Pr \\> \\|Z\\|: 0.0009\\\n- Sample Size: 228\n\n", - "supporting": [ - "binomial_test_files" - ], - "filters": [], - "includes": {} - } -} \ No newline at end of file diff --git a/SAS/ci_for_2indep_prop.qmd b/SAS/ci_for_2indep_prop.qmd index f90936ae8..4fd0246e0 100644 --- a/SAS/ci_for_2indep_prop.qmd +++ b/SAS/ci_for_2indep_prop.qmd @@ -16,7 +16,7 @@ Caution is required if there are no responders (or all responders) in both group The adcibc data stored [here](../data/adcibc.csv) was used in this example, creating a binary treatment variable `trt` taking the values of `Act` or `PBO` and a binary response variable `resp` taking the values of `Yes` or `No`. For this example, a response is defined as a score greater than 4. -``` sas +```sas proc import datafile = 'data/adcibc.csv' out = adcibc dbms = csv @@ -42,7 +42,7 @@ run; The below shows that for the Active Treatment, there are 36 responders out of 154 subjects, p1 = 0.2338 (23.38% responders), while for the placebo treatment p2 = 12/77 = 0.1558, giving a risk difference of 0.0779, relative risk 1.50, and odds ratio 1.6525. -``` sas +```sas proc freq data=adcibc2; table trt*resp/ nopct nocol; run; @@ -151,7 +151,7 @@ Similarly for relrisk, although the output does not state that the RR is calcula SAS output often rounds to 3 or 4 decimal places in the output window, however the full values can be obtained using SAS ODS statements. 
-``` sas +```sas ****************************; *** Risk Difference examples; diff --git a/SAS/ci_for_paired_prop.qmd b/SAS/ci_for_paired_prop.qmd index 0c2a4c22e..dbd3d2731 100644 --- a/SAS/ci_for_paired_prop.qmd +++ b/SAS/ci_for_paired_prop.qmd @@ -217,7 +217,7 @@ There is no equivalent option in PROC FREQ for common relative risks for paired \[Example code to be added for `%PAIRBINCI`\] -``` sas +```sas data dat_used; do id = 1 to 20; trt = 'ACT'; resp = 'Yes'; output; diff --git a/SAS/ci_for_prop.qmd b/SAS/ci_for_prop.qmd index 8b1fbcede..5bc66f26b 100644 --- a/SAS/ci_for_prop.qmd +++ b/SAS/ci_for_prop.qmd @@ -10,7 +10,7 @@ See the [summary page](../method_summary/ci_for_prop_intro.html) for general int The adcibc data stored [here](../data/adcibc.csv) was used in this example, creating a binary treatment variable `trt` taking the values of `Act` or `PBO` and a binary response variable `resp` taking the values of `Yes` or `No`. For this example, a response is defined as a score greater than 4. -``` sas +```sas data adcibc2 (keep=trt resp) ; set adcibc; if aval gt 4 then resp="Yes"; @@ -22,14 +22,13 @@ run; The below shows that for the Active Treatment, there are 36 responders out of 154 subjects = 0.2338 (23.38% responders). -``` sas +```sas proc freq data=adcibc2; table trt*resp/ nopct nocol; run; ``` ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 50% @@ -166,7 +165,7 @@ Caution is required if there are no responders in a group (aside from any issues The output consists of the proportion of resp=Yes, the Asymptotic SE, 95% CIs using normal-approximation method, 95% CI using the Clopper-Pearson method, and then a Binomial test statistic and p-value for the null hypothesis of H0: Proportion = 0.5. 
-``` sas +```sas proc sort data=adcibc2; by trt; run; @@ -179,7 +178,6 @@ run; ```{r} #| echo: false -#| eval: true #| fig-align: center #| out-width: 50% knitr::include_graphics("../images/ci_for_prop/binomial_prop_pbo.png") @@ -195,7 +193,7 @@ By adding the option `BINOMIAL(LEVEL="Yes" CL=)`, the other C - `BINOMIAL(LEVEL="Yes" CL=WILSON(CORRECT) WALD(CORRECT));`will return Wilson (with continuity correction) and Wald (with continuity correction) -``` sas +```sas proc freq data=adcibc2; table resp/ nopct nocol BINOMIAL(LEVEL = "Yes" @@ -208,13 +206,12 @@ run; ```{r} #| echo: false -#| eval: true #| fig-align: center #| out-width: 50% knitr::include_graphics("../images/ci_for_prop/binomial_prop_all_act.png") ``` -``` sas +```sas proc freq data=adcibc2; table resp/ nopct nocol BINOMIAL(LEVEL="Yes" @@ -227,7 +224,6 @@ run; ```{r} #| echo: false -#| eval: true #| fig-align: center #| out-width: 50% knitr::include_graphics("../images/ci_for_prop/binomial_prop_cc_act.png") diff --git a/SAS/cmh.qmd b/SAS/cmh.qmd index eb742248f..577b2d66d 100644 --- a/SAS/cmh.qmd +++ b/SAS/cmh.qmd @@ -1,7 +1,5 @@ --- title: "CMH Test" -execute: - eval: false --- # Cochran-Mantel-Haenszel Test @@ -16,7 +14,7 @@ When the design of the contingency table is 2 x 2 x K (i.e, X == 2 levels, Y == Below is the syntax to conduct a CMH analysis in SAS: -```{sas} +```sas proc freq data = filtered_data; tables K * X * Y / cmh; * the order of K, X, and Y appearing on the line is important!; @@ -31,7 +29,7 @@ The adcibc data described [here](https://psiaims.github.io/CAMIS/R/cmh.html) is The code used is always the same, however, we can limit the number of levels in each example to show a 2x2x2 case, 2x3xK case etc. 
-```{sas} +```sas proc freq data = adcibc; tables agegr1 * trtp * sex / cmh; run; @@ -56,7 +54,6 @@ Let's test if there is a difference between 3 treatments (Placebo, Xanomeline lo #| echo: false #| fig-align: center #| out-width: 50% -#| eval: true knitr::include_graphics("../images/cmh/saspage_output2.png") ``` @@ -70,7 +67,7 @@ Risk differences within each strata (for 2 x 2 x K tables) can be obtained by ad The individual treatment comparisons within strata can be useful to explore if the treatment effect is in the same direction and size for each strata, such as to determine if pooling them is in fact sensible. -```{sas} +```sas # Default method is: Wald asymptotic confidence limits proc freq data = adcibc (where=(trtpn ne 54 and agegr1 ne ">80")); tables agegr1 * trtp * sex / cmh riskdiff; @@ -81,13 +78,12 @@ run; #| echo: false #| fig-align: center #| out-width: 50% -#| eval: true knitr::include_graphics("../images/cmh/saspage_output3.png") ``` Note above that exact CI's are not output for the difference betweeen the treatments. You can request SAS output other CI methods as shown below. This outputs the risk difference between the treatments and 95% CI, calculated for each age group strata separately using the Miettinen-Nurminen (score) (MN) method. -```{sas} +```sas # You can change the confidence limits derivation using (cl=xxx) option proc freq data = adcibc (where=(trtpn ne 54 and agegr1 ne "\>80")); tables agegr1 * trtp * sex / cmh riskdiff(cl=mn); @@ -109,7 +105,7 @@ PROC FREQ in the SAS Viya platform has introduced a new COMMONRISKDIFF(CL=MN/MNM See the next section on Common risk differences available in SAS. 
-```{sas} +```sas # Specifying the Miettinen-Nurminen (score) method proc freq data = adcibc (where=(trtpn ne 54 and agegr1 ne ">80")); tables agegr1 * trtp * sex / cmh riskdiff (common cl=mn); @@ -120,7 +116,6 @@ run; #| echo: false #| fig-align: center #| out-width: 50% -#| eval: true knitr::include_graphics("../images/cmh/saspage_output3c.png") ``` @@ -130,7 +125,7 @@ Including either column=1 or column=2 tells SAS which is your outcome of interes Note that if the data are such that there are no responses in either treatment group, the cross-tabulation will only have 1 column, and SAS PROC FREQ will fail to produce a confidence interval. In this unusual situation however, a valid confidence interval can (and should) still be produced, which may be obtained using the %SCORECI macro available at [https://github.com/petelaud/ratesci-sas/tree/main](#0). -```{sas} +```sas proc freq data = adcibc (where=(trtpn ne 54 and agegr1 ne "\>80")); tables agegr1 * trtp * sex / cmh riskdiff(common column=2); run; @@ -145,7 +140,7 @@ Note that SAS (since v9.3M2 / STAT 12.1) PROC FREQ will produce the Miettinen-Nu Stratified Miettinen-Nurminen CIs have more recently been added to PROC FREQ, but only within the SAS Viya platform (using CL=MN or CL=MNMH in the COMMONRISKDIFF option). The only other known way to have SAS produce these CIs is to use this publicly available macro: [https://github.com/petelaud/ratesci-sas/tree/main](#0). Note that the macro (currently) does not implement Miettinen & Nurminen's proposed iterative weights, but instead the simpler (and similar) MH weights are used. 
-```{sas} +```sas proc freq data = adcibc (where=(trtpn ne 54 and agegr1 ne ">80")); tables agegr1 * trtp * sex / cmh commonriskdiff(CL=SCORE TEST=SCORE); run; @@ -156,11 +151,10 @@ run; #| echo: false #| fig-align: center #| out-width: 50% -#| eval: true knitr::include_graphics("../images/cmh/saspage_output4.png") ``` -```{sas} +```sas proc freq data = adcibc (where=(trtpn ne 54 and agegr1 ne ">80")); tables agegr1 * trtp * sex / cmh commonriskdiff(CL= newcombe); run; @@ -170,7 +164,6 @@ run; #| echo: false #| fig-align: center #| out-width: 50% -#| eval: true knitr::include_graphics("../images/cmh/saspage_output4b.png") ``` diff --git a/SAS/correlation.qmd b/SAS/correlation.qmd index 03dd2528b..422aad448 100644 --- a/SAS/correlation.qmd +++ b/SAS/correlation.qmd @@ -1,7 +1,5 @@ --- title: "Correlation Analysis using SAS" -execute: - eval: false --- # **Example: Lung Cancer Data** @@ -38,7 +36,7 @@ In contrast, if the data set contains missing values for the analysis variables ## **Pearson Correlation** -```{sas} +```sas proc corr data=lung pearson; var age mealcal; run; @@ -48,7 +46,7 @@ run; ## **Spearman Correlation** -```{sas} +```sas proc corr data=lung spearman; var age mealcal; run; @@ -58,7 +56,7 @@ run; ## Kendall's rank correlation -```{sas} +```sas proc corr data=lung kendall; var age mealcal; run; diff --git a/SAS/count_data_regression.qmd b/SAS/count_data_regression.qmd index 4749975ae..f50418289 100644 --- a/SAS/count_data_regression.qmd +++ b/SAS/count_data_regression.qmd @@ -2,8 +2,6 @@ title: "Poisson and Negative Binomial Regression in SAS" date: last-modified date-format: D MMMM, YYYY -execute: - eval: false --- This page serves as an introduction to performing Poisson and Negative Binomial regression in SAS. For detail on how results compare between R and SAS see [RvsSAS](R%20vs%20SAS:%20Negative%20Binomial%20Regression). @@ -40,7 +38,7 @@ It is generally good practice to apply the OM option on the lsmeans statement. 
T You can use exponential of the maximum likelihood parameter estimate (for treat and age in this example), and the exponential of the Wald 95% Confidence Limits to obtain the odds ratios and 95% CIs. Estimates of the least squares means and CI's for each treatment are output using the lsmeans option. These estimates also have to be back transformed using exponential distribution to get the mean count back onto the original scale. Proc Genmod uses GLM parameterization. -```{sas} +```sas ods output ParameterEstimates=ORs lsmeans=lsm; proc genmod data=polyps; class treat (ref="placebo"); @@ -60,7 +58,6 @@ run; ```{r} #| echo: false -#| eval: true #| fig-align: center #| out-width: 50% knitr::include_graphics("../images/count_data_regression/poisson1.png") @@ -83,7 +80,7 @@ In SAS, we can use proc genmod to perform negative binomial regression. The belo Model parameterization is very similar to poisson -```{sas} +```sas ods output ParameterEstimates=ORs lsmeans=lsm; proc genmod data=polyps; class treat (ref="placebo"); @@ -104,7 +101,6 @@ run; ```{r} #| echo: false -#| eval: true #| fig-align: center #| out-width: 50% knitr::include_graphics("../images/count_data_regression/negbin1.png") diff --git a/SAS/gee.qmd b/SAS/gee.qmd index c75ab079b..a9df37e47 100644 --- a/SAS/gee.qmd +++ b/SAS/gee.qmd @@ -1,7 +1,5 @@ --- title: "Generalized Estimating Equations (GEE) methods in SAS" -execute: - eval: false --- # INTRODUCTION @@ -18,7 +16,7 @@ To uniquely identify subjects, a new variable USUBJID was created by concatenati Additionally, two variables were created using randomly generated values to simulate variables with more than two categories. One was an ordinal variable with values 1, 2, and 3; the other was a nominal variable with categories 'liver', 'lung', and 'bone'. 
The resulting dataset is saved here: /data/resp.xlsx The following SAS code was used: -```{sas} +```sas proc format; value respmulti 1='Liver' @@ -53,7 +51,7 @@ Predicted probabilities and Odds Ratios (OR) can be obtained in SAS using the `l - `cl` computes confidence intervals. -```{sas} +```sas proc gee data=resp; class trtp(ref="P") avisitn(ref='1') usubjid; model outcome(event='1') = trtp avisitn trtp*avisitn/ dist=bin link=logit; @@ -72,7 +70,6 @@ Results were extracted into a SAS dataset using the `ODS OUTPUT` statement and s [Estimated Parameters:]{.underline} ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 50% @@ -82,7 +79,6 @@ knitr::include_graphics("../images/gee/1_estimated_parameters.png") [Probability of event:]{.underline} ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 50% @@ -92,7 +88,6 @@ knitr::include_graphics("../images/gee/2_probability_of_event.png") [ODDS RATIO (OR):]{.underline} ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 50% @@ -112,7 +107,7 @@ The estimated parameters for each model are detailed below. 
ORs can be obtained [Ordinal variable:]{.underline} -```{sas} +```sas proc gee data=resp; class trtp(ref="A") avisitn(ref='1') usubjid; model respord=trtp avisitn trtp*avisitn/ dist=multinomial link=cumlogit; @@ -124,7 +119,7 @@ run; [Nominal variable:]{.underline} -```{sas} +```sas proc gee data=resp ; class trtp(ref="A") avisitn(ref='1') usubjid; model respnom(event='Liver')=trtp avisitn trtp*avisitn/ dist=multinomial link=glogit; @@ -138,7 +133,6 @@ run; [Ordinal variable:]{.underline} ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 50% @@ -148,7 +142,6 @@ knitr::include_graphics("../images/gee/4_ordinal_variable.png") [Nominal variable:]{.underline} ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 50% diff --git a/SAS/glmm.qmd b/SAS/glmm.qmd index 13c017fe2..ae4ba2fe4 100644 --- a/SAS/glmm.qmd +++ b/SAS/glmm.qmd @@ -60,8 +60,7 @@ To uniquely identify subjects, a new variable USUBJID was created by concatenati Additionally, two variables were created using randomly generated values to simulate variables with more than two categories. One was an ordinal variable with values 1, 2, and 3; the other was a nominal variable with categories 'liver', 'lung', and 'bone'. The following SAS code was used: -```{r} -#| eval: false +```sas proc format; value respmulti 1='Liver' @@ -83,8 +82,7 @@ GLMM with GHQ approximation can be fitted using `PROC GLIMMIX` by specifying `qu Unlike in GEE models, which computed the robust Sandwich S.E. by default, the GLIMMIX procedure displays the model-based S.E. (also called naïve S.E. in R) by default. -```{r} -#| eval: false +```sas proc glimmix data=resp method=quad(qpoints=5); class trtp(ref="P") avisitn(ref='1') usubjid; model outcome=trtp avisitn trtp*avisitn/dist=bin link=logit solution ddfm=BW;/*1*/; @@ -108,8 +106,7 @@ knitr::include_graphics("../images/glmm/SAS_Image_1.png") Laplace is a particular GHQ where only one point is used. 
In SAS, it can be obtained in the method statement using either `method=quad(qpoints=)` or `method=Laplace`. Both approaches return similar results with slight differences in later decimal places (See Appendix 1). -```{r} -#| eval: false +```sas proc glimmix data=resp method=laplace; class trtp(ref="A") avisitn(ref='1') usubjid; model outcome=trtp avisitn trtp*avisitn/dist=bin link=logit solution ddfm=BW; @@ -131,8 +128,7 @@ The PQL approach uses linear approximations instead of likelihood, making it **l In SAS, this is implemented by default using the Residual Pseudo-Likelihood method (`method=RSPL)`, which is a refinement of PQL which incorporates residual adjustments to better approximate the marginal likelihood, in the GLIMMIX procedure. -```{r} -#| eval: false +```sas proc glimmix data=resp method=rspl; class trtp(ref="A") avisitn(ref='1') usubjid; model outcome=trtp avisitn trtp*avisitn/ dist=bin link=logit solution ddfm=residual; @@ -156,8 +152,7 @@ Additionally, FDA advises "*sponsors to consider using of robust SE method such The example below is done using GHQ with n=5 points, but it also works for Laplace approximation. -```{r} -#| eval: false +```sas proc glimmix data=resp method=quad(qpoints=5) empirical; class trtp(ref="A") avisitn(ref='1') usubjid; model outcome=trtp avisitn trtp*avisitn/dist=bin link=logit solution ddfm=BW; @@ -186,15 +181,14 @@ Predicted probabilities and odds ratios (ORs) can be obtained in SAS using the ` The example below is done using GHQ with n=5 points, but it also works for Laplace approximation. 
-```{r} -#| eval: false - proc glimmix data=resp method=quad(qpoints=5) empirical; - class trtp(ref="P") avisitn(ref='1') usubjid; - model outcome=trtp avisitn trtp*avisitn / dist=bin link=logit solution ddfm=betwithin ; - lsmeans trtp*avisitn /cl ilink oddsratio diff; - random intercept /subject=usubjid ; - ods output LSMeans = Lsmeans Diffs=Diffs1; - run; +```sas +proc glimmix data=resp method=quad(qpoints=5) empirical; +class trtp(ref="P") avisitn(ref='1') usubjid; +model outcome=trtp avisitn trtp*avisitn / dist=bin link=logit solution ddfm=betwithin ; +lsmeans trtp*avisitn /cl ilink oddsratio diff; +random intercept /subject=usubjid ; +ods output LSMeans = Lsmeans Diffs=Diffs1; +run; ``` @@ -224,8 +218,7 @@ One notable limitation is that the `LSMEANS` statement does not work as expected ### Ordinal variable -```{r} -#| eval: false +```sas proc glimmix data=resp method=quad(qpoints=5) empirical; class trtp(ref="P") avisitn(ref='1') usubjid; model respord=trtp avisitn trtp*avisitn base / dist=multinomial link=cumlogit solution oddsratio ddfm=betwithin; @@ -244,8 +237,7 @@ knitr::include_graphics("../images/glmm/SAS_Image_7.png") ### Nominal variable -```{r} -#| eval: false +```sas proc glimmix data=resp method=quad(qpoints=5) ; class trtp(ref="P") avisitn(ref='1') usubjid respnom; model respnom(order=freq ref=first)=trtp avisitn trtp*avisitn base / dist=multinomial link=glogit solution ddfm=betwithin ; @@ -267,8 +259,7 @@ knitr::include_graphics("../images/glmm/SAS_Image_8.png") ### Laplace: GLIMMIX with method=Laplace vs method=quad(qpoints=1) -```{r} -#| eval: false +```sas proc glimmix data=resp method=laplace empirical; class trtp(ref="P") avisitn(ref='1') usubjid; model outcome=trtp avisitn trtp*avisitn/ dist=bin link=logit solution ddfm=BW; diff --git a/SAS/gsd-tte.qmd b/SAS/gsd-tte.qmd index 5f0d39365..daebe5f65 100644 --- a/SAS/gsd-tte.qmd +++ b/SAS/gsd-tte.qmd @@ -2,8 +2,6 @@ title: "Group Sequential Design in Survival Endpoints Using SAS" date: 
last-modified date-format: D MMMM, YYYY -execute: - eval: false --- # Introduction @@ -18,7 +16,7 @@ A GSD will be utilized for progression-free survival (PFS). PFS will be tested a The SAS code is shown below: -```{sas} +```sas PROC SEQDESIGN; DESIGN NSTAGES=2 INFO=CUM(0.75 1.0) @@ -41,7 +39,6 @@ RUN; As shown below, a total sample size of 398 is recommended, which equates to 199 in each group. ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 100% diff --git a/SAS/jonchkheere_terpstra.qmd b/SAS/jonchkheere_terpstra.qmd index de441dfe4..2322f45ae 100644 --- a/SAS/jonchkheere_terpstra.qmd +++ b/SAS/jonchkheere_terpstra.qmd @@ -1,7 +1,5 @@ --- title: "SAS Jonckheere-Terpstra Test" -execute: - eval: false --- ## Background @@ -14,7 +12,7 @@ The JT test is particularly well-suited for dose-response or trend analysis with To request Jonckheere-Terpstra test, specify the **JT** option in the Table statement like below: -```{sas} +```sas proc freq; table Var1 * Var2 / JT; Quit; @@ -28,7 +26,7 @@ PROC FREQ also provides exact p-values for the Jonckheere-Terpstra test. You can This dataset has been generated using example data which aligned with the specifications outlined in the section on the Jonckheere–Terpstra test from reference \[5\]. It represents the duration of hospital stays for a randomly selected group of patients across three distinct ICU departments: cardiothoracic, medical, and neurosurgical. -```{sas} +```sas data ICU_Stay; input ICU $ Stay; label Stay = 'Length of Stay in Days'; @@ -62,7 +60,7 @@ run; The code performs a frequency analysis on the 'ICU_Stay' dataset, examining the relationship between 'ICU' and 'Stay' variables. It applies the Jonckheere-Terpstra test using JT option to identify trends in the ordered categorical 'Stay' variable. The output is streamlined by omitting percentages and totals for columns and rows with the 'nopercent nocol norow' options, emphasizing the Jonckheere-Terpstra test outcomes. 
-```{sas} +```sas proc freq data=ICU_Stay; table ICU * Stay / JT nopercent nocol norow; run; @@ -78,7 +76,7 @@ Comparing this with a standard Normal distribution gives a P value of 0.005, ind This dataset incorporates illustrative data extracted from reference \[3\]. It encapsulates the responses of subjects randomly assigned to one of four treatment arms: placebo, low dosage(20mg), medium dosage(60mg), and high dosage(180mg). The variable of interest is a continuous measure. The variable 'groupn' is used to provide an order of 'group'. -```{sas} +```sas data contin; input groupn group $ subject response; cards; @@ -114,7 +112,7 @@ run; The code is performing a Jonckheere-Terpstra trend test on a continuous 'response' variable, categorized by a 'group' variable, using the 'proc freq' procedure. The analysis is applied to the dataset named 'contin'. The result is presented with a title "Jonckheere-Terpstra Trend Test for Continuous Data", indicating the specific nature of the test being conducted. The 'JT' option is used to specify the Jonckheere-Terpstra test. -```{sas} +```sas proc freq data=contin; tables group * response/JT; title "Jonckheere-Terpstra Trend Test for Continuous Data"; @@ -131,7 +129,7 @@ There is a significant trend across different groups in the response gives a P v With EXACT statement, the exact version and it Monte Carlo approximation can be also conducted. However, it should be noted that the exact test, i.e., a permuation test takes a long time to compelete the task even for a small dataset. -```{sas} +```sas proc freq data = inds; title "Asymptotic p-value calculation"; table ICU * Stay / jt; diff --git a/SAS/kruskal_wallis.qmd b/SAS/kruskal_wallis.qmd index 3b65ee7ae..2a23fcd1e 100644 --- a/SAS/kruskal_wallis.qmd +++ b/SAS/kruskal_wallis.qmd @@ -1,14 +1,12 @@ --- title: "Kruskal Wallis SAS" -execute: - eval: false --- ## Introduction The Kruskal-Wallis test is a non-parametric equivalent to the one-way ANOVA. 
For this example, the data used is a subset of R's datasets::iris, testing for difference in sepal width between species of flower. This data was subset in R and input manually to SAS with a data step. -```{sas} +```sas data iris_sub; input Species $ Sepal_Width; datalines; @@ -38,7 +36,7 @@ run; The Kruskal-Wallis test can be implemented in SAS using the NPAR1WAY procedure with WILCOXON option. Below, the test is defined with the indicator variable (Species) by the CLASS statement, and the response variable (Sepal_Width) by the VAR statement. Adding the EXACT statement outputs the exact p-value in addition to the asymptotic result. The null hypothesis is that the samples are from identical populations. -```{sas} +```sas proc npar1way data=iris_sub wilcoxon; class Species; var Sepal_Width; @@ -49,7 +47,6 @@ run; ## Results ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 90% diff --git a/SAS/linear-regression.qmd b/SAS/linear-regression.qmd index 262d8f178..95645e35f 100644 --- a/SAS/linear-regression.qmd +++ b/SAS/linear-regression.qmd @@ -2,8 +2,6 @@ title: "Linear Regression" date: last-modified date-format: D MMMM, YYYY -execute: - eval: false --- To demonstrate the use of linear regression we examine a dataset that illustrates the relationship between Height and Weight in a group of 237 teen-aged boys and girls. The dataset is available at (../data/htwt.csv) and is imported to sas using proc import procedure. @@ -12,7 +10,7 @@ To demonstrate the use of linear regression we examine a dataset that illustrate The first step is to obtain the simple descriptive statistics for the numeric variables of htwt data, and one-way frequencies for categorical variables. This is accomplished by employing proc means and proc freq procedures There are 237 participants who are from 13.9 to 25 years old. It is a cross-sectional study, with each participant having one observation. 
We can use this data set to examine the relationship of participants' height to their age and sex. -```{sas} +```sas proc means data=htwt; run; ``` @@ -29,7 +27,7 @@ WEIGHT WEIGHT 237 101.3080169 19.4406980 50.5000000 171.5000000 ---------------------------------------------------------------------------- ``` -```{sas} +```sas proc freq data=htwt; tables sex; run; @@ -48,7 +46,7 @@ m 126 53.16 237 100.00 In order to create a regression model to demonstrate the relationship between age and height for females, we first need to create a flag variable identifying females and an interaction variable between age and female gender flag. -```{sas} +```sas data htwt2; set htwt; if sex="f" then female=1; @@ -63,7 +61,7 @@ run; Next, we fit a regression model, representing the relationships between gender, age, height and the interaction variable created in the datastep above. We again use a where statement to restrict the analysis to those who are less than or equal to 19 years old. We use the clb option to get a 95% confidence interval for each of the parameters in the model. The model that we are fitting is ***height = b0 + b1 x female + b2 x age + b3 x fem_age + e*** -```{sas} +```sas proc reg data=htwt2; where age <=19; model height = female age fem_age / clb; diff --git a/SAS/logistic-regr.qmd b/SAS/logistic-regr.qmd index e848f8dfb..84130d649 100644 --- a/SAS/logistic-regr.qmd +++ b/SAS/logistic-regr.qmd @@ -2,8 +2,6 @@ title: "Logistic Regression in SAS" date: last-modified date-format: D MMMM, YYYY -execute: - eval: false --- For a brief description of what is logistic regression see [here](../R/logistic_regr.html). @@ -38,7 +36,7 @@ Below we are fitting trt01pn and sex as categorical variables, age, ph_ecog2 and You can use exponential of the maximum likelihood parameter estimate and the exponential of the Wald 95% Confidence Limits to obtain the odds ratios and 95% CIs. Proc Genmod uses GLM parameterization. -```{sas} +```sas Example data: . 
= missing, trt01pn (1 or 2), sex (1 or 2), ph_ecog2 (0,1,2,3) wt_gain (1=gain, 0=no gain) ``` @@ -51,7 +49,7 @@ wt_gain trt01pn age sex ph_ecog2 meal_caln 1 2 60 1 2 . ``` -```{sas} +```sas proc genmod data=lung; class trt01pn (ref="1") sex (ref="1"); model wt_gain (event="1") = trt01pn age sex ph_ecog2 meal_caln / @@ -106,7 +104,7 @@ Proc Logistic is often preferred above Proc Genmod as it outputs the Odds Ratios NOTE: that the 95% confidence limits are being calculated using the Wald method. This assumes symmetric intervals around the maximum likelihood estimate using a normal distribution assumption (MLE +/-1.96\* SE). Alternative confidence interval estimation methods exist such as the profile likelihood method but SAS does not calculate these. -```{sas} +```sas proc logistic data=lung; class trt01pn (ref="1") sex (ref="1") /param=glm; model wt_gain(event="1") = trt01pn age sex ph_ecog2 meal_caln; @@ -166,7 +164,7 @@ meal_cal 1.001 1.000 1.002 To compare two logistic models, the -2 \* Log Likelihood from each model can be compared against a $\chi^2$-distribution with degrees of freedom calculated using the difference in the two models' parameters. -```{sas} +```sas Model 1: model wt_gain(event="1") = trt01pn age sex ph_ecog2 meal_caln; ``` @@ -194,7 +192,7 @@ SC 207.491 216.477 SAS also allows us to fit forward or backwards stepwise selection. Below we specify to stop when we have 4 variables left in the model. This is not commonly done in practice but is included to highlight the difference in using a selection procedure compared to doing the difference betweeen the -2 \* log likelihood models using a $\chi^2$-distribution. 
-```{sas} +```sas proc logistic data=lung; class trt01pn (ref="1") sex (ref="1") /param=glm; model wt_gain(event="1") = trt01pn age sex ph_ecog2 meal_caln/ @@ -229,7 +227,7 @@ This is the SAS default such that if you do not specify the `/param` option, SAS With the EFFECT option, dose_id has 3 levels, and so needs 2 design variables (β1 and β2). Sex has 2 levels so uses just 1 design variable (β1). For dose_id, the reference level (Placebo) is given values of "-1" for both of the β1 and β2 parameters. General model: Y= α + β1x1 + β2x1 {+β3x3} etc, representing each parameter in the model. -```{sas} +```sas proc logistic data=lung; class dose_id (ref="3") sex (ref="1") /param=effect; model wt_gain(event="1") = dose_id age sex ph_ecog2 meal_caln; @@ -274,7 +272,7 @@ To compare 10mg Active vs Placebo, we would do the following: This equates to a contrast statement as follows: -```{sas} +```sas proc logistic data=lung; class dose_id (ref="3") sex (ref="1") /param=effect; model wt_gain(event="1") = dose_id age sex ph_ecog2 meal_caln; @@ -292,7 +290,7 @@ To compare the average of (10mg Active and 20mg Active) vs Placebo, we would do This equates to a contrast statement as follows: -```{sas} +```sas proc logistic data=lung; class dose_id (ref="3") sex (ref="1") /param=effect; model wt_gain(event="1") = dose_id age sex ph_ecog2 meal_caln; @@ -313,7 +311,7 @@ Active (10mg +20mg) vs Placebo 1 1.1610 0.2813 Now let's look at the `param=glm` option. GLM parameterization has a design variable for each level of a parameter. Hence for dose with 3 levels, we have 3 design variables (β1, β2 and β3). 
-```{sas} +```sas proc logistic data=lung; class dose_id (ref="3") sex (ref="1") /param=glm; model wt_gain(event="1") = dose_id age sex ph_ecog2 meal_caln; @@ -358,7 +356,7 @@ To compare 10mg Active vs Placebo, we would do the following: This equates to a contrast statement as follows: -```{sas} +```sas proc logistic data=lung; class dose_id (ref="3") sex (ref="1") /param=glm; model wt_gain(event="1") = dose_id age sex ph_ecog2 meal_caln; @@ -378,7 +376,7 @@ To compare the average of (10mg Active and 20mg Active) vs Placebo, we would do This equates to a contrast statement as follows: -```{sas} +```sas proc logistic data=lung; class dose_id (ref="3") sex (ref="1") /param=glm; model wt_gain(event="1") = dose_id age sex ph_ecog2 meal_caln; @@ -399,7 +397,7 @@ Active (10mg +20mg) vs Placebo 1 1.1610 0.2813 Now let's look at the `param=ref` option. Similar to param=effect, ref parameterization uses 1 less design variable compared to the number of levels each parameter has, but the parameterization is different. For dose with 3 levels, we have 2 design variables (β1 and β2). 
-```{sas} +```sas proc logistic data=lung; class dose_id (ref="3") sex (ref="1") /param=ref; model wt_gain(event="1") = dose_id age sex ph_ecog2 meal_caln; @@ -442,7 +440,7 @@ To compare 10mg Active vs Placebo, we would do the following: This equates to a contrast statement as follows: -```{sas} +```sas proc logistic data=lung; class dose_id (ref="3") sex (ref="1") /param=glm; model wt_gain(event="1") = dose_id age sex ph_ecog2 meal_caln; @@ -460,7 +458,7 @@ To compare the average of (10mg Active and 20mg Active) vs Placebo, we would do This equates to a contrast statement as follows: -```{sas} +```sas proc logistic data=lung; class dose_id (ref="3") sex (ref="1") /param=glm; model wt_gain(event="1") = dose_id age sex ph_ecog2 meal_caln; @@ -481,7 +479,7 @@ Active (10mg +20mg) vs Placebo 1 1.1610 0.2813 The Contrast statement, only outputs the p-value for the contrast, but it is common to also require an estimate of the difference between the treatments, with associated 95% CI. You can do this by changing `contrast` to an `estimate` statement. Note that the parameterization of the contrast remains the same as when using a contrast statement as shown below. These estimates and 95% CI's can be back transformed to give you the Odds ratio of the contrast and associated 95% CI. The estimate coefficients table should be checked for accuracy versus the contrast you are trying to do. 
-```{sas} +```sas proc logistic data=lung; class dose_id (ref="3") sex (ref="1") /param=glm; model wt_gain(event="1") = dose_id age sex ph_ecog2 meal_caln; diff --git a/SAS/manova.qmd b/SAS/manova.qmd index f7d35b941..4b16969e1 100644 --- a/SAS/manova.qmd +++ b/SAS/manova.qmd @@ -1,7 +1,5 @@ --- title: "Multivariate Analysis of Variance in SAS" -execute: - eval: false --- **Example 39.6 Multivariate Analysis of Variance** from [SAS MANOVA User Guide](https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_glm_sect051.htm) @@ -10,7 +8,7 @@ This example employs multivariate analysis of variance (MANOVA) to measure diffe For each of 26 samples of pottery, the percentages of oxides of five metals are measured. The following statements create the data set and invoke the GLM procedure to perform a one-way MANOVA. Additionally, it is of interest to know whether the pottery from one site in Wales (Llanederyn) differs from the samples from other sites; a CONTRAST statement is used to test this hypothesis. 
-```{sas} +```sas # Example code title "Romano-British Pottery"; data pottery; @@ -58,7 +56,6 @@ After the summary information (1), PROC GLM produces the univariate analyses for **1 Summary Information about Groups** ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 100% @@ -68,7 +65,6 @@ knitr::include_graphics("../images/manova/manova1_class.jpg") **2 Univariate Analysis of Variance for Aluminum Oxide (AI)** ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 100% @@ -78,7 +74,6 @@ knitr::include_graphics("../images/manova/manova2_anova_ao.jpg") **3 Univariate Analysis of Variance for Iron Oxide (Fe)** ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 100% @@ -88,7 +83,6 @@ knitr::include_graphics("../images/manova/manova3_anova_fe.jpg") **4 Univariate Analysis of Variance for Calcium Oxide (Ca)** ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 100% @@ -98,7 +92,6 @@ knitr::include_graphics("../images/manova/manova4_anova_ca.jpg") **5 Univariate Analysis of Variance for Magnesium Oxide (Mg)** ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 100% @@ -108,7 +101,6 @@ knitr::include_graphics("../images/manova/manova5_anova_mg.jpg") **6 Analysis of Variance for Sodium Oxide (Na)** ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 100% @@ -122,7 +114,6 @@ The PRINTE option also displays the partial correlation matrix (7) associated wi **7 Error SSCP Matrix and Partial Correlations** ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 100% @@ -136,7 +127,6 @@ Four multivariate tests are computed, all based on the characteristic roots and **8 Hypothesis SSCP Matrix and Multivariate Tests for Overall Site Effect** ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 100% @@ -146,7 +136,6 @@ knitr::include_graphics("../images/manova/manova8_hyp_tests.jpg") **9 Hypothesis SSCP Matrix and Multivariate 
Tests for Differences between Llanederyn and the Other Sites** ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 100% diff --git a/SAS/mcnemar.qmd b/SAS/mcnemar.qmd index ff3501648..7136b94e9 100644 --- a/SAS/mcnemar.qmd +++ b/SAS/mcnemar.qmd @@ -1,7 +1,5 @@ --- title: "McNemar's test in SAS" -execute: - eval: false --- ### Performing McNemar's test in SAS @@ -12,7 +10,7 @@ To demonstrate McNemar's test in SAS, data concerning the presence or absence of Testing for a significant difference in cold symptoms between ages, using McNemar's test in SAS, can be performed as below. The AGREE option is stated within the FREQ procedure to produce agreement tests and measures, including McNemar's test. -```{sas} +```sas proc freq data=colds; tables age12*age14 / agree; run; @@ -21,7 +19,6 @@ run; #### Results ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 40% diff --git a/SAS/mi_mar_regression.qmd b/SAS/mi_mar_regression.qmd index 2b7e50c30..f5d329d4b 100644 --- a/SAS/mi_mar_regression.qmd +++ b/SAS/mi_mar_regression.qmd @@ -1,7 +1,5 @@ --- title: "Multiple Imputaton: Linear Regression in SAS" -execute: - eval: false --- ## Input dataset preparation before multiple imputation @@ -15,7 +13,7 @@ execute: As PROC MI requires a horizontal, one record per subject data set. More often than not, the data we impute will come from a vertical ADaM BDS data set. So we need to first transpose the aval with the avisitn as ID (assuming avisitn = 1 to 5),creating transposed variable v1-v5. -```{sas} +```sas data dummy; length USUBJID $4; do i=1 to 10; @@ -46,7 +44,6 @@ run; ``` ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 100% @@ -61,7 +58,7 @@ The pattern can be checked using the following code, missing data pattern could - "Arbitrary" : The missingness of data does not follow any specific order or predictable sequence. Data can be missing at random points without a discernible pattern. 
-```{sas} +```sas ods select MissPattern; proc mi data=dummyt nimpute=0; var v1 - v5; @@ -71,7 +68,6 @@ run; As below figure shows the missingness dose not follow any specific order, obviously the missing pattern is arbitrary and non-monotone missing pattern. ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 100% @@ -82,7 +78,7 @@ knitr::include_graphics( ## FCS Regression for non-monotone missing pattern -```{sas} +```sas proc mi data=dummyt out=outdata nimpute=10 seed=123; class sex1n; var sex1n v1 - v5; @@ -100,7 +96,6 @@ run; - The `DETAILS` option displays the regression coefficients in the regression model used in each imputation. ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 100% @@ -108,7 +103,6 @@ knitr::include_graphics("../images/mi_mar_linear_sas/mi_mar_reg_fcs.PNG") ``` ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 100% @@ -119,7 +113,7 @@ knitr::include_graphics("../images/mi_mar_linear_sas/mi_mar_reg_fcs2.PNG") Let's update above SAS code to generate a dummy dataset with monotone missing pattern -```{sas} +```sas data dummy; length USUBJID $4; do i=1 to 10; @@ -156,7 +150,6 @@ run; ``` ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 100% @@ -167,7 +160,7 @@ knitr::include_graphics( In this case we will use `monotone` statement instead of `FCS` for the imputation, example code as below: -```{sas} +```sas proc mi data=dummyt out=outdata nimpute=10 seed=123; class sex1n; var sex1n v1 - v5; diff --git a/SAS/mmrm.qmd b/SAS/mmrm.qmd index bafc7de25..bf5c0d230 100644 --- a/SAS/mmrm.qmd +++ b/SAS/mmrm.qmd @@ -1,7 +1,5 @@ --- title: "MMRM in SAS" -execute: - eval: false --- # Mixed Models @@ -10,7 +8,7 @@ execute: In SAS the following code was used (assessments at `avisitn=0` should also be removed from the response variable): -```{sas} +```sas proc mixed data=adlbh; where base ne . 
and avisitn not in (., 99); class usubjid trtpn(ref="0") avisitn; diff --git a/SAS/nparestimate.qmd b/SAS/nparestimate.qmd index eca179f2c..b4b4eb932 100644 --- a/SAS/nparestimate.qmd +++ b/SAS/nparestimate.qmd @@ -1,7 +1,5 @@ --- title: "Non-parametric point estimation in SAS" -execute: - eval: false --- # Introduction @@ -14,7 +12,7 @@ PROC NPAR1WAY provides these estimates in a flexible manner. # Case study -```{sas} +```sas # Hollander-Wolfe-Chicken Example data all; input group $ value; @@ -45,7 +43,7 @@ run; Hodges-Lehmann estimate and Moses confidence interval for the 2-sample case will be generated when putting HL as an option. The direction of the comparison can be controlled via refclass. If the exact confidence interval is required additionally then the exact statement together with the option HL needs to be defined. The Hodges-Lehmann point estimate and confidence interval can be addressed via the HodgesLehmann option under the ODS statement. -```{sas} +```sas proc npar1way hl (refclass = "B") data = all; class group; var value; diff --git a/SAS/ranksum.qmd b/SAS/ranksum.qmd index 79073889d..9167b5ebd 100644 --- a/SAS/ranksum.qmd +++ b/SAS/ranksum.qmd @@ -1,7 +1,5 @@ --- title: "Wilcoxon Rank Sum /Mann-Whitney U test" -execute: - eval: false --- # Wilcoxon Rank Sum / Mann-Whitney U test @@ -14,7 +12,7 @@ To perform a Wilcoxon rank-sum test in SAS, you can use the PROC NPAR1WAY proced 1. **Create the Dataset**: If there are two groups (smoker and non-smoker) with their respective measurements birth weight, you can input the data as follows: -```{sas} +```sas /* Create dataset */ data bw; input bw grp $; @@ -54,7 +52,7 @@ run; 2. **Perform the Wilcoxon rank-sum Test**: Use the PROC NPAR1WAY procedure to perform the test. The wilcoxon option specifies that you want to perform the Wilcoxon rank-sum test. When computing the asymptotic Wilcoxon two-sample test, PROC NPAR1WAY uses a continuity correction by default. 
If specify the CORRECT=NO option in the PROC NPAR1WAY statement, the procedure does not use a continuity correction. Typically, we will also want the Hodges-Lehman confidence intervals. To get these you will need to add `hl` to the pro npar1way statement. -```{sas} +```sas /* Perform Wilcoxon rank-sum test - with continuity correction by default*/ proc npar1way data=BW wilcoxon hl; class grp; @@ -110,7 +108,7 @@ The correction does not effect the Hodges-Lehman CI. The Location shift is the H For sufficiently small sample size, the large-sample normal approximation used by the asymptotic Wilcoxon might not be appropriate, so the exact statement is needed. -```{sas} +```sas /* Perform Wilcoxon rank-sum test - with continuity correction by default*/ proc npar1way data=BW wilcoxon CORRECT=NO hl; class grp; diff --git a/SAS/rbmi_continuous_joint_SAS.qmd b/SAS/rbmi_continuous_joint_SAS.qmd index 800e15a3c..6c615330d 100644 --- a/SAS/rbmi_continuous_joint_SAS.qmd +++ b/SAS/rbmi_continuous_joint_SAS.qmd @@ -1,7 +1,5 @@ --- title: "Reference-Based Multiple Imputation (joint modelling): Continuous Data" -execute: - eval: false --- ## Reference-based multiple imputation (rbmi) @@ -44,14 +42,13 @@ A publicly available example [dataset](https://r-packages.io/datasets/antidepres The relevant endpoint is the Hamilton 17-item depression rating scale (HAMD17) which was assessed at baseline and at weeks 1, 2, 4, and 6 (visits 4-7). Study drug discontinuation occurred in 24% (20/84) of subjects from the active drug and 26% (23/88) of subjects from placebo. All data after study drug discontinuation are missing. 
-```{sas} +```sas proc print data=dat (obs=10); var PATIENT GENDER THERAPY RELDAYS VISIT BASVAL HAMDTL17 CHANGE; run; ``` ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 80% @@ -60,14 +57,13 @@ knitr::include_graphics("../images/rbmi/SAS_ExploreData1.PNG") The number of patients per visit and arm are: -```{sas} +```sas proc freq data=dat; table VISIT*THERAPY / norow nocol nopercent nocum; run; ``` ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 80% @@ -76,7 +72,7 @@ knitr::include_graphics("../images/rbmi/SAS_ExploreData2.PNG") The mean change from baseline of the endpoint (Hamilton 17-item depression rating scale, HAMD17) per visit per treatment group using only the complete cases are: -```{sas} +```sas proc means data=dat n mean nonobs; class VISIT THERAPY; var CHANGE; @@ -84,7 +80,6 @@ run; ``` ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 80% @@ -93,7 +88,7 @@ knitr::include_graphics("../images/rbmi/SAS_ExploreData3.PNG") The missingness pattern is show below. The incomplete data is primarily monotone in nature. 128 patients have complete data for all visits (all 1’s at each visit). 20, 10 and 13 patients have 1, 2 or 3 monotone missing data, respectively. Further, there is a single additional intermittent missing observation (patient 3618). -```{sas} +```sas proc transpose data=dat out=HAMD_wide(drop=_NAME_) prefix=CHG; by PATIENT THERAPY BASVAL; id VISIT; @@ -106,7 +101,6 @@ run; ``` ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 80% @@ -117,7 +111,7 @@ knitr::include_graphics("../images/rbmi/SAS_ExploreData4.PNG") A complete case analysis is performed using mixed model for repeated measures (MMRM) with covariates: treatment \[THERAPY\], gender \[GENDER\], visit \[VISIT\] as factors; baseline score \[BASVAL\] as continuous; and visit-by-treatment \[THERAPY \* VISIT\] interaction, and visit-by-baseline \[BASVAL \* VISIT\] interaction. 
An unstructured covariance matrix is used. The **MIXED** procedure is used. -```{sas} +```sas proc mixed data=dat method=reml; class THERAPY(ref="PLACEBO") VISIT(ref="4") PATIENT GENDER(ref="F"); model CHANGE = THERAPY GENDER VISIT BASVAL THERAPY*VISIT BASVAL*VISIT /s ddfm=satterthwaite; @@ -129,7 +123,6 @@ run; The parameter estimates of the fixed effects are: ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 80% @@ -139,7 +132,6 @@ knitr::include_graphics("../images/rbmi/SAS_CompleteCase_fixedEstimates.PNG") The estimated unstructured covariance matrix parameters are: ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 80% @@ -151,7 +143,6 @@ knitr::include_graphics( The treatment difference at visit 7 is of interest, and is estimated to be -2.829 (se=1.117) with 95% CI of \[-5.033 to -0.624\] (p=0.0122). ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 90% @@ -178,7 +169,7 @@ Most of the computation time is spent in the Part1B macro where the MCMC procedu To perform reference-based multiple imputation using MAR approach to following code is used -```{sas} +```sas %part1A(jobname = HAMD, Data=dat, Subject=PATIENT, @@ -207,7 +198,7 @@ To perform reference-based multiple imputation using MAR approach to following c To print the results of the contrast at week 7 -```{sas} +```sas proc print data=HAMD_MAR_OUT; where VISIT = "7"; var VISIT THERAPY _THERAPY Diff SE_Diff df Probt LCL_Diff UCL_Diff; @@ -215,7 +206,6 @@ run; ``` ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 90% @@ -226,7 +216,7 @@ knitr::include_graphics("../images/rbmi/SAS_MAR_contrast.PNG") To perform reference-based multiple imputation using Copy Reference (CR) approach the following changes are needed in part2A of the 5 macros -```{sas} +```sas %part2A(jobname = HAMD_CR, inname = HAMD, method = CR, @@ -236,7 +226,6 @@ To perform reference-based multiple imputation using Copy Reference (CR) approac The results 
for M=500 imputations are ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 90% @@ -247,7 +236,7 @@ knitr::include_graphics("../images/rbmi/SAS_MNAR_CR_contrast.PNG") To perform reference-based multiple imputation using Jump to Reference (J2R) approach the following changes are needed in part2A of the 5 macros -```{sas} +```sas %part2A(jobname = HAMD_J2R, inname = HAMD, method = J2R, @@ -257,7 +246,6 @@ To perform reference-based multiple imputation using Jump to Reference (J2R) app The results for M=500 imputations are ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 90% @@ -268,7 +256,7 @@ knitr::include_graphics("../images/rbmi/SAS_MNAR_J2R_contrast.PNG") To perform reference-based multiple imputation using Copy Increments in Reference (CIR) approach the following changes are needed in part2A of the 5 macros -```{sas} +```sas %part2A(jobname = HAMD_CIR, inname = HAMD, method = CIR, @@ -278,7 +266,6 @@ To perform reference-based multiple imputation using Copy Increments in Referenc The results for M=500 imputations are ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 90% @@ -317,7 +304,7 @@ Part 3 of the 5 macros carries out a univariate ANOVA analysis at selected time Since, all imputed datasets are readily available (after part2B), another possibility is to analyse each imputed dataset using the analysis model of your choice, and combining the results using `PROC MIANALYZE`. 
For example, suppose an MMRM should be fit on each imputed dataset: -```{sas} +```sas data HAMD_CR_DATAFULL; set HAMD_CR_DATAFULL; _Imputation_ = draw; @@ -342,7 +329,6 @@ run; The results for M=500 imputations are ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 90% diff --git a/SAS/recurrent_events.qmd b/SAS/recurrent_events.qmd index 6984bdf08..8d45f83c3 100644 --- a/SAS/recurrent_events.qmd +++ b/SAS/recurrent_events.qmd @@ -1,18 +1,14 @@ --- title: "SAS Recurrent Events" -execute: - eval: false --- ```{r} -#| eval: true #| label: setup #| include: FALSE knitr::opts_chunk$set(echo = TRUE) ``` ```{r} -#| eval: true #| label: libaries #| include: FALSE library(dplyr) @@ -89,7 +85,6 @@ For both versions of the Andersen-Gill model, the data must be structured as fol This can be visually represented: ```{r} -#| eval: true #| label: AG_lineplot #| echo: FALSE #| fig-align: 'center' @@ -140,7 +135,6 @@ For the total time model, the data must be structured as follows: This can be visually represented: ```{r} -#| eval: true #| label: PWPtt_lineplot #| echo: FALSE #| fig-align: 'center' @@ -173,7 +167,6 @@ For the gap time model, the data must be structured as follows: This can be visually represented: ```{r} -#| eval: true #| label: PWPgt_lineplot #| echo: FALSE #| fig-align: 'center' @@ -244,7 +237,6 @@ Note that, because the ordering of events is not important in the Andersen-Gill A nice visual representation of the stratification and time interval structure of each model is given below. The correct data structure is pivotal when modelling recurrent events and depends on the methodology you want to use, as illustrated in the figure. ```{r} -#| eval: true #| label: combined_lineplot #| echo: FALSE #| fig-align: 'center' @@ -289,14 +281,12 @@ Importantly, both datasets collect the data in a **counting process** structure. 
Let's look more closely at the `bladder2` and `bladder` data: ```{r} -#| eval: true #| label: bladder2 bladder2 <- survival::bladder2 gt(head(bladder2, 6)) ``` ```{r} -#| eval: true #| label: bladder2_enum bladder2 %>% group_by(enum) %>% summarise(n = n()) %>% gt() @@ -305,14 +295,12 @@ bladder2 %>% In `bladder2`, in the Andersen-Gill format, each subject has a variable amount of records, depending on the amount of events that subject experienced. ```{r} -#| eval: true #| label: bladder bladder <- survival::bladder gt(head(bladder, 20)) ``` ```{r} -#| eval: true #| label: bladder_enum bladder %>% group_by(enum) %>% summarise(n = n()) %>% gt() @@ -351,7 +339,7 @@ And the data structure must be: We will use the `bladder2` data for this. -```{sas} +```sas proc phreg data=bladder2 covs(aggregate); class rx (ref='1'); model (tstart, tstop) * event(0) = rx size number /rl; @@ -373,7 +361,7 @@ By including the `covs(aggregate)` option and setting `id subjid;`, SAS will com The original Andersen-Gill model of 1989 can be fitted by changing `covs(aggregate)` to `covs` in the procedure, while excluding `id subjid;`. -```{sas} +```sas proc phreg data=bladder2 covs; class rx (ref='1'); model (tstart, tstop) * event(0) = rx size number /rl; @@ -381,7 +369,6 @@ run; ``` ```{r} -#| eval: true #| label: AG_original #| echo: FALSE #| fig-align: 'center' @@ -415,7 +402,7 @@ And the data structure must be: We will use the `bladder2` data for this. -```{sas} +```sas proc phreg data=bladder2 covs(aggregate); class rx (ref='1'); model (tstart, tstop) * event(0) = rx size number /rl; @@ -425,7 +412,6 @@ run; ``` ```{r} -#| eval: true #| label: PWPtt #| echo: FALSE #| fig-align: 'center' @@ -457,7 +443,7 @@ And the data structure must be: This data structure can be achieved in `bladder2` by adding a `gtime` variable. 
-```{sas} +```sas data bladder2; set bladder2; gtime = tstop - tstart; @@ -466,7 +452,7 @@ run; We artificially set start = 0 for each gap time interval by including `gtime` instead of `(start, stop)` in the `model` statement. -```{sas} +```sas proc phreg data=bladder2 covs(aggregate); class rx (ref='1'); model gtime * event(0) = rx size number/rl; @@ -476,7 +462,6 @@ run; ``` ```{r} -#| eval: true #| label: PWPgt #| echo: FALSE #| fig-align: 'center' @@ -520,7 +505,7 @@ And the data structure must be: We will use the `bladder` data for this. -```{sas} +```sas proc phreg data=bladder covs(aggregate); class rx (ref='1'); model tstop * event(0) = rx size number /rl; @@ -530,7 +515,6 @@ run; ``` ```{r} -#| eval: true #| label: WLW #| echo: FALSE #| fig-align: 'center' @@ -551,7 +535,6 @@ Importantly, the strata of the Wei-Lin-Weissfeld model as set by `strata enum;` Summary for Prentice-Williams-Peterson models: ```{r} -#| eval: true #| label: PWP_structure #| echo: FALSE #| fig-align: 'center' @@ -562,7 +545,6 @@ knitr::include_graphics("../images/recurrent_events/SAS_PWP_structure.png") Summary for Wei-Lin-Weissfeld model: ```{r} -#| eval: true #| label: WLW_structure #| echo: FALSE #| fig-align: 'center' @@ -608,7 +590,7 @@ For the Prentice-Williams-Peterson and Wei-Lin-Weissfeld models we can incorpora To get event-specific estimates for the treatment effect (**rx**), we first need to introduce four new **rx** variables to the `bladder2` and `bladder` datasets, one for each stratum. 
-```{sas} +```sas data bladder2; set bladder2; rx_enum1 = rx*(enum=1); @@ -618,7 +600,7 @@ data bladder2; run; ``` -```{sas} +```sas data bladder; set bladder; rx_enum1 = rx*(enum=1); @@ -634,7 +616,7 @@ With these four interaction variables, we need to specify `rx_enum1-rx_enum4` in **Total time model** -```{sas} +```sas proc phreg data=bladder2 covs(aggregate); class enum / param=glm; model (tstart, tstop) * event(0) = rx_enum1-rx_enum4 size number /rl; @@ -644,7 +626,6 @@ run; ``` ```{r} -#| eval: true #| label: PWPtt_stratified #| echo: FALSE #| fig-align: 'center' @@ -654,7 +635,7 @@ knitr::include_graphics("../images/recurrent_events/SAS_PWPtt_stratified.png") **Gap time model** -```{sas} +```sas proc phreg data=bladder2 covs(aggregate); class enum / param=glm; model gtime * event(0) = rx_enum1-rx_enum4 size number/rl; @@ -664,7 +645,6 @@ run; ``` ```{r} -#| eval: true #| label: PWPgt_stratified #| echo: FALSE #| fig-align: 'center' @@ -674,7 +654,7 @@ knitr::include_graphics("../images/recurrent_events/SAS_PWPgt_stratified.png") #### Wei-Lin-Weissfeld model -```{sas} +```sas proc phreg data=bladder covs(aggregate); class enum / param=glm; model tstop * event(0) = rx_enum1-rx_enum4 size number /rl; @@ -684,7 +664,6 @@ run; ``` ```{r} -#| eval: true #| label: WLW_stratified #| echo: FALSE #| fig-align: 'center' diff --git a/SAS/rmst.qmd b/SAS/rmst.qmd index 99c2a98fd..09ffa670a 100644 --- a/SAS/rmst.qmd +++ b/SAS/rmst.qmd @@ -2,8 +2,6 @@ title: "Restricted Mean Survival Time (RMST) in SAS" date: last-modified date-format: D MMMM, YYYY -execute: - eval: false --- Under the situation where you time to event outcome has non-proportional hazards over time, the commonly used Cox Proportional Hazards regression analysis and the log-rank test can be invalid - especially when the survival curves are crossing. One alternative is to analyse the Restricted Mean Survival Time (RMST). 
@@ -26,7 +24,6 @@ A common mistake is to get the (0) or (1) indicator the wrong way around. For ex Throughout this page, we will use cnsr(1) approach. ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 50% @@ -38,7 +35,6 @@ knitr::include_graphics("../images/rmst/SASrmstreg2.png") The selection of `tau` for RMST analysis is very important. It's the period of time the average (or AUC for proc lifetest) is calculated over. If events are no longer occurring on both treatment groups, then you may not be looking at the key time period of interest. Therefore, it is better practice, to calculate `tau` yourself and set this as an option in the SAS code, (commonly based on the minimum time of the last event observed in each treatment group). ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 50% @@ -77,7 +73,7 @@ It is good practice to first view the shape of your Kaplan-Meier curves. As you It is very important to pre-specify your approach for selection of `tau`. As you can see from the curves, if we compared the period 0 to 6 months, vs 0 to 18 months, we would get very different results for the treatment comparison. -```{sas} +```sas proc lifetest data=adcibc conftype=log; time time*cnsr(1); strata trt01pn; @@ -85,7 +81,6 @@ Run; ``` ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 50% @@ -96,7 +91,7 @@ knitr::include_graphics("../images/rmst/kmplot.png") The code below calculates `tau` as the minimum time of the last event observed in each treatment group. This will be the period of time, our AUC will be calculated over. As cnsr=0 are events, we only select these observations. Below, the maximum event in treatment 1 = 883 days and in treatment 2 = 350 days. We therefore set `tau` = 350. This method avoids including in the AUC a period of time where events are no longer occurring in both treatments. 
You can see why setting `tau` is very important as we are likely to get very different AUCs calculating over the 350 day as opposed to the 883 day period! -```{sas} +```sas proc sort data=lung_cancer (where=(cnsr=0)) out=tau1; by trt01pn time; run; @@ -133,7 +128,7 @@ In the output, its important to check that your event/censoring flag is the righ ### Linear link model - provides estimates of treatment differences -```{sas} +```sas proc rmstreg data=adcibc tau=&_tau; class trt01pn sex; model time*cnsr(1) =trt01pn sex age /link=linear method=ipcw (strata=trt01pn); @@ -143,7 +138,6 @@ run; ``` ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 50% @@ -153,7 +147,6 @@ knitr::include_graphics("../images/rmst/rmstreg_output1.png") The above model results in a `restricted mean survival time` estimate of 257.16 days on treatment 1 vs 267.04 days on treatment 2. The difference (Active-Placebo) is -9.88 days (95% CI: -39.0 to 19.25, p=0.5061). Hence, there is no evidence of a difference between the treatments with respect to RMST when we look over the Kaplan-Meier plot from 0 to 350 days. ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 50% @@ -166,7 +159,7 @@ The code is similar to above. We include the option `exp` on the lsmeans row, si Similar to the linear model, we obtain results of a `restricted mean survival time` estimate of 255.21 days on treatment 1 vs 264.75 days on treatment 2. The difference (Active-Placebo) on the log scale is -0.03667 (95% CI: -0.1493 to 0.07596, p=0.5234) but this is hard to interpret. Hence, once back transformed, the treatment ratio (Active/Placebo) is 0.9640 (95% CI: 0.8613 to 1.0789, p=0.5234). 
-```{sas} +```sas proc rmstreg data=adcibc tau=&_tau; class trt01pn sex; model time*cnsr(1) =trt01pn sex age /link=log method=ipcw (strata=trt01pn); @@ -176,7 +169,6 @@ run; ``` ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 50% @@ -187,7 +179,7 @@ knitr::include_graphics("../images/rmst/rmstreg_output3.png") The pseudo-observations method [Andersen, Hansen and Klein 2004](https://pubmed.ncbi.nlm.nih.gov/15690989/), is available in SAS using the method=pv option. You use the link=linear or link=log options and output is similarly interpreted as described for Method 1 IPCW method. -```{sas} +```sas proc rmstreg data=adcibc tau=&_tau; class trt01pn sex ; model time*cnsr(1) =trt01pn sex age /link=linear method=pv; @@ -200,7 +192,7 @@ run; A non-parametric method to calculate the RMST is available using the AUC Kaplan-Meier curves. -```{sas} +```sas proc lifetest data=adcibc plots=(rmst s) rmst (tau=&_tau); time time*cnsr(1) ; strata trt01pn / diff=control('2') ; @@ -235,7 +227,6 @@ knitr::include_graphics("../images/rmst/rmstreg_output4.png") ### Version ```{r} -#| eval: true #| echo: false si <- sessioninfo::session_info("rmst", dependencies = FALSE) si$external <- structure( diff --git a/SAS/rounding.qmd b/SAS/rounding.qmd index 715dd5cf2..a72a652fb 100644 --- a/SAS/rounding.qmd +++ b/SAS/rounding.qmd @@ -1,7 +1,5 @@ --- title: "Rounding in SAS" -execute: - eval: false --- SAS provides two distinct rounding functions that handle tie-breaking (values exactly halfway between two numbers) differently: @@ -13,7 +11,7 @@ The key difference appears when rounding values that are exactly halfway between ```{r} #| include: false - +#| eval: false library(sasquatch) sasquatch::sas_connect() @@ -21,7 +19,6 @@ sasquatch::sas_connect() ```{r} #| include: false -#| eval: true sas_code_01 <- " @@ -51,6 +48,8 @@ run; ```{r} #| include: false +#| eval: false + widget_01 <- sasquatch::sas_run_string(sas_code_01, capture = "listing") saveRDS(widget_01, 
"SAS/rds/rounding/widget_01.rds") @@ -58,7 +57,6 @@ saveRDS(widget_01, "SAS/rds/rounding/widget_01.rds") ```{r} #| echo: false -#| eval: true #| results: asis cat("```txt\n") cat(sas_code_01) @@ -67,7 +65,6 @@ cat("```") ```{r} #| echo: false -#| eval: true readRDS(file = "rds/rounding/widget_01.rds") ``` @@ -77,7 +74,6 @@ These precision issues are inherent to how computers store decimal numbers, not ```{r} #| include: false -#| eval: true sas_code_02 <- " @@ -108,6 +104,7 @@ run; ```{r} #| include: false +#| eval: false widget_02 <- sasquatch::sas_run_string(sas_code_02, capture = "listing") saveRDS(widget_02, file = "SAS/rds/rounding/widget_02.rds") @@ -115,7 +112,6 @@ saveRDS(widget_02, file = "SAS/rds/rounding/widget_02.rds") ```{r} #| echo: false -#| eval: true #| results: asis cat("```txt\n") cat(sas_code_02) @@ -124,7 +120,6 @@ cat("```") ```{r} #| echo: false -#| eval: true readRDS(file = "rds/rounding/widget_02.rds") ``` @@ -132,7 +127,6 @@ The following analysis systematically identifies cases where `round` fails by te ```{r} #| include: false -#| eval: true sas_code_03 <- " @@ -164,7 +158,7 @@ quit; ```{r} #| include: false -#| +#| eval: false widget_03 <- sasquatch::sas_run_string(sas_code_03, capture = "listing") saveRDS(widget_03, file = "SAS/rds/rounding/widget_03.rds") @@ -172,7 +166,6 @@ saveRDS(widget_03, file = "SAS/rds/rounding/widget_03.rds") ```{r} #| echo: false -#| eval: true #| results: asis cat("```txt\n") cat(sas_code_03) @@ -181,7 +174,6 @@ cat("```") ```{r} #| echo: false -#| eval: true readRDS("rds/rounding/widget_03.rds") ``` @@ -189,7 +181,6 @@ This example demonstrates rounding behavior with results from common arithmetic ```{r} #| include: false -#| eval: true sas_code_04 <- " @@ -229,7 +220,7 @@ run; ```{r} #| include: false - +#| eval: false widget_04 <- sasquatch::sas_run_string(sas_code_04, capture = "listing") saveRDS(widget_04, file = "SAS/rds/rounding/widget_04.rds") @@ -237,7 +228,6 @@ saveRDS(widget_04, file = 
"SAS/rds/rounding/widget_04.rds") ```{r} #| echo: false -#| eval: true #| results: asis cat("```txt\n") cat(sas_code_04) @@ -246,7 +236,6 @@ cat("```") ```{r} #| echo: false -#| eval: true readRDS(file = "rds/rounding/widget_04.rds") ``` diff --git a/SAS/sample_s_StatXact_test_of_trends.qmd b/SAS/sample_s_StatXact_test_of_trends.qmd index 179fbe6fa..9563df2cf 100644 --- a/SAS/sample_s_StatXact_test_of_trends.qmd +++ b/SAS/sample_s_StatXact_test_of_trends.qmd @@ -1,13 +1,10 @@ --- title: "Sample size for K ordered binomial populations - Cochran-Armitage trend test" -execute: - eval: false --- ```{r} #| echo: false #| include: false -#| eval: true knitr::opts_chunk$set(echo = TRUE) ``` @@ -30,7 +27,6 @@ The below parameters are need for the calculations: In StatXact we can directly specify all of the response probabilities, or we can specify the baseline probability and use the below logit model with prespecified slope lambda to derive the rest: ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 75% @@ -44,7 +40,6 @@ Let's consider an example of a dose-finding phase I clinical trial of patients w Design parameter are as below: ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 75% @@ -53,7 +48,7 @@ knitr::include_graphics("../images/samplesize/StatXact3.PNG") SAS code: -```{sas} +```sas proc sxpowerbin; tr/ex; palpha 0.025; @@ -72,7 +67,6 @@ run; Output from StatXact and results: ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 75% @@ -88,7 +82,6 @@ Let's consider an example of a long-term follow-up study of subjects exposed to Design parameter are as below: ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 75% @@ -97,7 +90,7 @@ knitr::include_graphics("../images/samplesize/StatXact5.PNG") SAS code: -```{sas} +```sas proc sxpowerbin; tr/ex dist_file=tr; palpha 0.05; @@ -115,7 +108,6 @@ run; Output from StatXact and results: ```{r} -#| eval: true #| echo: false #| fig-align: center 
#| out-width: 75% @@ -129,7 +121,6 @@ Asymptotic power of the test is 77%, a considerable overestimate of the actual p Let's consider an example of the study where the design parameters are as below: ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 75% @@ -140,7 +131,7 @@ What is the required sample size to achieve the power of 80% with the significan SAS code: -```{sas} +```sas proc sxpowerbin ti =15; tr/ex; palpha 0.05; diff --git a/SAS/sample_s_equivalence.qmd b/SAS/sample_s_equivalence.qmd index 162fc517b..d327038cb 100644 --- a/SAS/sample_s_equivalence.qmd +++ b/SAS/sample_s_equivalence.qmd @@ -2,8 +2,6 @@ title: "Sample Size for Equivalence Trials in SAS" date: last-modified date-format: D MMMM, YYYY -execute: - eval: false --- # Introduction @@ -30,7 +28,7 @@ For a mean $\mu_1$, we are testing if $\mu_1$ is equivalent to some value $\thet A reformulation of a treatment pill, needs to have a weight equivalent to a target value of $\theta$ =130 mg. Weight is assumed normally distributed and an acceptable weight is between 110 mg and 150 mg, hence $\delta=20mg$. The standard deviation of the weight is 50 mg. What sample size is needed assuming an alpha level of 5% with 80% power to conclude the weight is within the margin $\delta$ (the tablet weight is equivalent to 130 milligram). The below shows a sample size of 55 pills is required. -```{sas} +```sas PROC POWER ; onesamplemeans test=equiv lower = 110 @@ -44,7 +42,6 @@ RUN; ``` ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 50% @@ -69,7 +66,7 @@ It is anticipated that patients will have the same mean diastolic BP of 96 mmHg A total sample size of 90 is recommended, which equates to a sample size of 45 patients per treatment group. Notice how SAS asks for the `lower` and `upper` bounds, these are derived by using the meandiff $\theta$+/- the acceptable equivalence limit $\delta$ (which is stated as 5 mmHg above). 
-```{sas} +```sas PROC POWER ; twosamplemeans test=equiv_diff lower = -5 @@ -83,7 +80,6 @@ RUN; ``` ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 50% @@ -117,7 +113,7 @@ A client is interested in conducting a clinical trial to compare two cholesterol Below shows a sample size of 140 patients in Total (70 per treatment group). -```{sas} +```sas PROC POWER ; twosamplemeans test=equiv_diff lower = -0.04 @@ -131,7 +127,6 @@ RUN; ``` ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 50% @@ -156,7 +151,7 @@ Let's consider a standard standard two-sequence, two period crossover design for The below shows a sample size of 8 patients is required. -```{sas} +```sas PROC POWER; pairedmeans test=equiv_diff lower = -35 @@ -171,7 +166,6 @@ RUN; ``` ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 50% @@ -188,7 +182,6 @@ knitr::include_graphics("../images/sample_s_equivalence/SAS2crossovermeans.png") ```{r} #| echo: false -#| eval: true si <- sessioninfo::session_info("sample_s_equivalence", dependencies = FALSE) si$external <- structure( list("SAS" = "9.04.01M7P08062020"), diff --git a/SAS/sample_s_noninferiority.qmd b/SAS/sample_s_noninferiority.qmd index 06186f7eb..7dca29eda 100644 --- a/SAS/sample_s_noninferiority.qmd +++ b/SAS/sample_s_noninferiority.qmd @@ -2,8 +2,6 @@ title: "Sample Size for Non-Inferiority Trials in SAS" date: last-modified date-format: D MMMM, YYYY -execute: - eval: false --- # Introduction @@ -20,7 +18,7 @@ This example is a sample size calculation for the following hypotheses: $H_0:\mu A client is interested in conducting a clinical trial to compare two cholesterol lowering agents for treatment of hypercholesterolemic patients through a parallel design. The primary efficacy parameter is a low-density lipidprotein cholesterol (LDL-C). We will consider the situation where the intended trial is for testing noninferiority. 
For establishing it, suppose the true mean difference is 0 and the noninferiority margin is chosen to be -0.05 (-5%). Assuming SD = 0.1, how many patients are required for an 80% power and an overall significance level of 5%? -```{sas} +```sas PROC POWER; twosamplemeans test=equiv_diff @@ -37,7 +35,6 @@ RUN; As shown below, a total sample size of 102 is recommended, which equates to 51 in each group. ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 50% @@ -56,7 +53,7 @@ Let's consider a standard two-sequence, two period crossover design. Suppose tha Alpha = 0.025 is used below, instead of 0.05 because you are doing non-inferiority (a one sided test). Note that this is still the sample size for alpha=0.05. The below shows a sample size of 13 patients is required. -```{sas} +```sas pairedmeans test=equiv_diff lower = -0.3 @@ -70,7 +67,6 @@ test=equiv_diff ``` ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 50% @@ -88,7 +84,6 @@ knitr::include_graphics( ### Version ```{r} -#| eval: true #| echo: false si <- sessioninfo::session_info("sample_s_noninferiority", dependencies = FALSE) si$external <- structure( diff --git a/SAS/sample_s_superiority.qmd b/SAS/sample_s_superiority.qmd index 6d89a3605..918a85c13 100644 --- a/SAS/sample_s_superiority.qmd +++ b/SAS/sample_s_superiority.qmd @@ -3,8 +3,6 @@ title: "Sample Size for Superiority Trials in SAS" output: html_document date: last-modified date-format: D MMMM, YYYY -execute: - eval: false --- SAS has 2 procedures for doing Sample size. A basic summary is provided here based on Jenny Cody's paper^1^ , but see the paper itself for more details. There are also many available options to best to consult SAS online support for [PROC POWER](PROC%20POWER:%20Simple%20AB/BA%20Crossover%20Designs%20::%20SAS/STAT(R)%209.3%20User's%20Guide)^2^ and [PROC GLMPOWER](https://support.sas.com/documentation/cdl/en/statug/63347/HTML/default/viewer.htm#statug_glmpower_a0000000154.htm)^3^. 
@@ -39,7 +37,7 @@ A client is interested in conducting a clinical trial to compare two cholesterol The code below estimates the sample size in SAS. NOTE: you can either specify the MEANDIFF=8 or if you know the separate group means X and Y, you can use GROUPMEANS =X\|Y code instead. SAS also assume a default alpha level of 0.05, a 1:1 balanced randomization and a Normal distribution. -```{sas} +```sas PROC POWER ; TWOSAMPLEMEANS TEST=DIFF MEANDIFF=8 @@ -53,7 +51,6 @@ RUN; As shown below, a total sample size of 114 is recommended, which equates to 57 in each group. ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 50% @@ -70,7 +67,7 @@ Variance of the difference = 2x Variance within patient. Vardiff=2∗Varpatient We wish to run an AB/BA single dose crossover to compare two brochodilators. The primary outcome is peak expiratory flow, and a clinically relevant difference of 30 l/min is sought with 80% power, the significance level is 5% and the best estimate of the within patient standard deviation is 32 l/min. What size of trial do we require? (After recalculating: 32∗2=45 and assuming no period effect and assuming between each pair of measurements on the same subject that we have a 0.5 correlation) -```{sas} +```sas PROC POWER ; PAIREDMEANS TEST=DIFF NPAIRS=. @@ -83,7 +80,6 @@ RUN; ``` ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 50% @@ -100,7 +96,6 @@ knitr::include_graphics("../images/sample_s_superiority/SAScrossovermeans.png") ### Version ```{r} -#| eval: true #| echo: false si <- sessioninfo::session_info("sample_s_superiority", dependencies = FALSE) si$external <- structure( diff --git a/SAS/summary-stats.qmd b/SAS/summary-stats.qmd index 0939d8073..d4b0b647e 100644 --- a/SAS/summary-stats.qmd +++ b/SAS/summary-stats.qmd @@ -1,14 +1,12 @@ --- title: "Calculating Quantiles (percentiles) in SAS" -execute: - eval: false --- Percentiles can be calculated in SAS using the UNIVARIATE procedure. 
The procedure has the option `PCTLDEF` which allows for five different percentile definitions to be used. The default is `PCTLDEF=5`, which uses the empirical distribution function to find percentiles. This is how the 25th and 40th percentiles of `aval` in the dataset `adlb` could be calculated, using the default option for `PCTLDEF`. For quantiles, Q1= 25%, Q2=50%, Q3 = 75%, Q4=100%. -```{sas} +```sas proc univariate data=adlb; var aval; output out=stats pctlpts=25 40 pctlpre=p; diff --git a/SAS/summary_skew_kurt.qmd b/SAS/summary_skew_kurt.qmd index af473a6bc..cb89ded1d 100644 --- a/SAS/summary_skew_kurt.qmd +++ b/SAS/summary_skew_kurt.qmd @@ -1,12 +1,9 @@ --- title: "Skewness/Kurtosis" output: html_document -execute: - eval: false --- ```{r} -#| eval: true #| label: setup #| include: false knitr::opts_chunk$set(echo = TRUE) @@ -20,7 +17,7 @@ In SAS, Skewness and Kurtosis are usually calculated using `PROC MEANS`. The pro The following data was used in this example. -```{sas} +```sas data dat; input team $ points assists; datalines; @@ -54,7 +51,6 @@ The following shows the SAS documentation for the two measures. The [SAS documentation for Skewness](https://documentation.sas.com/doc/en/vdmmlcdc/8.1/casfedsql/p04x27b92gon3gn10e5y5ybxbvmi.htm) is provided here for convenience: ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 100% @@ -66,7 +62,6 @@ knitr::include_graphics("../images/summarystats/sas_skewness.png") The SAS documentation for Kurtosis is as follows: ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 100% @@ -77,7 +72,7 @@ knitr::include_graphics("../images/summarystats/sas_kurtosis.png") Skewness and Kurtosis are commonly calculated in SAS as follows: -```{sas} +```sas proc means data=dat SKEWNESS KURTOSIS; var points; run; @@ -86,7 +81,6 @@ run; Output: ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 30% @@ -99,7 +93,7 @@ The above results correspond to the Type 2 methodology in R. 
The N option produces the following results -```{sas} +```sas proc means data=dat SKEWNESS KURTOSIS vardef = N; var points; run; @@ -108,7 +102,6 @@ run; Output: ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 30% diff --git a/SAS/survey-stats-summary.qmd b/SAS/survey-stats-summary.qmd index 84731fe7d..cb51b10ba 100644 --- a/SAS/survey-stats-summary.qmd +++ b/SAS/survey-stats-summary.qmd @@ -1,8 +1,6 @@ --- title: "Survey Summary Statistics using SAS" bibliography: ../Comp/survey-stats-summary.bib -execute: - eval: false --- When conducting large-scale trials on samples of the population, it can be necessary to use a more complex sampling design than a simple random sample. @@ -24,7 +22,6 @@ For survey summary statistics in SAS, we can use the `SURVEYMEANS` and `SURVEYFR We will use the [API]((https://r-survey.r-forge.r-project.org/survey/html/api.html)) dataset [@API_2000], which contains a number of datasets based on different samples from a dataset of academic performance. Initially we will just cover the methodology with a simple random sample and a finite population correction to demonstrate functionality. ```{r} -#| eval: true #| echo: false #| message: false library(survey) @@ -41,7 +38,7 @@ head(apisrs) |> If we want to calculate a mean of a variable in a dataset which has been obtained from a **s**imple **r**andom **s**ample such as `apisrs`, in SAS we can do the following (*nb. here `total=6194` is obtained from the constant `fpc` column, and provides the finite population correction*): -```{sas} +```sas proc surveymeans data=apisrs total=6194 mean; var growth; run; @@ -68,7 +65,7 @@ run; To calculate population totals, we can request the `sum`. However SAS requires the user to specify the weights, otherwise the totals will be incorrect. 
These weights in this case are equivalent to the total population size divided by the sample size: -```{sas} +```sas data apisrs; set apisrs nobs=n; weight = fpc / n; @@ -102,7 +99,7 @@ growth 197589 12949 To perform ratio analysis for means or proportions of analysis variables in SAS, we can use the following: -```{sas} +```sas proc surveymeans data=apisrs total=6194; ratio api00 / api99; run; @@ -139,7 +136,7 @@ api00 api99 200 1.051066 0.003604 1.04395882 To calculate a proportion in SAS, we use the `PROC SURVEYFREQ`, in the simplest case below: -```{sas} +```sas proc surveyfreq data=apisrs total=6194; table 'sch.wide'n / cl; run; @@ -168,7 +165,7 @@ run; To calculate quantiles in SAS, we can use the `quantile` option to request specific quantiles, or can use keywords to request common quantiles (e.g. quartiles or the median). This will use Woodruff's method for confidence intervals, and a custom quantile method [@SAS_2018, pp. 9834]. -```{sas} +```sas proc surveymeans data=apisrs total=6194 quantile=(0.025 0.5 0.975); var growth; run; @@ -200,7 +197,6 @@ run; Much of the previous examples and notes still stand for more complex survey designs, here we will demonstrate using a dataset from NHANES [@NHANES_2010], which uses both stratification and clustering: ```{r} -#| eval: true #| echo: false data("nhanes") @@ -210,7 +206,7 @@ head(nhanes) |> To produce means and standard quartiles for this sample, taking account of sample design, we can use the following: -```{sas} +```sas proc surveymeans data=nhanes mean quartiles; cluster SDMVPSU; strata SDMVSTRA; @@ -252,7 +248,7 @@ run; To produce an analysis of separate subpopulations in SAS we can use the `DOMAIN` statement (note: do not use the `BY` statement as it will not give statistically valid analysis), here we also request the design effect: -```{sas} +```sas proc surveymeans data=nhanes mean deff; cluster SDMVPSU; strata SDMVSTRA; @@ -295,7 +291,6 @@ race Variable Mean of Mean Effect ::: {.callout-note 
collapse="true" title="Session Info"} ```{r} -#| eval: true #| echo: false si <- sessioninfo::session_info("survey", dependencies = FALSE) si$external <- structure( diff --git a/SAS/survival.qmd b/SAS/survival.qmd index d54e26a34..6beeac0f7 100644 --- a/SAS/survival.qmd +++ b/SAS/survival.qmd @@ -1,7 +1,5 @@ --- title: "Survival Analysis Using SAS" -execute: - eval: false --- The most commonly used survival analysis methods in clinical trials include: @@ -31,7 +29,6 @@ While these models may be explored in a separate document, this particular docum Below is a standard mock-up for survival analysis in clinical trials. ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 75% @@ -52,7 +49,7 @@ The data include 500 subjects from the Worcester Heart Attack Study. This study - gender: males = 0, females = 1 - stratification factor -```{sas} +```sas libname mylib "..\data"; data dat; @@ -69,7 +66,7 @@ The KM estimators and log-rank test are from `PROC LIFETEST`, and Cox PH model i ### KM estimators and log-rank test -```{sas} +```sas proc lifetest data=dat outsurv=_SurvEst timelist= 1 3 5 reduceout stderr; time lenfoly*fstat(0); strata afb; @@ -79,7 +76,6 @@ run; The landmark estimates and quartile estimates for AFB = 0 group are as shown in below: ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 75% @@ -89,7 +85,6 @@ knitr::include_graphics("../images/survival/sas_km_afib0.png") The logrank test result is in below: ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 75% @@ -98,7 +93,7 @@ knitr::include_graphics("../images/survival/sas_logrank.png") ### Cox PH model -```{sas} +```sas proc phreg data = dat; class afb; model lenfol*fstat(0) = afb/rl; @@ -108,7 +103,6 @@ run; The hazard ratio and confidence intervals are shown as below: ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 75% @@ -119,7 +113,7 @@ knitr::include_graphics("../images/survival/sas_cox.png") In a stratified model, the 
Kaplan-Meier estimators remain the same as those in the non-stratified model. To implement stratified log-rank tests and Cox proportional hazards models, simply add the `STRATA` option in both `PROC LIFETEST` and `PROC PHREG`. -```{sas} +```sas # KM estimators and log-rank test proc lifetest data=dat; time lenfoly*fstat(0); diff --git a/SAS/survival_cif.qmd b/SAS/survival_cif.qmd index d72174b5c..40373c0b0 100644 --- a/SAS/survival_cif.qmd +++ b/SAS/survival_cif.qmd @@ -1,7 +1,5 @@ --- title: "Estimating Cumulative Incidence Functions Using SAS" -execute: - eval: false --- ## Objective @@ -9,7 +7,6 @@ execute: In this document we present how to estimate the cumulative incidence function (CIF) in SAS (version 9.4). We focus on the competing risks model where each subject experiences only one out of *k* possible events as depicted in the figure below. ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 25% @@ -32,7 +29,7 @@ The bone marrow transplant (BTM) dataset as presented by Guo & So (2018) is used SAS code to prepare the data: -```{sas} +```sas proc format; value DiseaseGroup 1='ALL' 2='AML-Low Risk' @@ -56,7 +53,7 @@ run; PROC LIFETEST is used to estimate the CIFs in SAS. For illustration, we model the time to relapse. 
-```{sas} +```sas ods graphics on; proc lifetest data=bmt plots=cif(test) @@ -76,7 +73,6 @@ Below are selected outputs for comparison with the R outputs in the companion do CIF estimates for time to relapse at selected timepoints for 'AML-Low Risk' patients: ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 75% @@ -86,7 +82,6 @@ knitr::include_graphics("../images/survival_competing_risks/cifSAS.jpg") CIF estimates for time to relapses: ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 75% diff --git a/SAS/survival_csh.qmd b/SAS/survival_csh.qmd index 60761f36a..802577645 100644 --- a/SAS/survival_csh.qmd +++ b/SAS/survival_csh.qmd @@ -1,7 +1,5 @@ --- title: "Estimating and Testing Cause Specific Hazard Ratio Using SAS" -execute: - eval: false --- ## Objective @@ -9,7 +7,6 @@ execute: In this document we present how to estimate and test cause specific hazard ratio for the probability of experiencing a certain event at a given time in a competing risks model in SAS (version 9.4). We focus on the basic model where each subject experiences only one out of *k* possible events as depicted in the figure below. ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 25% @@ -38,7 +35,7 @@ The bone marrow transplant (BTM) dataset as presented by Guo & So (2018) is used - For illustration, a categorical variable `waitCat` is created from `waitTime` as `waitCat = TRUE` if `waitTime > 200`, and `FALSE` otherwise. -```{sas} +```sas proc format; value DiseaseGroup 1='ALL' 2='AML-Low Risk' @@ -65,7 +62,7 @@ run; Starting in SAS/STAT 14.3, all competing events can be estimated together. However, currently this syntax does not allow the `strata` statement. 
-```{sas} +```sas proc phreg data=Bmt; title 'Cause-Specific Hazard Regression for Relapse and Death without strata'; class Group (order=internal ref=first); @@ -76,7 +73,6 @@ run; The results for both events are given below: ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 100% @@ -100,7 +96,7 @@ For more information, please see [Guo C and So Y. (2018)](https://support.sas.co We use `Relapse` as an example. -```{sas} +```sas ods output ParameterEstimates=p1; proc phreg data=bmt; title 'Cause-Specific Hazard Regression for Relapse with strata'; diff --git a/SAS/tipping_point.qmd b/SAS/tipping_point.qmd index 75c19d0c4..506b3668b 100644 --- a/SAS/tipping_point.qmd +++ b/SAS/tipping_point.qmd @@ -1,12 +1,9 @@ --- title: "SAS Tipping Point (Delta Adjustment): Continuous Data" -execute: - eval: false --- ```{r} #| include: false -#| eval: true knitr::opts_chunk$set(echo = TRUE) ``` @@ -46,7 +43,7 @@ The same publicly available [dataset](https://r-packages.io/datasets/antidepress The relevant endpoint for the antidepressant trial was assessed using the Hamilton 17-item depression rating scale (HAMD17), which was measured at baseline and subsequently at weeks 1, 2, 3, 4 and 6 (visits 4-7). Study drug discontinuation occurred in 24% (20/84) of subjects in the active drug group, compared to 26% (23/88) of subjects in the placebo group. Importantly, all data after study drug discontinuation are missing and there is a single intermittent missing observation. 
-```{sas} +```sas proc print data=dat (obs=10); var PATIENT GENDER THERAPY RELDAYS VISIT BASVAL HAMDTL17 CHANGE; run; @@ -55,7 +52,6 @@ run; ```{r} #| label: data_exploration_1_png #| echo: false -#| eval: true #| fig-align: center #| out-width: 80% knitr::include_graphics("../images/tipping_point/SAS_data_exploration_1.PNG") @@ -63,7 +59,7 @@ knitr::include_graphics("../images/tipping_point/SAS_data_exploration_1.PNG") The number of patients per visit and treatment group are: -```{sas} +```sas proc freq data=dat; table VISIT*THERAPY / norow nocol nopercent nocum; run; @@ -80,7 +76,7 @@ knitr::include_graphics("../images/tipping_point/SAS_data_exploration_2.PNG") The mean change from baseline of the HAMD17 endpoint per visit and treatment group using only the complete cases are: -```{sas} +```sas proc means data=dat n mean nonobs; class VISIT THERAPY; var CHANGE; @@ -98,7 +94,7 @@ knitr::include_graphics("../images/tipping_point/SAS_data_exploration_3.PNG") The missingness pattern is: -```{sas} +```sas proc transpose data=dat out=HAMD_wide(drop=_NAME_) prefix=CHG; by PATIENT THERAPY BASVAL; id VISIT; @@ -113,7 +109,6 @@ run; ```{r} #| label: data_exploration_4_png #| echo: false -#| eval: true #| fig-align: center #| out-width: 80% knitr::include_graphics("../images/tipping_point/SAS_data_exploration_4.PNG") @@ -141,7 +136,7 @@ As mentioned, we will illustrate the use of the so-called `five macros` in SAS f To conduct a tipping point analysis under the MAR assumption, we simply specify `method = MAR` under `Part2A()` of the `five macros`. Generally, the rest of `Part1` and `Part2` are the same as in the scenario without any delta adjustment. -```{sas} +```sas %part1A(jobname = HAMD, Data=dat, Subject=PATIENT, @@ -174,7 +169,7 @@ A clear description of these arguments is given in the documentation: To automate the tipping point analysis, you can create a new macro like shown below. 
The first part of this macro prints all results, while the second part prints the non-significant and significant results separately by filtering on `Probt`. -```{sas} +```sas data all_results; length DELTA 8 VISIT $10 THERAPY $20 _THERAPY $20 Diff SE_Diff df Probt LCL_Diff UCL_Diff 8; stop; @@ -209,7 +204,7 @@ run; knitr::include_graphics("../images/tipping_point/SAS_MAR_all_results.png") ``` -```{sas} +```sas proc sql noprint; create table delta_ge_05 as @@ -249,7 +244,6 @@ run; ```{r} #| label: MAR_non_sig_results_png #| echo: false -#| eval: true #| fig-align: center #| out-width: 80% knitr::include_graphics("../images/tipping_point/SAS_MAR_non_sig_results.png") @@ -258,7 +252,6 @@ knitr::include_graphics("../images/tipping_point/SAS_MAR_non_sig_results.png") ```{r} #| label: MAR_sig_results_png #| echo: false -#| eval: true #| fig-align: center #| out-width: 80% knitr::include_graphics("../images/tipping_point/SAS_MAR_sig_results.png") @@ -269,7 +262,6 @@ To determine the **exact** tipping point between the last "significant" delta an ```{r} #| label: MAR_TP_png #| echo: false -#| eval: true #| fig-align: center #| out-width: 80% knitr::include_graphics("../images/tipping_point/SAS_MAR_TP.png") @@ -284,7 +276,6 @@ A nice visualization of this tipping point analysis for the MAR approach is show ```{r} #| label: MAR_est_pval_plot_png #| echo: false -#| eval: true #| fig-align: center #| out-width: 80% knitr::include_graphics("../images/tipping_point/SAS_MAR_est_pval.png") @@ -316,7 +307,6 @@ Of all considered approaches, the MAR approach yields the largest delta adjustme ```{r} #| label: comparison_est_plot_png #| echo: false -#| eval: true #| fig-align: 'center' #| out-width: '80%' knitr::include_graphics("../images/tipping_point/SAS_comparison_est.png") @@ -325,7 +315,6 @@ knitr::include_graphics("../images/tipping_point/SAS_comparison_est.png") ```{r} #| label: comparison_pval_plot_png #| echo: false -#| eval: true #| fig-align: 'center' #| out-width: '80%' 
knitr::include_graphics("../images/tipping_point/SAS_comparison_pval.png") @@ -376,7 +365,7 @@ Let's now consider the antidepressant data again. Suppose we apply a delta adjus To program this, we would define `Delta`, `DLag` and `DGroups` in `Part3()` of the `five macros` as follows: -```{sas} +```sas %part3(Jobname = HAMD_MAR, anref=PLACEBO, Delta = 2 2 2 2, @@ -400,7 +389,7 @@ As already illustrated in the tipping point analysis assuming MAR above, you may To apply this delta = 5 to both groups we leave `DGroups` unspecified. -```{sas} +```sas %part3(Jobname = HAMD_MAR, anref=PLACEBO, Delta = 5 5 5 5, diff --git a/SAS/tobit regression SAS.qmd b/SAS/tobit regression SAS.qmd index 1c7bb9b38..bcee028d3 100644 --- a/SAS/tobit regression SAS.qmd +++ b/SAS/tobit regression SAS.qmd @@ -1,7 +1,5 @@ --- title: "Tobit Regression" -execute: - eval: false --- # Tobit regression @@ -26,7 +24,7 @@ $$ with $\epsilon_{i} \sim N(0,\sigma^2)$. But we only observe $y = max(\tau, y^ We assume two equally sized groups (n=10 in each group). The data is censored on the left at a value of $\tau=8.0$. In group A 4/10 records are censored, and 1/10 in group B. -```{sas} +```sas data dat_used; input ID$ ARM$ Y CENS; cards; @@ -60,7 +58,7 @@ The analysis will be based on a Tobit analysis of variance with $Y$, rounded to First a data manipulation step needs to be performed in which the censored values are set to missing for a new variable called *lower*. -```{sas} +```sas data dat_used; set dat_used; if Y <= 8.0 then lower=.; else lower=Y; @@ -69,7 +67,7 @@ run; The data are sorted to make sure the intercept will correspond to the mean of ARM A. -```{sas} +```sas proc sort data=dat_used; by descending ARM; run; @@ -83,7 +81,7 @@ The **LIFEREG** procedure is used for tobit regression. The following model synt Here, if the *lower* value is missing, then the *upper* value is used as a left-censored value. 
-```{sas} +```sas proc lifereg data=dat_used order=data; class ARM; model (lower, Y) = ARM / d=normal; @@ -95,7 +93,6 @@ run; The fit statistics, type 3 analysis of effects and parameter estimated are shown here. The output provides an estimate of difference between groups A and B (B-A), namely 1.8225 (se=0.8061). The presented p-value is a two-sided p-value based on the Z-test. The scale parameter is an estimate for $\sigma$. ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 100% @@ -105,7 +102,6 @@ knitr::include_graphics("../images/tobit/SAS_tobit_1.PNG") The p-value and confidence intervals of the contrast B-A are shown here. The p-value is the same as above. ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 80% diff --git a/SAS/ttest_1Sample.qmd b/SAS/ttest_1Sample.qmd index 4071f9eb3..b0061afdd 100644 --- a/SAS/ttest_1Sample.qmd +++ b/SAS/ttest_1Sample.qmd @@ -1,12 +1,9 @@ --- title: "One Sample t-test in SAS" output: html_document -execute: - eval: false --- ```{r} -#| eval: true #| label: setup #| include: false knitr::opts_chunk$set(echo = TRUE) @@ -18,7 +15,7 @@ In SAS, a one sample t-test is usually performed using PROC TTEST. The one sampl The following data was used in this example. 
-```{sas} +```sas data read; input score count @@; datalines; @@ -39,7 +36,7 @@ By default, SAS PROC TTEST t-test assumes normality in the data and uses a class The following code was used to test the comparison of a reading scores against a baseline hypothesis value of 30: -```{sas} +```sas proc ttest data=read h0=30; var score; run; @@ -48,7 +45,6 @@ run; Output: ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 50% @@ -63,7 +59,7 @@ The SAS one sample t-test also supports lognormal analysis for a one sample t-te Using the same data as above, we will set the "DIST" option to "lognormal" to perform this analysis: -```{sas} +```sas proc ttest data=read h0=30 dist=lognormal; var score; run; @@ -72,7 +68,6 @@ run; Output: ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 60% diff --git a/SAS/ttest_2Sample.qmd b/SAS/ttest_2Sample.qmd index 93e575a8f..aed78b195 100644 --- a/SAS/ttest_2Sample.qmd +++ b/SAS/ttest_2Sample.qmd @@ -1,12 +1,9 @@ --- title: "Independent Two-Sample t-test" output: html_document -execute: - eval: false --- ```{r} -#| eval: true #| label: setup #| include: false knitr::opts_chunk$set(echo = TRUE) @@ -16,7 +13,7 @@ knitr::opts_chunk$set(echo = TRUE) The following data was used in this example. -```{sas} +```sas data d1; length trt_grp $ 9; input trt_grp $ WtGain @@; @@ -42,7 +39,7 @@ Both the Student's t-test and Welch's t-test (the Satterthwaite approximation is For this example, we're testing the significant difference in mean of Weight gain (*WtGain*) between treatment and placebo (*trt_grp*) using PROC TTEST procedure in SAS. -```{sas} +```sas proc ttest data=d1; class trt_grp; var WtGain; @@ -56,7 +53,6 @@ Output: ``` ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 50% @@ -71,7 +67,7 @@ Note: Before entering straight into the t-test we need to check whether the assu 1. 
Normality: You can check for data to be normally distributed by plotting a histogram of the data by treatment. Alternatively, you can use the Shapiro-Wilk test or the Kolmogorov-Smirnov test. If the test is \<0.05 and your sample is quite small then this suggests you should not use the t-test. However, if your sample in each treatment group is large (say \>30 in each group), then you do not need to rely so heavily on the assumption that the data have an underlying normal distribution in order to apply the two-sample t-test. This is where plotting the data using histograms can help to support investigation into the normality assumption. We have checked the normality of the observations using the code below. Here for both the treatment groups we have P value greater than 0.05 (Shapiro-Wilk test is used), therefore the normality assumption is there for our data. -```{sas} +```sas proc univariate data=d1 normal; qqplot WtGain; by trt_grp; @@ -85,7 +81,6 @@ Output: ``` ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 30% @@ -97,7 +92,6 @@ knitr::include_graphics("../images/ttest/trt_sas.png") ``` ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 30% @@ -113,7 +107,6 @@ Output: ``` ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 30% @@ -125,7 +118,7 @@ knitr::include_graphics("../images/ttest/variance_sas.png") For this example, it is important to use the Welch's t-test (the Satterthwaite approximation is used to calculate the effective degrees of freedom and variance) results. 
-```{sas} +```sas data d2; length trt_grp $ 9; input trt_grp $ WtGain @@; @@ -154,7 +147,6 @@ Output: ```{r} #| echo: false -#| eval: true #| fig-align: center #| out-width: 50% knitr::include_graphics("../images/ttest/test2.png") diff --git a/SAS/ttest_Paired.qmd b/SAS/ttest_Paired.qmd index ac2962c5e..d8b34ad9b 100644 --- a/SAS/ttest_Paired.qmd +++ b/SAS/ttest_Paired.qmd @@ -1,11 +1,8 @@ --- title: "Paired t-test" -execute: - eval: false --- ```{r} -#| eval: true #| label: setup #| include: false knitr::opts_chunk$set(echo = TRUE) @@ -25,7 +22,7 @@ By default, SAS PROC TTEST t-test assumes normality in the data and uses a class The following data was used in this example. -```{sas} +```sas data pressure; input SBPbefore SBPafter @@; datalines; @@ -39,7 +36,7 @@ data pressure; The following code was used to test the comparison of two paired samples of Systolic Blood Pressure before and after a procedure. -```{sas} +```sas proc ttest data=pressure; paired SBPbefore*SBPafter; run; @@ -48,7 +45,6 @@ run; Output: ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 50% @@ -61,7 +57,7 @@ The SAS paired t-test also supports analysis of lognormal data. Here is the data ### Data -```{sas} +```sas data auc; input TestAUC RefAUC @@; datalines; @@ -75,7 +71,7 @@ data auc; For cases when the data is lognormal, SAS offers the "DIST" option to chose between a normal and lognormal distribution. The procedure also offers the TOST option to specify the equivalence bounds. 
-```{sas} +```sas proc ttest data=auc dist=lognormal tost(0.8, 1.25); paired TestAUC*RefAUC; run; @@ -84,7 +80,6 @@ run; Output: ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 70% diff --git a/SAS/wilcoxonsr_HL.qmd b/SAS/wilcoxonsr_HL.qmd index 60e490f72..c1de66ebc 100644 --- a/SAS/wilcoxonsr_HL.qmd +++ b/SAS/wilcoxonsr_HL.qmd @@ -1,7 +1,5 @@ --- title: "Wilcoxon signed-rank test in SAS & StatXact®" -execute: - eval: false --- ### **Introduction** @@ -20,7 +18,7 @@ Again, wilcoxon signed rank test was applied to analyse the time to return to ba Let's consider a case where the dataset has no ties and N (number of observations) = 240. -```{sas} +```sas data TTR; set TTR; diff = TRT_B - TRT_A; @@ -28,7 +26,6 @@ run; ``` ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 75% @@ -37,7 +34,7 @@ knitr::include_graphics("../images/wilcoxonsr/wsr_data.PNG") In SAS Wilcoxon Signed-Rank test is available using PROC UNIVARIATE. -```{sas} +```sas proc univariate data=TTR alpha=0.1; var diff; @@ -55,14 +52,14 @@ knitr::include_graphics("../images/wilcoxonsr/wsr_240.PNG") Now let's consider a smaller dataset, created by selecting first 19 observations from our main data. 
-```{sas} +```sas data TTR_19; set TTR; if _N_ <= 19; run; ``` -```{sas} +```sas proc univariate data=TTR_19 alpha=0.1; var diff; @@ -70,7 +67,6 @@ run; ``` ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 75% @@ -98,7 +94,7 @@ StatXact® PROCs for SAS users is a clinical trial analysis software from Cytel ### Dataset without ties and N \> 20 {.unnumbered} -```{sas} +```sas /* Wilxocon S-R test - p values */ PROC PAIRED DATA=WilcoxonSignedRank_TTR ALPHA=0.9; @@ -108,14 +104,13 @@ RUN; ``` ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 75% knitr::include_graphics("../images/wilcoxonsr/wsrSX_240a.PNG") ``` -```{sas} +```sas /* Wilcoxon S-R - H-L estimator and CI */ PROC PAIRED DATA=WilcoxonSignedRank_TTR ALPHA=0.9; @@ -125,7 +120,6 @@ RUN; ``` ```{r} -#| eval: true #| echo: false #| fig-align: center #| out-width: 75% diff --git a/_freeze/Clustering_Knowhow/execute-results/html.json b/_freeze/Clustering_Knowhow/execute-results/html.json deleted file mode 100644 index 09f73ec88..000000000 --- a/_freeze/Clustering_Knowhow/execute-results/html.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "hash": "1ca6a8639bee36b0c97f44c1c8821342", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"Clustering_knowhow\"\nauthor: \"Niladri Dasgupta\"\ndate: \"2024-08-12\"\noutput: html_document\n---\n\n\n\n\n\n## **What is clustering?**\n\nClustering is a method of segregating unlabeled data or data points into different groups/clusters such that similar data points fall in the same cluster than those which differ from the others. The similarity measures are calculated using distance based metrics like Euclidean distance, Cosine similarity, Manhattan distance, etc.\n\nFor Example, In the graph given below, we can clearly see that the data points can be grouped into 3 clusters\n\n![](images/Clustering/clustering_ex.PNG)\n
\n\n## **Type of Clustering Algorithm**\n\nSome of the popular clustering algorithms are:\n\n1. Centroid-based Clustering (Partitioning methods)\n2. Density-based Clustering (Model-based methods)\n3. Connectivity-based Clustering (Hierarchical clustering)\n4. Distribution-based Clustering\n\n### 1.Centroid-based Clustering (Partitioning methods)\n\nPartitioning methods group data points on the basis of their closeness. The similarity measure chosen for these algorithms are Euclidean distance, Manhattan Distance or Minkowski Distance.\n\nThe primary drawback for these algorithms is we need to pre define the number of clusters before allocating the data points to a group.\n\nOne of the popular centroid based clustering technique is K means Clustering.\n
\n\n#### **K Means Clustering**\n\nK means is an iterative clustering algorithm that works in these 5 steps: \n\n1. Specify the desired number of clusters K: Let us choose k=2 for these 5 data points in 2-D space.\n\n ![](images/Clustering/kmeans_1.png)\n\n2. Randomly assign each data point to a cluster: Let’s assign three points in cluster 1, shown using orange color, and two points in cluster 2, shown using grey color.\n\n ![](images/Clustering/kmeans_2.png)\n\n3. Compute cluster centroids: Centroids correspond to the arithmetic mean of data points assigned to the cluster. The centroid of data points in the orange cluster is shown using the orange cross, and those in the grey cluster using a grey cross. \n\n ![](images/Clustering/kmeans_3.png)\n\n4. Assigns each observation to their closest centroid, based on the Euclidean distance between the object and the centroid \n\n ![](images/Clustering/kmeans_4.png)\n\n5. Re-computing the centroids for both clusters.\n\n ![](images/Clustering/kmeans_5.png)\n\n\nWe will repeat the 4th and 5th steps until no further switching of data points between two clusters for two successive repeats.\n
\n\n\n#### K-Means Clustering in R\n\n\n**Step 1: Load packages**\n\nFirst, we’ll load below packages that contain several useful functions regarding k-means clustering in R.\n\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(cluster) #Contain cluster function\nlibrary(dplyr) #Data manipulation\nlibrary(ggplot2) #Plotting function\nlibrary(readr) #Read and write excel/csv files\nlibrary(factoextra) #Extract and Visualize the Results of Multivariate Data Analyses\n```\n:::\n\n\n\n**Step 2: Load Data**\n\nWe have used the “Mall_Customer” dataset in R for this case study.\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n#Loading the data\ndf <- read_csv(\"data/Mall_Customers.csv\")\n\n#Structure of the data\nstr(df)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nspc_tbl_ [200 × 5] (S3: spec_tbl_df/tbl_df/tbl/data.frame)\n $ CustomerID : chr [1:200] \"0001\" \"0002\" \"0003\" \"0004\" ...\n $ Genre : chr [1:200] \"Male\" \"Male\" \"Female\" \"Female\" ...\n $ Age : num [1:200] 19 21 20 23 31 22 35 23 64 30 ...\n $ Annual Income (k$) : num [1:200] 15 15 16 16 17 17 18 18 19 19 ...\n $ Spending Score (1-100): num [1:200] 39 81 6 77 40 76 6 94 3 72 ...\n - attr(*, \"spec\")=\n .. cols(\n .. CustomerID = col_character(),\n .. Genre = col_character(),\n .. Age = col_double(),\n .. `Annual Income (k$)` = col_double(),\n .. `Spending Score (1-100)` = col_double()\n .. )\n - attr(*, \"problems\")= \n```\n\n\n:::\n:::\n\n\n\ndataset consists of 200 customers data with their age, annual income and Spending score. 
\n\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n#Rename the columns\ndf <- df %>% \n rename(\"Annual_Income\"= `Annual Income (k$)`, \"Spending_score\"= `Spending Score (1-100)`)\n\n#remove rows with missing values\ndf <- na.omit(df)\n\n#scale each variable to have a mean of 0 and sd of 1\ndf1 <- df %>% \n mutate(across(where(is.numeric), scale))\n\n#view first six rows of dataset\nhead(df1)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 6 × 5\n CustomerID Genre Age[,1] Annual_Income[,1] Spending_score[,1]\n \n1 0001 Male -1.42 -1.73 -0.434\n2 0002 Male -1.28 -1.73 1.19 \n3 0003 Female -1.35 -1.70 -1.71 \n4 0004 Female -1.13 -1.70 1.04 \n5 0005 Female -0.562 -1.66 -0.395\n6 0006 Female -1.21 -1.66 0.999\n```\n\n\n:::\n:::\n\n\n
\n\nWe have separated the CustomerID and Genre from the dataset. The reason for removing these variables from the cluster dataset as Kmeans can handle only numerical variables. \nTo create cluster with categorical or ordinal variable we can use k-Medoid clustering.\n
\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndf1 <- df1[,4:5]\n```\n:::\n\n\n\n**Step 3: Find the Optimal Number of Clusters**\n\nTo perform k-means clustering in R we can use the built-in kmeans() function, which uses the following syntax:\n\n \n kmeans(data, centers, iter.max, nstart)\n where:\n - data: Name of the dataset.\n - centers: The number of clusters, denoted k.\n - iter.max (optional): The maximum number of iterations allowed. Default value is 10.\n - nstart (optional): The number of initial configurations. Default value is 1.\n\n\n \n- Centers is the k of K Means. centers = 5 would results in 5 clusters being created. We need to **predefine the k** before the cluster process starts. \n- iter.max is the number of times the algorithm will repeat the cluster assignment and update the centers / centroids. Iteration stops after this many iterations even if the convergence criterion is not satisfied\n- nstart is the number of times the initial starting points are re-sampled. \nIt means at the initialization of Clusters you need to specify how many clusters you want and the algorithm will randomly find same number of centroids to initialize. nstart gives you an edge to initialize the centroids through re sampling. \nFor example if total number of cluster is 3 and nstart=25 then it extracts 3 sets of data, 25 times, and for each of these times, the algorithm is run (up to iter.max # of iterations) and the cost function (total sum of the squares) is evaluated and finally 3 centroids with lowest cost function are chosen to start the clustering process.\n\n\nTo find the best number of clusters/centroids there are two popular methods as shown below.\n\n[**A. Elbow Method:**]{.underline}\n\nIt has two parts as explained below-\n\n- WSS: The Within Sum of Squares (WSS) is the sum of distance between the centroids and every other data points within a cluster. 
Small WSS indicates that every data point is close to its nearest centroids.\n\n- Elbow rule/method: Here we plot out the WSS score against the number of K. Because with the number of K increasing, the WSS will always decrease; however, the magnitude of decrease between each k will be diminishing, and the plot will be a curve which looks like an arm that curled up. In this way, we can find out which point falls on the elbow.\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nset.seed(1)\nwss<- NULL\n\n#Feeding different centroid/cluster and record WSS\n\nfor (i in 1:10){\n fit = kmeans(df1,centers = i,nstart=25)\n wss = c(wss, fit$tot.withinss)\n}\n\n#Visualize the plot\nplot(1:10, wss, type = \"o\", xlab='Number of clusters(k)')\n```\n\n::: {.cell-output-display}\n![](Clustering_Knowhow_files/figure-html/unnamed-chunk-5-1.png){width=672}\n:::\n:::\n\n\n\nBased on the above plot at k=5 we can see an “elbow” where the sum of squares begins to “bend” or level off so the ideal number of clusters should be 5.\n\n\nThe above process to compute the “Elbow method” has been wrapped up in a single function (fviz_nbclust):\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfviz_nbclust(df1, kmeans, method = \"wss\",nstart=25)\n```\n\n::: {.cell-output-display}\n![](Clustering_Knowhow_files/figure-html/unnamed-chunk-6-1.png){width=672}\n:::\n:::\n\n\n\n\n[**B. Silhouette Method:**]{.underline}\n\nThe silhouette coefficient or silhouette score is a measure of how similar a data point is within-cluster (intra-cluster) compared to other clusters (inter-cluster). \nThe Silhouette Coefficient is calculated using the mean *intra-cluster distance (a)* and the *mean nearest-cluster distance (b)* for each sample. The Silhouette Coefficient for a sample is *(b - a) / max(a, b)*\n\nHere we will plot the silhouette width/coefficient for different number of clusters and will choose the point where the silhouette width is highest. 
\n\n**Points to Remember While Calculating Silhouette Coefficient:**\n\nThe value of the silhouette coefficient is between [-1, 1].\nA score of 1 denotes the best, meaning that the data points are very compact within the cluster to which it belongs and far away from the other clusters.\nThe worst value is -1. Values near 0 denote overlapping clusters.\n\nIn this demonstration, we are going to see how silhouette method is used.\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsilhouette_score <- function(k){\n km <- kmeans(df1, centers = k,nstart = 25)\n ss <- silhouette(km$cluster, dist(df1))\n mean(ss[, 3])\n}\nk <- 2:10\n\navg_sil <- sapply(k, silhouette_score)\nplot(k, type='b', avg_sil, xlab='Number of clusters', ylab='Average Silhouette Scores', frame=FALSE)\n```\n\n::: {.cell-output-display}\n![](Clustering_Knowhow_files/figure-html/unnamed-chunk-7-1.png){width=672}\n:::\n:::\n\n\n\nFrom the above method we can see the silhouette width is highest at cluster 5 so the optimal number of cluster should be 5.\n\nSimilar to the elbow method, this process to compute the “average silhoutte method” has been wrapped up in a single function (fviz_nbclust):\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfviz_nbclust(df1, kmeans, method='silhouette',nstart=25)\n```\n\n::: {.cell-output-display}\n![](Clustering_Knowhow_files/figure-html/unnamed-chunk-8-1.png){width=672}\n:::\n:::\n\n\n\nThe optimal number of clusters is 5.\n\n\n**Step 4: Perform K-Means Clustering with Optimal K**\n\nLastly, we can perform k-means clustering on the dataset using the optimal value for k of 5:\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n#make this example reproducible\nset.seed(1)\n\n#perform k-means clustering with k = 5 clusters\nfit <- kmeans(df1, 5, nstart=25)\n#view results\nfit\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nK-means clustering with 5 clusters of sizes 22, 35, 81, 39, 23\n\nCluster means:\n Annual_Income Spending_score\n1 -1.3262173 1.12934389\n2 1.0523622 -1.28122394\n3 
-0.2004097 -0.02638995\n4 0.9891010 1.23640011\n5 -1.3042458 -1.13411939\n\nClustering vector:\n [1] 5 1 5 1 5 1 5 1 5 1 5 1 5 1 5 1 5 1 5 1 5 1 5 1 5 1 5 1 5 1 5 1 5 1 5 1 5\n [38] 1 5 1 5 1 5 3 5 1 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3\n [75] 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3\n[112] 3 3 3 3 3 3 3 3 3 3 3 3 4 2 4 3 4 2 4 2 4 3 4 2 4 2 4 2 4 2 4 3 4 2 4 2 4\n[149] 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2\n[186] 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4\n\nWithin cluster sum of squares by cluster:\n[1] 5.217630 18.304646 14.485632 19.655252 7.577407\n (between_SS / total_SS = 83.6 %)\n\nAvailable components:\n\n[1] \"cluster\" \"centers\" \"totss\" \"withinss\" \"tot.withinss\"\n[6] \"betweenss\" \"size\" \"iter\" \"ifault\" \n```\n\n\n:::\n:::\n\n\n\nWe can visualize the clusters on a scatterplot that displays the first two principal components on the axes using the fivz_cluster() function:\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n#plot results of final k-means model\n\nfviz_cluster(fit, data = df1)\n```\n\n::: {.cell-output-display}\n![](Clustering_Knowhow_files/figure-html/unnamed-chunk-10-1.png){width=672}\n:::\n:::\n\n\n\n\n**Step 5: Exporting the data by adding generated clusters**\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n#Adding the clusters in the main data\n\ndf_cluster <- df %>% \n mutate(cluster=fit$cluster)\n\n#Creating Summary of created clusters based on existing variables\n\ndf_summary <- df_cluster %>% \n group_by(cluster) %>% \n summarise(records=n(),avg_age=mean(Age),avg_annual_income=mean(Annual_Income),avg_spending_score=mean(Spending_score))\n\nprint(df_summary)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 5 × 5\n cluster records avg_age avg_annual_income avg_spending_score\n \n1 1 22 25.3 25.7 79.4\n2 2 35 41.1 88.2 17.1\n3 3 81 42.7 55.3 49.5\n4 4 39 32.7 86.5 82.1\n5 5 23 45.2 26.3 20.9\n```\n\n\n:::\n:::\n\n\n\nWe can create a group of 
potential customers to target based on their age, average annual income and average spending score.\n", - "supporting": [ - "Clustering_Knowhow_files" - ], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/Clustering_Knowhow/figure-html/unnamed-chunk-10-1.png b/_freeze/Clustering_Knowhow/figure-html/unnamed-chunk-10-1.png deleted file mode 100644 index fd15cf602..000000000 Binary files a/_freeze/Clustering_Knowhow/figure-html/unnamed-chunk-10-1.png and /dev/null differ diff --git a/_freeze/Clustering_Knowhow/figure-html/unnamed-chunk-5-1.png b/_freeze/Clustering_Knowhow/figure-html/unnamed-chunk-5-1.png deleted file mode 100644 index c1e20e63a..000000000 Binary files a/_freeze/Clustering_Knowhow/figure-html/unnamed-chunk-5-1.png and /dev/null differ diff --git a/_freeze/Clustering_Knowhow/figure-html/unnamed-chunk-6-1.png b/_freeze/Clustering_Knowhow/figure-html/unnamed-chunk-6-1.png deleted file mode 100644 index c4a9c6a71..000000000 Binary files a/_freeze/Clustering_Knowhow/figure-html/unnamed-chunk-6-1.png and /dev/null differ diff --git a/_freeze/Clustering_Knowhow/figure-html/unnamed-chunk-7-1.png b/_freeze/Clustering_Knowhow/figure-html/unnamed-chunk-7-1.png deleted file mode 100644 index 31ca285fd..000000000 Binary files a/_freeze/Clustering_Knowhow/figure-html/unnamed-chunk-7-1.png and /dev/null differ diff --git a/_freeze/Clustering_Knowhow/figure-html/unnamed-chunk-8-1.png b/_freeze/Clustering_Knowhow/figure-html/unnamed-chunk-8-1.png deleted file mode 100644 index f13e64211..000000000 Binary files a/_freeze/Clustering_Knowhow/figure-html/unnamed-chunk-8-1.png and /dev/null differ diff --git a/_freeze/Comp/r-east_gsd_tte/execute-results/html.json b/_freeze/Comp/r-east_gsd_tte/execute-results/html.json index 4ab24d44f..205f32545 100644 --- a/_freeze/Comp/r-east_gsd_tte/execute-results/html.json +++ 
b/_freeze/Comp/r-east_gsd_tte/execute-results/html.json @@ -1,11 +1,9 @@ { - "hash": "70409931020d2271604c50b8dc5f9ef5", + "hash": "8836906a81288cda8e91a752b4916545", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"R vs EAST vs SAS: Group sequential design\"\neditor_options: \n chunk_output_type: console\nexecute: \n eval: false\n---\n\n## Introduction\n\nIn this vignette, we briefly compare sample size/power calculations for a group sequential design (GSD) for time to event endpoints between EAST and [gsDesign](https://keaven.github.io/gsDesign/), [gsDesign2](https://merck.github.io/gsDesign2/), and [rpact](https://www.rpact.org/). Note that, a comparison between rpact and gsDesign has been previously reported [here](https://www.rpact.org/vignettes/planning/rpact_vs_gsdesign_examples/#comparison-analysis-time-of-rpact-vs.-gsdesign). Additionally, we present comparative results between SAS `PROC SEQDESIGN` and rpact to provide a comprehensive evaluation framework.\n\nThere are two main methods that are generally used for GSD sample-size/power calculations for time to event endpoints under proportional hazard assumption:\n\n- Lachin & Foulkes (LF) Method (1986)\n- Kim & Tsiatis (KT) Method (1990)\n\nThe main difference between the two methods is that LF method requires specification of accrual duration as well as study duration, while KT method calculates study duration iteratively given accrual rates and accrual duration. In general, these two methods produce similar, but not identical results.\n\nBoth LF and KT methods are implemented in gsDesign and [SAS](https://documentation.sas.com/doc/en/pgmsascdc/v_067/statug/statug_seqdesign_details42.htm#statug.seqdesign.cseqdinputd), while KT method is implemented in EAST and rpact. gsDesign2 uses a modification of the LF method while applying an average hazard ratio (AHR) approach for non-proportional hazards (Schemper, Wakounig, and Heinze, 2009, Yung and Liu 2020). 
gsDesign2 also enables use of the sample size method of Yung and Liu (2020).\n\nOne additional computational difference to note for EAST vs gsDesign/gsDesign2 is the usage of different log hazard ratio variance assumptions. By default, EAST uses the variance under the null hypothesis and provides an option for using the variance under the alternative hypothesis. gsDesign, on the other hand, is using both of these variances as suggested by Lachin and Foulkes (1986). gsDesign2 has `info_scale` argument in `gsDesign2::gs_power_ahr()`, `gsDesign2::gs_design_ahr()`, which could be set to variance under the null or alternative hypothesis or to the combination of variances.\n\nBelow we provide an example of reproducing EAST results from [this vignette](https://psiaims.github.io/CAMIS/East/gsd-tte.html) using gsDesign/gsDesign2/rpact. As shown in the example, gsDesign2 and rpact can reproduce EAST calculations for GSD boundaries, while gsDesign results have minor differences. Similarly, our comparison between SAS `PROC SEQDESIGN` and rpact shows good agreement in the calculation, with only minimal numerical differences observed. gsDesign has an option under development to support a complete concordance with EAST.\n\n## Design example\n\nWe assume that a GSD is utilized for progression-free survival (PFS) endpoint. It will be tested at one interim analysis (IA) for both efficacy and non-binding futility and then at final analysis (FA). 
O'Brien-Fleming spending function will be used for efficacy testing and Hwang-Shih-DeCani spending function with $\\gamma = -10$ will be used for futility.\n\nFurther design assumptions are as follows:\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# PFS HR=0.6\nhr1_pfs <- 0.6\n# median PFS of 9.4 months in the control arm\nmed_pfs <- 9.4\n# minimum follow-up of 10 months for PFS\nminfu_pfs <- 10\n# Monthly exponential dropout of 0.019 for PFS\ndo_rate_pfs <- 0.019\n# IA timing for PFS is at approximately 75% information fraction, and is derived\n# using the number of events that was calculated by EAST which sets integer event counts to approximate targeted information\ntiming_pfs_rpact <- c(176 / 235, 1)\ntiming_pfs_gs <- c(0.75, 1)\n\n# power of approximately 95% for PFS, EAST reported power will be used\npower_pfs <- 0.9505021\n\n# Enrollment period of 24 months\nenroll_dur <- 24\n# 1:1 randomization ratio\nrand_ratio <- 1\n# alpha level of 1.25%\nalphal <- 0.0125\n```\n:::\n\n\nWe assume that EAST was initially used to calculate the target number of events and the total sample size, and we will use gsDesign/gsDesign2/rpact to reproduce those.\n\nNote that, in EAST the number of target events is reported as an integer, however, gsDesign/gsDesign2/rpact by default provide non-integer values which match *exactly* the specified information fraction. Both gsDesign/gsDesign2 can facilitate computations using integer number of events with `gsDesign::toInteger()` and `gsDesign2::to_integer()` as shown below. In order to reproduce EAST results with rpact, we will use the number of events that was calculated in EAST for `informationRates` argument in `rpact::getDesignGroupSequential()`: 176 and 235 PFS events for IA and FA respectively (please see the `timing_pfs_rpact` object in the code above).\n\nFor ease of comparison the results from EAST are summarized below:\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n```{=html}\n

Analysis

Value

Efficacy

Futility

IA1: 75%

Z

-2.6606

-0.7379

N=398

p (1-sided)

0.0039

0.2303

Events: 176

HR at bound

0.6696

0.8947

Month: 25

P(Cross) if HR=1

0.0039

0.7697

P(Cross) if HR=0.60

0.7666

0.0040

FA

Z

-2.2798

N=398

p (1-sided)

0.0113

Events: 235

HR at bound

0.7427

Month: 34

P(Cross) if HR=1

0.0125

P(Cross) if HR=0.60

0.9505

\n```\n\n:::\n:::\n\n\n- The comparison between EAST and gsDesign/gsDesign/rpact results is presented below using absolute difference in efficacy/futility boundaries and crossing probabilities up to 4 decimals. Non-zero values are highlighted.\n- Note that, in gsDesign/gsDesign Efficacy/Futility bounds refer to upper/lower bounds respectively, while in EAST these refer to the opposite directions, i.e., lower/upper bounds respectively. For the comparison purposes, we will assume that Efficacy/Futility bounds refer to upper/lower bounds respectively.\n\n## Code to reproduce EAST results\n\n### gsDesign code\n\n- gsDesign code to reproduce the above EAST results:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(gsDesign)\n\npfs_gsDesign <- gsDesign::gsSurv(\n k = length(timing_pfs_gs),\n timing = timing_pfs_gs,\n R = enroll_dur,\n eta = do_rate_pfs,\n minfup = minfu_pfs,\n T = enroll_dur + minfu_pfs,\n lambdaC = log(2) / med_pfs,\n hr = hr1_pfs,\n beta = 1 - power_pfs,\n alpha = alphal,\n sfu = sfLDOF,\n sfl = sfHSD,\n sflpar = -10,\n test.type = 4\n) |>\n toInteger()\n\n\npfs_gsDesign |>\n gsDesign::gsBoundSummary()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n Analysis Value Efficacy Futility\n IA 1: 75% Z 2.6606 0.7422\n N: 400 p (1-sided) 0.0039 0.2290\n Events: 176 ~HR at bound 0.6696 0.8941\n Month: 25 P(Cross) if HR=1 0.0039 0.7710\n P(Cross) if HR=0.6 0.7679 0.0040\n Final Z 2.2798 2.2798\n N: 400 p (1-sided) 0.0113 0.0113\n Events: 235 ~HR at bound 0.7427 0.7427\n Month: 34 P(Cross) if HR=1 0.0125 0.9875\n P(Cross) if HR=0.6 0.9510 0.0490\n```\n\n\n:::\n:::\n\n\n- gsDesign vs EAST comparison using absolute differences:\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n```{=html}\n

Analysis

Value

Efficacy

Futility

IA1: 75%

Z

0.0000

0.0043

N=398

p (1-sided)

0.0000

0.0013

Events: 176

HR at bound

0.0000

0.0006

Month: 25

P(Cross) if HR=1

0.0000

0.0013

P(Cross) if HR=0.60

0.0013

0.0000

FA

Z

0.0000

N=398

p (1-sided)

0.0000

Events: 235

HR at bound

0.0000

Month: 34

P(Cross) if HR=1

0.0000

P(Cross) if HR=0.60

0.0005

\n```\n\n:::\n:::\n\n\n### gsDesign2 code\n\n- gsDesign2 code to reproduce the above EAST results appears below.\n- Note that, here `gsDesign2::gs_power_ahr()` is used given the number of target events for each analysis based on EAST results.\n\n\n::: {.cell}\n\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(gsDesign2)\nlibrary(tibble)\n\nenroll_rate <- tibble(\n stratum = \"All\",\n duration = enroll_dur,\n rate = 398 / enroll_dur\n)\nfail_rate_pfs <- tibble(\n stratum = \"All\",\n duration = Inf, #could be set to Inf when proportional hazard is assumed\n fail_rate = log(2) / med_pfs,\n hr = hr1_pfs,\n dropout_rate = do_rate_pfs\n)\n\npfs_gsDesign2 <- gs_power_ahr(\n enroll_rate = enroll_rate,\n fail_rate = fail_rate_pfs,\n ratio = rand_ratio,\n event = c(176, 235),\n upper = gs_spending_bound,\n upar = list(\n sf = gsDesign::sfLDOF,\n total_spend = alphal\n ),\n lower = gs_spending_bound,\n lpar = list(\n sf = gsDesign::sfHSD,\n total_spend = 1 - power_pfs,\n param = -10\n ),\n info_scale = \"h0_info\"\n) |>\n to_integer()\n\npfs_gsDesign2 |>\n summary() |>\n gsDesign2::as_gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n \n \n \n \n \n \n
Bound summary for AHR design
AHR approximations of ~HR at bound
BoundZNominal p1~HR at bound2\n
Cumulative boundary crossing probability
\n
Alternate hypothesisNull hypothesis
Analysis: 1 Time: 25.4 N: 398 Events: 176 AHR: 0.6 Information fraction: 0.75
Futility0.740.23030.89470.00400.7697
Efficacy2.660.00390.66960.76660.0039
Analysis: 2 Time: 34.1 N: 398 Events: 235 AHR: 0.6 Information fraction: 1
Futility2.280.01130.74270.04950.9875
Efficacy2.280.01130.74270.95050.0125
1 One-sided p-value for experimental vs control treatment. Value < 0.5 favors experimental, > 0.5 favors control.
2 Approximate hazard ratio to cross bound.
\n
\n```\n\n:::\n:::\n\n\n- gsDesign2 vs EAST comparison using absolute differences:\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n```{=html}\n

Analysis

Value

Efficacy

Futility

IA1: 75%

Z

0.0000

0.0000

N=398

p (1-sided)

0.0000

0.0000

Events: 176

HR at bound

0.0000

0.0000

Month: 25

P(Cross) if HR=1

0.0000

0.0000

P(Cross) if HR=0.60

0.0000

0.0000

FA

Z

0.0000

N=398

p (1-sided)

0.0000

Events: 235

HR at bound

0.0000

Month: 34

P(Cross) if HR=1

0.0000

P(Cross) if HR=0.60

0.0000

\n```\n\n:::\n:::\n\n\n### rpact code\n\n- rpact code to reproduce the above EAST results appears below.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(rpact)\n\npfs_rpact_gsd <- rpact::getDesignGroupSequential(\n sided = 1,\n alpha = alphal,\n informationRates = timing_pfs_rpact,\n typeOfDesign = \"asOF\",\n beta = 1 - power_pfs,\n typeBetaSpending = \"bsHSD\",\n gammaB = -10,\n bindingFutility = FALSE\n)\n\npfs_rpact <- rpact::getSampleSizeSurvival(\n design = pfs_rpact_gsd,\n accrualTime = enroll_dur,\n followUpTime = minfu_pfs,\n lambda2 = log(2) / med_pfs,\n hazardRatio = hr1_pfs,\n dropoutRate1 = 0.2,\n dropoutRate2 = 0.2,\n dropoutTime = 12\n)\n\nkable(summary(pfs_rpact))\n```\n\n::: {.cell-output-display}\n*Sample size calculation for a survival endpoint*\n\nSequential analysis with a maximum of 2 looks (group sequential design), \none-sided overall significance level 1.25%, power 95.1%.\nThe results were calculated for a two-sample logrank test, \nH0: hazard ratio = 1, H1: hazard ratio = 0.6, control lambda(2) = 0.074, \naccrual time = 24, accrual intensity = 16.6, follow-up time = 10, \ndropout rate(1) = 0.2, dropout rate(2) = 0.2, dropout time = 12.\n\n| Stage | 1 | 2 |\n| ----- | ----- | ----- |\n| Planned information rate | 74.9% | 100% |\n| Cumulative alpha spent | 0.0039 | 0.0125 |\n| Cumulative beta spent | 0.0040 | 0.0495 |\n| Stage levels (one-sided) | 0.0039 | 0.0113 |\n| Efficacy boundary (z-value scale) | 2.661 | 2.280 |\n| Futility boundary (z-value scale) | 0.738 | |\n| Efficacy boundary (t) | 0.670 | 0.743 |\n| Futility boundary (t) | 0.895 | |\n| Cumulative power | 0.7666 | 0.9505 |\n| Number of subjects | 397.9 | 397.9 |\n| Expected number of subjects under H1 | | 397.9 |\n| Cumulative number of events | 176.0 | 235.0 |\n| Expected number of events under H1 | | 189.5 |\n| Analysis time | 25.34 | 34.00 |\n| Expected study duration under H1 | | 27.32 |\n| Overall exit probability (under H0) | 0.7736 | |\n| Overall exit probability (under 
H1) | 0.7707 | |\n| Exit probability for efficacy (under H0) | 0.0039 | |\n| Exit probability for efficacy (under H1) | 0.7666 | |\n| Exit probability for futility (under H0) | 0.7697 | |\n| Exit probability for futility (under H1) | 0.0040 | |\n\nLegend:\n\n* *(t)*: treatment effect scale\n\n:::\n:::\n\n\n- rpact vs EAST comparison using absolute differences:\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n```{=html}\n

Analysis

Value

Efficacy

Futility

IA1: 75%

Z

0.0000

0.0000

N=398

p (1-sided)

0.0000

0.0000

Events: 176

HR at bound

0.0000

0.0000

Month: 25

P(Cross) if HR=1

0.0000

0.0000

P(Cross) if HR=0.60

0.0000

0.0000

FA

Z

0.0000

N=398

p (1-sided)

0.0000

Events: 235

HR at bound

0.0000

Month: 34

P(Cross) if HR=1

0.0000

P(Cross) if HR=0.60

0.0000

\n```\n\n:::\n:::\n\n\n### SAS code\n\n- SAS code to reproduce the above rpact results appears below.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nPROC SEQDESIGN BOUNDARYSCALE=MLE ERRSPEND;\n DESIGN NSTAGES=2 \n INFO=CUM(0.748936170212766 1.0) \n ALT=UPPER \n ALPHA=0.0125 \n BETA=0.05\n METHOD(ALPHA)=ERRFUNCOBF \n METHOD(BETA)=ERRFUNCGAMMA(GAMMA=-10) \n STOP=BOTH(BETABOUNDARY=NONBINDING);\n SAMPLESIZE MODEL=TWOSAMPLESURVIVAL(\n NULLMEDSURVTIME=9.4\n HAZARDRATIO=0.6\n ACCTIME=24 \n FOLTIME=10\n LOSS=EXP(HAZARD=0.018595295942851)\n WEIGHT=1);\n ODS OUTPUT Boundary=BMLE SampleSize=SS SampleSizeSummary=SSS;\nRUN;\n```\n:::\n\n\nThe following shows the events (D) and required sample sizes (N) for IA and FA.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/gsd-tte/result-pfs-sas-comp.png){fig-align='center' width=100%}\n:::\n:::\n\n\nPlease note that the `BOUNDARYSCALE=MLE | SCORE | STDZ | PVALUE` options display the boundary values in the MLE, standardize Z, score, and p-value scales, respectively. SAS will provide a boundary information table based on the specified `BOUNDARYSCALE`. 
In the information table, Alpha indicates the efficacy boundaries, and Beta indicates futility boundaries.\n\nSAS doesn't provide a boundary information with HR, so the HR boundaries is obtained from the MLE boundaries (as MLE $=\\hat{\\theta}=-log(\\text{HR})$, see [SAS User's Guide: Test for Two Survival Distributions with a Log-Rank Test](https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.5/statug/statug_seqdesign_details52.htm#statug.seqdesign.cseqdlogrank)) via the following code.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nDATA BHR;\n SET BMLE;\n Bound_UA_HR=exp(-Bound_UA);\n Bound_UB_HR=exp(-Bound_UB);\n LABEL BOUND_UA_HR=\"Upper Alpha (HR)\" BOUND_UA_HR=\"Upper Beta (HR)\";\nPROC PRINT LABEL;\n VAR _Stage_ _InfoProp_ Bound_UA Bound_UB Bound_UA_HR Bound_UB_HR;\nRUN;\n```\n:::\n\n\nThe HR boundaries are shown below.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/gsd-tte/result-pfs-sas-bhr.png){fig-align='center' width=100%}\n:::\n:::\n\n\nThe results calculated by SAS are presneted in the table below. Please note that SAS doesn't report the probablities $P(Cross | HR=1)$ and $P(Cross | HR=0.6)$, resulting in empty cells for these results in the table.\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n```{=html}\n

Analysis

Value

Efficacy

Futility

IA1: 75%

Z

2.6606

0.7379

N=398

p (1-sided)

0.0039

0.2303

Events: 176

HR at bound

0.6696

0.8947

Month: 25

P(Cross) if HR=1

P(Cross) if HR=0.6

FA

Z

2.2798

N=398

p (1-sided)

0.0113

Events: 235

HR at bound

0.7427

Month: 34

P(Cross) if HR=1

P(Cross) if HR=0.6

\n```\n\n:::\n:::\n\n\n- SAS vs rapct comparison using absolute differences:\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n```{=html}\n

Analysis

Value

Efficacy

Futility

IA1: 75%

Z

0.0000

0.0000

N=398

p (1-sided)

0.0000

0.0000

Events: 176

HR at bound

0.0000

0.0000

Month: 25

P(Cross) if HR=1

P(Cross) if HR=0.6

FA

Z

0.0000

N=398

p (1-sided)

0.0000

Events: 235

HR at bound

0.0000

Month: 34

P(Cross) if HR=1

P(Cross) if HR=0.6

\n```\n\n:::\n:::\n\n\n- SAS vs EAST comparison using absolute differences:\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n```{=html}\n

Analysis

Value

diff_eff_sas

diff_fut_sas

IA1: 75%

Z

0.0000

0.0000

N=398

p (1-sided)

0.0000

0.0000

Events: 176

HR at bound

0.0000

0.0000

Month: 25

P(Cross) if HR=1

P(Cross) if HR=0.60

FA

Z

0.0000

N=398

p (1-sided)

0.0000

Events: 235

HR at bound

0.0000

Month: 34

P(Cross) if HR=1

P(Cross) if HR=0.60

\n```\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsessionInfo()\n```\n:::\n\n\n## References\n\n- Lachin JM and Foulkes M. Evaluation of sample size and power for analyses of survival with allowance for nonuniform patient entry, losses to follow-up, non-compliance, and stratification. Biometrics 1986;42:507-19.\n- Kim K and Tsiatis AA. Study duration for clinical trials with survival response and early stopping rule. Biometrics 1990(46): 81-92.\n- Schemper M, Wakounig S and Heinze G. The estimation of average hazard ratios by weighted cox regression. Statistics in Medicine 2009; 28(19): 2473-2489.\n- Yung G and Liu Y. Sample size and power for the weighted log-rank test and Kaplan-Meier based tests with allowance for nonproportional hazards. Biometrics 2020;76:939-50.", - "supporting": [ - "r-east_gsd_tte_files" - ], + "markdown": "---\ntitle: \"R vs EAST vs SAS: Group sequential design\"\neditor_options: \n chunk_output_type: console\n---\n\n## Introduction\n\nIn this vignette, we briefly compare sample size/power calculations for a group sequential design (GSD) for time to event endpoints between EAST and [gsDesign](https://keaven.github.io/gsDesign/), [gsDesign2](https://merck.github.io/gsDesign2/), and [rpact](https://www.rpact.org/). Note that, a comparison between rpact and gsDesign has been previously reported [here](https://www.rpact.org/vignettes/planning/rpact_vs_gsdesign_examples/#comparison-analysis-time-of-rpact-vs.-gsdesign). 
Additionally, we present comparative results between SAS `PROC SEQDESIGN` and rpact to provide a comprehensive evaluation framework.\n\nThere are two main methods that are generally used for GSD sample-size/power calculations for time to event endpoints under proportional hazard assumption:\n\n- Lachin & Foulkes (LF) Method (1986)\n- Kim & Tsiatis (KT) Method (1990)\n\nThe main difference between the two methods is that LF method requires specification of accrual duration as well as study duration, while KT method calculates study duration iteratively given accrual rates and accrual duration. In general, these two methods produce similar, but not identical results.\n\nBoth LF and KT methods are implemented in gsDesign and [SAS](https://documentation.sas.com/doc/en/pgmsascdc/v_067/statug/statug_seqdesign_details42.htm#statug.seqdesign.cseqdinputd), while KT method is implemented in EAST and rpact. gsDesign2 uses a modification of the LF method while applying an average hazard ratio (AHR) approach for non-proportional hazards (Schemper, Wakounig, and Heinze, 2009, Yung and Liu 2020). gsDesign2 also enables use of the sample size method of Yung and Liu (2020).\n\nOne additional computational difference to note for EAST vs gsDesign/gsDesign2 is the usage of different log hazard ratio variance assumptions. By default, EAST uses the variance under the null hypothesis and provides an option for using the variance under the alternative hypothesis. gsDesign, on the other hand, is using both of these variances as suggested by Lachin and Foulkes (1986). gsDesign2 has `info_scale` argument in `gsDesign2::gs_power_ahr()`, `gsDesign2::gs_design_ahr()`, which could be set to variance under the null or alternative hypothesis or to the combination of variances.\n\nBelow we provide an example of reproducing EAST results from [this vignette](https://psiaims.github.io/CAMIS/East/gsd-tte.html) using gsDesign/gsDesign2/rpact. 
As shown in the example, gsDesign2 and rpact can reproduce EAST calculations for GSD boundaries, while gsDesign results have minor differences. Similarly, our comparison between SAS `PROC SEQDESIGN` and rpact shows good agreement in the calculation, with only minimal numerical differences observed. gsDesign has an option under development to support a complete concordance with EAST.\n\n## Design example\n\nWe assume that a GSD is utilized for progression-free survival (PFS) endpoint. It will be tested at one interim analysis (IA) for both efficacy and non-binding futility and then at final analysis (FA). O'Brien-Fleming spending function will be used for efficacy testing and Hwang-Shih-DeCani spending function with $\\gamma = -10$ will be used for futility.\n\nFurther design assumptions are as follows:\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# PFS HR=0.6\nhr1_pfs <- 0.6\n# median PFS of 9.4 months in the control arm\nmed_pfs <- 9.4\n# minimum follow-up of 10 months for PFS\nminfu_pfs <- 10\n# Monthly exponential dropout of 0.019 for PFS\ndo_rate_pfs <- 0.019\n# IA timing for PFS is at approximately 75% information fraction, and is derived\n# using the number of events that was calculated by EAST which sets integer event counts to approximate targeted information\ntiming_pfs_rpact <- c(176 / 235, 1)\ntiming_pfs_gs <- c(0.75, 1)\n\n# power of approximately 95% for PFS, EAST reported power will be used\npower_pfs <- 0.9505021\n\n# Enrollment period of 24 months\nenroll_dur <- 24\n# 1:1 randomization ratio\nrand_ratio <- 1\n# alpha level of 1.25%\nalphal <- 0.0125\n```\n:::\n\n\nWe assume that EAST was initially used to calculate the target number of events and the total sample size, and we will use gsDesign/gsDesign2/rpact to reproduce those.\n\nNote that, in EAST the number of target events is reported as an integer, however, gsDesign/gsDesign2/rpact by default provide non-integer values which match *exactly* the specified information fraction. 
Both gsDesign/gsDesign2 can facilitate computations using integer number of events with `gsDesign::toInteger()` and `gsDesign2::to_integer()` as shown below. In order to reproduce EAST results with rpact, we will use the number of events that was calculated in EAST for `informationRates` argument in `rpact::getDesignGroupSequential()`: 176 and 235 PFS events for IA and FA respectively (please see the `timing_pfs_rpact` object in the code above).\n\nFor ease of comparison the results from EAST are summarized below:\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n```{=html}\n

Analysis

Value

Efficacy

Futility

IA1: 75%

Z

-2.6606

-0.7379

N=398

p (1-sided)

0.0039

0.2303

Events: 176

HR at bound

0.6696

0.8947

Month: 25

P(Cross) if HR=1

0.0039

0.7697

P(Cross) if HR=0.60

0.7666

0.0040

FA

Z

-2.2798

N=398

p (1-sided)

0.0113

Events: 235

HR at bound

0.7427

Month: 34

P(Cross) if HR=1

0.0125

P(Cross) if HR=0.60

0.9505

\n```\n\n:::\n:::\n\n\n- The comparison between EAST and gsDesign/gsDesign/rpact results is presented below using absolute difference in efficacy/futility boundaries and crossing probabilities up to 4 decimals. Non-zero values are highlighted.\n- Note that, in gsDesign/gsDesign Efficacy/Futility bounds refer to upper/lower bounds respectively, while in EAST these refer to the opposite directions, i.e., lower/upper bounds respectively. For the comparison purposes, we will assume that Efficacy/Futility bounds refer to upper/lower bounds respectively.\n\n## Code to reproduce EAST results\n\n### gsDesign code\n\n- gsDesign code to reproduce the above EAST results:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(gsDesign)\n\npfs_gsDesign <- gsDesign::gsSurv(\n k = length(timing_pfs_gs),\n timing = timing_pfs_gs,\n R = enroll_dur,\n eta = do_rate_pfs,\n minfup = minfu_pfs,\n T = enroll_dur + minfu_pfs,\n lambdaC = log(2) / med_pfs,\n hr = hr1_pfs,\n beta = 1 - power_pfs,\n alpha = alphal,\n sfu = sfLDOF,\n sfl = sfHSD,\n sflpar = -10,\n test.type = 4\n) |>\n toInteger()\n\n\npfs_gsDesign |>\n gsDesign::gsBoundSummary()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n Analysis Value Efficacy Futility\n IA 1: 75% Z 2.6606 0.7422\n N: 400 p (1-sided) 0.0039 0.2290\n Events: 176 ~HR at bound 0.6696 0.8941\n Month: 25 P(Cross) if HR=1 0.0039 0.7710\n P(Cross) if HR=0.6 0.7679 0.0040\n Final Z 2.2798 2.2798\n N: 400 p (1-sided) 0.0113 0.0113\n Events: 235 ~HR at bound 0.7427 0.7427\n Month: 34 P(Cross) if HR=1 0.0125 0.9875\n P(Cross) if HR=0.6 0.9510 0.0490\n```\n\n\n:::\n:::\n\n\n- gsDesign vs EAST comparison using absolute differences:\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n```{=html}\n

Analysis

Value

Efficacy

Futility

IA1: 75%

Z

0.0000

0.0043

N=398

p (1-sided)

0.0000

0.0013

Events: 176

HR at bound

0.0000

0.0006

Month: 25

P(Cross) if HR=1

0.0000

0.0013

P(Cross) if HR=0.60

0.0013

0.0000

FA

Z

0.0000

N=398

p (1-sided)

0.0000

Events: 235

HR at bound

0.0000

Month: 34

P(Cross) if HR=1

0.0000

P(Cross) if HR=0.60

0.0005

\n```\n\n:::\n:::\n\n\n### gsDesign2 code\n\n- gsDesign2 code to reproduce the above EAST results appears below.\n- Note that, here `gsDesign2::gs_power_ahr()` is used given the number of target events for each analysis based on EAST results.\n\n\n::: {.cell}\n\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(gsDesign2)\nlibrary(tibble)\n\nenroll_rate <- tibble(\n stratum = \"All\",\n duration = enroll_dur,\n rate = 398 / enroll_dur\n)\nfail_rate_pfs <- tibble(\n stratum = \"All\",\n duration = Inf, #could be set to Inf when proportional hazard is assumed\n fail_rate = log(2) / med_pfs,\n hr = hr1_pfs,\n dropout_rate = do_rate_pfs\n)\n\npfs_gsDesign2 <- gs_power_ahr(\n enroll_rate = enroll_rate,\n fail_rate = fail_rate_pfs,\n ratio = rand_ratio,\n event = c(176, 235),\n upper = gs_spending_bound,\n upar = list(\n sf = gsDesign::sfLDOF,\n total_spend = alphal\n ),\n lower = gs_spending_bound,\n lpar = list(\n sf = gsDesign::sfHSD,\n total_spend = 1 - power_pfs,\n param = -10\n ),\n info_scale = \"h0_info\"\n) |>\n to_integer()\n\npfs_gsDesign2 |>\n summary() |>\n gsDesign2::as_gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n \n \n \n \n \n \n
Bound summary for AHR design
AHR approximations of ~HR at bound
BoundZNominal p1~HR at bound2\n
Cumulative boundary crossing probability
\n
Alternate hypothesisNull hypothesis
Analysis: 1 Time: 25.4 N: 398 Events: 176 AHR: 0.6 Information fraction: 0.75
Futility0.740.23030.89470.00400.7697
Efficacy2.660.00390.66960.76660.0039
Analysis: 2 Time: 34.1 N: 398 Events: 235 AHR: 0.6 Information fraction: 1
Futility2.280.01130.74270.04950.9875
Efficacy2.280.01130.74270.95050.0125
1 One-sided p-value for experimental vs control treatment. Value < 0.5 favors experimental, > 0.5 favors control.
2 Approximate hazard ratio to cross bound.
\n
\n```\n\n:::\n:::\n\n\n- gsDesign2 vs EAST comparison using absolute differences:\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n```{=html}\n

Analysis

Value

Efficacy

Futility

IA1: 75%

Z

0.0000

0.0000

N=398

p (1-sided)

0.0000

0.0000

Events: 176

HR at bound

0.0000

0.0000

Month: 25

P(Cross) if HR=1

0.0000

0.0000

P(Cross) if HR=0.60

0.0000

0.0000

FA

Z

0.0000

N=398

p (1-sided)

0.0000

Events: 235

HR at bound

0.0000

Month: 34

P(Cross) if HR=1

0.0000

P(Cross) if HR=0.60

0.0000

\n```\n\n:::\n:::\n\n\n### rpact code\n\n- rpact code to reproduce the above EAST results appears below.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(rpact)\n\npfs_rpact_gsd <- rpact::getDesignGroupSequential(\n sided = 1,\n alpha = alphal,\n informationRates = timing_pfs_rpact,\n typeOfDesign = \"asOF\",\n beta = 1 - power_pfs,\n typeBetaSpending = \"bsHSD\",\n gammaB = -10,\n bindingFutility = FALSE\n)\n\npfs_rpact <- rpact::getSampleSizeSurvival(\n design = pfs_rpact_gsd,\n accrualTime = enroll_dur,\n followUpTime = minfu_pfs,\n lambda2 = log(2) / med_pfs,\n hazardRatio = hr1_pfs,\n dropoutRate1 = 0.2,\n dropoutRate2 = 0.2,\n dropoutTime = 12\n)\n\nkable(summary(pfs_rpact))\n```\n\n::: {.cell-output-display}\n*Sample size calculation for a survival endpoint*\n\nSequential analysis with a maximum of 2 looks (group sequential design), \none-sided overall significance level 1.25%, power 95.1%.\nThe results were calculated for a two-sample logrank test, \nH0: hazard ratio = 1, H1: hazard ratio = 0.6, control lambda(2) = 0.074, \naccrual time = 24, accrual intensity = 16.6, follow-up time = 10, \ndropout rate(1) = 0.2, dropout rate(2) = 0.2, dropout time = 12.\n\n| Stage | 1 | 2 |\n| ----- | ----- | ----- |\n| Planned information rate | 74.9% | 100% |\n| Cumulative alpha spent | 0.0039 | 0.0125 |\n| Cumulative beta spent | 0.0040 | 0.0495 |\n| Stage levels (one-sided) | 0.0039 | 0.0113 |\n| Efficacy boundary (z-value scale) | 2.661 | 2.280 |\n| Futility boundary (z-value scale) | 0.738 | |\n| Efficacy boundary (t) | 0.670 | 0.743 |\n| Futility boundary (t) | 0.895 | |\n| Cumulative power | 0.7666 | 0.9505 |\n| Number of subjects | 397.9 | 397.9 |\n| Expected number of subjects under H1 | | 397.9 |\n| Cumulative number of events | 176.0 | 235.0 |\n| Expected number of events under H1 | | 189.5 |\n| Analysis time | 25.34 | 34.00 |\n| Expected study duration under H1 | | 27.32 |\n| Overall exit probability (under H0) | 0.7736 | |\n| Overall exit probability (under 
H1) | 0.7707 | |\n| Exit probability for efficacy (under H0) | 0.0039 | |\n| Exit probability for efficacy (under H1) | 0.7666 | |\n| Exit probability for futility (under H0) | 0.7697 | |\n| Exit probability for futility (under H1) | 0.0040 | |\n\nLegend:\n\n* *(t)*: treatment effect scale\n\n:::\n:::\n\n\n- rpact vs EAST comparison using absolute differences:\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n```{=html}\n

Analysis

Value

Efficacy

Futility

IA1: 75%

Z

0.0000

0.0000

N=398

p (1-sided)

0.0000

0.0000

Events: 176

HR at bound

0.0000

0.0000

Month: 25

P(Cross) if HR=1

0.0000

0.0000

P(Cross) if HR=0.60

0.0000

0.0000

FA

Z

0.0000

N=398

p (1-sided)

0.0000

Events: 235

HR at bound

0.0000

Month: 34

P(Cross) if HR=1

0.0000

P(Cross) if HR=0.60

0.0000

\n```\n\n:::\n:::\n\n\n### SAS code\n\n- SAS code to reproduce the above rpact results appears below.\n\n```sas\nPROC SEQDESIGN BOUNDARYSCALE=MLE ERRSPEND;\n DESIGN NSTAGES=2 \n INFO=CUM(0.748936170212766 1.0) \n ALT=UPPER \n ALPHA=0.0125 \n BETA=0.05\n METHOD(ALPHA)=ERRFUNCOBF \n METHOD(BETA)=ERRFUNCGAMMA(GAMMA=-10) \n STOP=BOTH(BETABOUNDARY=NONBINDING);\n SAMPLESIZE MODEL=TWOSAMPLESURVIVAL(\n NULLMEDSURVTIME=9.4\n HAZARDRATIO=0.6\n ACCTIME=24 \n FOLTIME=10\n LOSS=EXP(HAZARD=0.018595295942851)\n WEIGHT=1);\n ODS OUTPUT Boundary=BMLE SampleSize=SS SampleSizeSummary=SSS;\nRUN;\n```\n\nThe following shows the events (D) and required sample sizes (N) for IA and FA.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/gsd-tte/result-pfs-sas-comp.png){fig-align='center' width=100%}\n:::\n:::\n\n\nPlease note that the `BOUNDARYSCALE=MLE | SCORE | STDZ | PVALUE` options display the boundary values in the MLE, standardize Z, score, and p-value scales, respectively. SAS will provide a boundary information table based on the specified `BOUNDARYSCALE`. 
In the information table, Alpha indicates the efficacy boundaries, and Beta indicates futility boundaries.\n\nSAS doesn't provide a boundary information with HR, so the HR boundaries is obtained from the MLE boundaries (as MLE $=\\hat{\\theta}=-log(\\text{HR})$, see [SAS User's Guide: Test for Two Survival Distributions with a Log-Rank Test](https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.5/statug/statug_seqdesign_details52.htm#statug.seqdesign.cseqdlogrank)) via the following code.\n\n```sas\nDATA BHR;\n SET BMLE;\n Bound_UA_HR=exp(-Bound_UA);\n Bound_UB_HR=exp(-Bound_UB);\n LABEL BOUND_UA_HR=\"Upper Alpha (HR)\" BOUND_UA_HR=\"Upper Beta (HR)\";\nPROC PRINT LABEL;\n VAR _Stage_ _InfoProp_ Bound_UA Bound_UB Bound_UA_HR Bound_UB_HR;\nRUN;\n```\n\nThe HR boundaries are shown below.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/gsd-tte/result-pfs-sas-bhr.png){fig-align='center' width=100%}\n:::\n:::\n\n\nThe results calculated by SAS are presneted in the table below. Please note that SAS doesn't report the probablities $P(Cross | HR=1)$ and $P(Cross | HR=0.6)$, resulting in empty cells for these results in the table.\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n```{=html}\n

Analysis

Value

Efficacy

Futility

IA1: 75%

Z

2.6606

0.7379

N=398

p (1-sided)

0.0039

0.2303

Events: 176

HR at bound

0.6696

0.8947

Month: 25

P(Cross) if HR=1

P(Cross) if HR=0.6

FA

Z

2.2798

N=398

p (1-sided)

0.0113

Events: 235

HR at bound

0.7427

Month: 34

P(Cross) if HR=1

P(Cross) if HR=0.6

\n```\n\n:::\n:::\n\n\n- SAS vs rapct comparison using absolute differences:\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n```{=html}\n

Analysis

Value

Efficacy

Futility

IA1: 75%

Z

0.0000

0.0000

N=398

p (1-sided)

0.0000

0.0000

Events: 176

HR at bound

0.0000

0.0000

Month: 25

P(Cross) if HR=1

P(Cross) if HR=0.6

FA

Z

0.0000

N=398

p (1-sided)

0.0000

Events: 235

HR at bound

0.0000

Month: 34

P(Cross) if HR=1

P(Cross) if HR=0.6

\n```\n\n:::\n:::\n\n\n- SAS vs EAST comparison using absolute differences:\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n```{=html}\n

Analysis

Value

diff_eff_sas

diff_fut_sas

IA1: 75%

Z

0.0000

0.0000

N=398

p (1-sided)

0.0000

0.0000

Events: 176

HR at bound

0.0000

0.0000

Month: 25

P(Cross) if HR=1

P(Cross) if HR=0.60

FA

Z

0.0000

N=398

p (1-sided)

0.0000

Events: 235

HR at bound

0.0000

Month: 34

P(Cross) if HR=1

P(Cross) if HR=0.60

\n```\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsessionInfo()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nR version 4.5.2 (2025-10-31)\nPlatform: x86_64-pc-linux-gnu\nRunning under: Ubuntu 24.04.3 LTS\n\nMatrix products: default\nBLAS: /usr/lib/x86_64-linux-gnu/openblas-pthread/libblas.so.3 \nLAPACK: /usr/lib/x86_64-linux-gnu/openblas-pthread/libopenblasp-r0.3.26.so; LAPACK version 3.12.0\n\nlocale:\n [1] LC_CTYPE=en_US.UTF-8 LC_NUMERIC=C \n [3] LC_TIME=en_US.UTF-8 LC_COLLATE=en_US.UTF-8 \n [5] LC_MONETARY=en_US.UTF-8 LC_MESSAGES=en_US.UTF-8 \n [7] LC_PAPER=en_US.UTF-8 LC_NAME=C \n [9] LC_ADDRESS=C LC_TELEPHONE=C \n[11] LC_MEASUREMENT=en_US.UTF-8 LC_IDENTIFICATION=C \n\ntime zone: Europe/London\ntzcode source: system (glibc)\n\nattached base packages:\n[1] stats graphics grDevices datasets utils methods base \n\nother attached packages:\n[1] rpact_4.3.0 tibble_3.3.1 gsDesign2_1.1.8 gsDesign_3.9.0 \n[5] flextable_0.9.11\n\nloaded via a namespace (and not attached):\n [1] gt_1.3.0 rappdirs_0.3.4 sass_0.4.10 \n [4] generics_0.1.4 tidyr_1.3.2 fontLiberation_0.1.0 \n [7] renv_1.0.10 xml2_1.5.2 r2rtf_1.3.0 \n[10] digest_0.6.39 magrittr_2.0.4 evaluate_1.0.5 \n[13] grid_4.5.2 RColorBrewer_1.1-3 fastmap_1.2.0 \n[16] jsonlite_2.0.0 zip_2.3.3 purrr_1.2.1 \n[19] scales_1.4.0 fontBitstreamVera_0.1.1 textshaping_1.0.4 \n[22] cli_3.6.5 rlang_1.1.7 fontquiver_0.2.1 \n[25] withr_3.0.2 yaml_2.3.12 otel_0.2.0 \n[28] gdtools_0.5.0 tools_4.5.2 officer_0.7.3 \n[31] uuid_1.2-2 dplyr_1.2.0 ggplot2_4.0.2 \n[34] vctrs_0.7.1 R6_2.6.1 lifecycle_1.0.5 \n[37] fs_1.6.6 htmlwidgets_1.6.4 ragg_1.5.0 \n[40] pkgconfig_2.0.3 pillar_1.11.1 gtable_0.3.6 \n[43] data.table_1.18.2.1 glue_1.8.0 Rcpp_1.1.1 \n[46] systemfonts_1.3.1 xfun_0.56 tidyselect_1.2.1 \n[49] knitr_1.51 xtable_1.8-4 farver_2.1.2 \n[52] htmltools_0.5.9 patchwork_1.3.2 rmarkdown_2.30 \n[55] compiler_4.5.2 S7_0.2.1 askpass_1.2.1 \n[58] openssl_2.3.4 \n```\n\n\n:::\n:::\n\n\n## References\n\n- Lachin JM and Foulkes 
M. Evaluation of sample size and power for analyses of survival with allowance for nonuniform patient entry, losses to follow-up, non-compliance, and stratification. Biometrics 1986;42:507-19.\n- Kim K and Tsiatis AA. Study duration for clinical trials with survival response and early stopping rule. Biometrics 1990(46): 81-92.\n- Schemper M, Wakounig S and Heinze G. The estimation of average hazard ratios by weighted cox regression. Statistics in Medicine 2009; 28(19): 2473-2489.\n- Yung G and Liu Y. Sample size and power for the weighted log-rank test and Kaplan-Meier based tests with allowance for nonproportional hazards. Biometrics 2020;76:939-50.", + "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" ], diff --git a/_freeze/Comp/r-sas-python_survey-stats-summary/execute-results/html.json b/_freeze/Comp/r-sas-python_survey-stats-summary/execute-results/html.json index 6268494ad..c5fcc4945 100644 --- a/_freeze/Comp/r-sas-python_survey-stats-summary/execute-results/html.json +++ b/_freeze/Comp/r-sas-python_survey-stats-summary/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "886e364bd8434d6a5b9e3197d09afd06", + "hash": "2fdbf7bb2e5745b861a647c4d512ac44", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"R vs SAS vs Python Survey Summary Statistics\"\nbibliography: survey-stats-summary.bib\nexecute: \n eval: false\n---\n\nThis document will compare the survey summary statistics functionality in SAS (available through SAS/STAT), R (available from the [`{survey}`](%5B%60%7Bsurvey%7D%60%5D(https://r-survey.r-forge.r-project.org/survey/html/api.html)) package), and Python (available from the [`samplics`](https://samplics-org.github.io/samplics/) package), highlighting differences in methods and results. Only the default Taylor series linearisation method for calculating variances is used in all languages. 
A more detailed comparison between R and SAS for specific methods and use-cases is available in [@2017_YRBS], [@so2020modelling], or [@adamico_2009]. For a general guide to survey statistics, which has companion guides for both R and SAS, see [@Lohr_2022].\n\n# Result Comparison\n\nThe following table shows different survey summary statistics, the capabilities of each language, and whether or not the results match. Each analysis also includes calculating the standard error and confidence intervals.\n\n| Analysis | Supported in R | Supported in SAS | Supported in Python | Results Match\\* | Notes |\n|------------|------------|------------|------------|------------|------------|\n| Mean | [Yes](../R/survey-stats-summary.html#Mean) | [Yes](../SAS/survey-stats-summary.html#Mean) | [Yes](../Python/survey-stats-summary.html#Mean) | Yes | Must specify degrees of freedom in R for confidence limits |\n| Total | [Yes](../R/survey-stats-summary.html#Total) | [Yes](../SAS/survey-stats-summary.html#Total) | [Yes](../Python/survey-stats-summary.html#Total) | Yes | Must specify degrees of freedom in R for confidence limits |\n| Ratios | [Yes](../R/survey-stats-summary.html#Ratios) | [Yes](../SAS/survey-stats-summary.html#Ratios) | [Yes](../Python/survey-stats-summary.html#Ratios) | Yes | Must specify degrees of freedom in R for confidence limits |\n| Proportions | [Yes](../R/survey-stats-summary.html#Proportions) | [Yes](../SAS/survey-stats-summary.html##Proportions) | [Yes](../Python/survey-stats-summary.html#Proportions) | Yes\\*\\* | In Python, the confidence limits of proportions only match to 1 or 2 s.f. This is due to a different method being used, which is undocumented. 
|\n| Quantiles | [Yes](../R/survey-stats-summary.html#Quantiles) | [Yes](../SAS/survey-stats-summary.html#Quantiles) | No | [No](#Quantiles) | Different methods for calculating quantiles |\n| Domain Analysis | [Yes](../R/survey-stats-summary.html#Summary%20Statistics%20on%20Complex%20Survey%20Designs) | [Yes](../SAS/survey-stats-summary.html##Summary%20Statistics%20on%20Complex%20Survey%20Designs) | [Yes](../Python/survey-stats-summary.html#Domain%20Estimations) | Yes | |\n| Design Effect | [Yes](../R/survey-stats-summary.html#Summary%20Statistics%20on%20Complex%20Survey%20Designs) | [Yes](../SAS/survey-stats-summary.html##Summary%20Statistics%20on%20Complex%20Survey%20Designs) | No | Yes | Set `deff=\"replace\"` in R to match SAS exactly |\n\n*\\*Results match where feature is available*\n\n*\\*\\*For confidence limits of proportions near 0 and 1, `survey::svyciprop` can be more accurate than `confint` in R, but does not match other software.*\n\nFor the full R, SAS, and Python code and results used for this comparison, see below:\n\n::: {.callout-note collapse=\"true\" appearance=\"minimal\" title=\"Show Code\"}\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(survey)\n\ndata(\"nhanes\")\n\nnhanes_design <- survey::svydesign(\n data = nhanes,\n id = ~SDMVPSU, # Specify the PSU/cluster column\n strata = ~SDMVSTRA, # The stratification column\n weights = ~WTMEC2YR, # The weighting column\n nest = TRUE # Allows for PSUs with the same name nested within different strata\n)\n\n# Mean of HI_CHOL\nhi_chol_mean <- survey::svymean(~HI_CHOL, nhanes_design, na.rm = TRUE)\n\n# Sum of HI_CHOL\nhi_chol_sum <- survey::svytotal(~HI_CHOL, nhanes_design, na.rm = TRUE)\n\n# Ratio of HI_CHOL / RIAGENDR\nhi_chol_ratio <- survey::svyratio(\n numerator = ~HI_CHOL,\n denominator = ~RIAGENDR,\n nhanes_design,\n na.rm = TRUE,\n ci = TRUE,\n se = TRUE,\n separate = FALSE\n)\n\n# Proportion of different AGECAT values\nagecat_props <- survey::svymean(~agecat, nhanes_design, na.rm = 
TRUE)\n\n# Quantiles of HI_CHOL\nhi_chol_quart <- survey::svyquantile(\n ~HI_CHOL,\n nhanes_design,\n quantiles = c(0.025, 0.5, 0.975),\n na.rm = TRUE,\n ci = TRUE\n)\n\n# Domain analysis of mean of HI_CHOL by race, with design effect\nhi_chol_mean_by_race <- survey::svyby(\n ~HI_CHOL,\n ~race,\n nhanes_design,\n svymean,\n na.rm = TRUE,\n deff = \"replace\"\n)\n\nprint(list(\n \"Mean of HI_CHOL\" = coef(hi_chol_mean),\n \"SE of Mean HI_CHOL\" = survey::SE(hi_chol_mean),\n \"CL of Mean HI_CHOL\" = confint(\n hi_chol_mean,\n df = survey::degf(nhanes_design)\n ),\n \"Sum of HI_CHOL\" = coef(hi_chol_sum),\n \"SE of Sum HI_CHOL\" = survey::SE(hi_chol_sum),\n \"CL of Sum HI_CHOL\" = confint(hi_chol_sum, df = survey::degf(nhanes_design)),\n \"Ratio of HI_CHOL / RIAGENDR\" = coef(hi_chol_ratio),\n \"SE of Ratio HI_CHOL / RIAGENDR\" = survey::SE(hi_chol_ratio),\n \"CL of Ratio HI_CHOL / RIAGENDR\" = confint(\n hi_chol_ratio,\n df = survey::degf(nhanes_design)\n ),\n \"Proportion of AGECAT\" = coef(agecat_props),\n \"SE of Proportion AGECAT\" = survey::SE(agecat_props),\n \"CL of Proportion AGECAT\" = confint(\n agecat_props,\n df = survey::degf(nhanes_design)\n ),\n \"Quantiles of HI_CHOL\" = coef(hi_chol_quart),\n \"SE of Quantiles HI_CHOL\" = survey::SE(hi_chol_quart),\n \"CL of Quantiles HI_CHOL\" = confint(\n hi_chol_quart,\n df = survey::degf(nhanes_design)\n ),\n \"Mean of HI_CHOL by race\" = coef(hi_chol_mean_by_race),\n \"SE of HI_CHOL by race\" = survey::SE(hi_chol_mean_by_race),\n \"CL of HI_CHOL by race\" = confint(\n hi_chol_mean_by_race,\n df = survey::degf(nhanes_design)\n ),\n \"Design Effect of HI_CHOL by race\" = hi_chol_mean_by_race$DEff.HI_CHOL\n))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n$`Mean of HI_CHOL`\n HI_CHOL \n0.112143 \n\n$`SE of Mean HI_CHOL`\n HI_CHOL\nHI_CHOL 0.00544584\n\n$`CL of Mean HI_CHOL`\n 2.5 % 97.5 %\nHI_CHOL 0.1005983 0.1236876\n\n$`Sum of HI_CHOL`\n HI_CHOL \n28635245 \n\n$`SE of Sum HI_CHOL`\n HI_CHOL\nHI_CHOL 
2020711\n\n$`CL of Sum HI_CHOL`\n 2.5 % 97.5 %\nHI_CHOL 24351530 32918961\n\n$`Ratio of HI_CHOL / RIAGENDR`\nHI_CHOL/RIAGENDR \n 0.07422209 \n\n$`SE of Ratio HI_CHOL / RIAGENDR`\nHI_CHOL/RIAGENDR \n 0.003714728 \n\n$`CL of Ratio HI_CHOL / RIAGENDR`\n 2.5 % 97.5 %\nHI_CHOL/RIAGENDR 0.06634722 0.08209696\n\n$`Proportion of AGECAT`\n agecat(0,19] agecat(19,39] agecat(39,59] agecat(59,Inf] \n 0.2077495 0.2934079 0.3032896 0.1955530 \n\n$`SE of Proportion AGECAT`\n agecat(0,19] agecat(19,39] agecat(39,59] agecat(59,Inf] \n 0.006129950 0.009560692 0.004519463 0.008092578 \n\n$`CL of Proportion AGECAT`\n 2.5 % 97.5 %\nagecat(0,19] 0.1947546 0.2207444\nagecat(19,39] 0.2731401 0.3136756\nagecat(39,59] 0.2937088 0.3128704\nagecat(59,Inf] 0.1783975 0.2127085\n\n$`Quantiles of HI_CHOL`\nHI_CHOL.0.025 HI_CHOL.0.5 HI_CHOL.0.975 \n 0 0 1 \n\n$`SE of Quantiles HI_CHOL`\nHI_CHOL.0.025 HI_CHOL.0.5 HI_CHOL.0.975 \n 0.2358596 0.2358596 0.0000000 \n\n$`CL of Quantiles HI_CHOL`\n l u\nHI_CHOL.0.025 0 1\nHI_CHOL.0.5 0 1\nHI_CHOL.0.975 1 1\n\n$`Mean of HI_CHOL by race`\n 1 2 3 4 \n0.10149167 0.12164921 0.07864006 0.09967861 \n\n$`SE of HI_CHOL by race`\n[1] 0.006245843 0.006604134 0.010384645 0.024666227\n\n$`CL of HI_CHOL by race`\n 2.5 % 97.5 %\n1 0.08825107 0.1147323\n2 0.10764907 0.1356493\n3 0.05662560 0.1006545\n4 0.04738854 0.1519687\n\n$`Design Effect of HI_CHOL by race`\n[1] 1.082734 1.407822 2.091156 3.098290\n```\n\n\n:::\n:::\n\n\n## SAS\n\n\n::: {.cell}\n\n```{.sas .cell-code}\n* Mean, sum quantile of HI_CHOL;\nproc surveymeans data=nhanes mean sum clm quantile=(0.025 0.5 0.975);\n cluster SDMVPSU;\n strata SDMVSTRA;\n weight WTMEC2YR;\n var HI_CHOL;\nrun;\n\n* Ratio of HI_CHOL / RIAGENDR;\nproc surveymeans data=nhanes;\n cluster SDMVPSU;\n strata SDMVSTRA;\n weight WTMEC2YR;\n ratio HI_CHOL / RIAGENDR;\nrun;\n\n* Proportions of agecat;\nproc surveyfreq data=nhanes;\n cluster SDMVPSU;\n strata SDMVSTRA;\n weight WTMEC2YR;\n table agecat / cl;\nrun;\n\n* Mean and DEFF of 
HI_CHOL by race;\nproc surveymeans data=nhanes mean deff;\n cluster SDMVPSU;\n strata SDMVSTRA;\n weight WTMEC2YR;\n domain race;\n var HI_CHOL;\nrun;\n```\n:::\n\n\n``` default\n\n The SURVEYMEANS Procedure\n\n Data Summary\n\n Number of Strata 15\n Number of Clusters 31\n Number of Observations 8591\n Sum of Weights 276536446\n\n\n Statistics\n\n Std Error Std Error\n Variable Mean of Mean 95% CL for Mean Sum of Sum 95% CL for Sum\n --------------------------------------------------------------------------------------------------------------------------\n HI_CHOL 0.112143 0.005446 0.10059829 0.12368762 28635245 2020711 24351529.8 32918960.7\n --------------------------------------------------------------------------------------------------------------------------\n\n\n Quantiles\n\n Std\n Variable Percentile Estimate Error 95% Confidence Limits\n ---------------------------------------------------------------------------------\n HI_CHOL 2.5 0 0.024281 -0.0514730 0.05147298\n 50 Median 0 0.024281 -0.0514730 0.05147298\n 97.5 0.777070 0.024281 0.7255973 0.82854324\n ---------------------------------------------------------------------------------\n\n The SURVEYMEANS Procedure\n\n Data Summary\n\n Number of Strata 15\n Number of Clusters 31\n Number of Observations 8591\n Sum of Weights 276536446\n\n\n Statistics\n\n Std Error\n Variable N Mean of Mean 95% CL for Mean\n ---------------------------------------------------------------------------------\n HI_CHOL 7846 0.112143 0.005446 0.10059829 0.12368762\n RIAGENDR 8591 1.512019 0.005302 1.50077977 1.52325807\n ---------------------------------------------------------------------------------\n\n\n Ratio Analysis\n\n Std\n Numerator Denominator N Ratio Error 95% CL for Ratio\n ----------------------------------------------------------------------------------------------\n HI_CHOL RIAGENDR 7846 0.074222 0.003715 0.06634722 0.08209696\n 
----------------------------------------------------------------------------------------------\n\n The SURVEYFREQ Procedure\n\n Data Summary\n\n Number of Strata 15\n Number of Clusters 31\n Number of Observations 8591\n Sum of Weights 276536446\n\n\n Table of agecat\n\n Weighted Std Err of Std Err of 95% Confidence Limits\n agecat Frequency Frequency Wgt Freq Percent Percent for Percent\n -------------------------------------------------------------------------------------------------------\n (0,19] 2532 57450307 3043819 20.7749 0.6130 19.4755 22.0744\n (19,39] 2033 81137975 3692818 29.3408 0.9561 27.3140 31.3676\n (39,59] 2021 83870623 4853936 30.3290 0.4519 29.3709 31.2870\n (59,Inf] 2005 54077541 4284296 19.5553 0.8093 17.8398 21.2709\n\n Total 8591 276536446 13935730 100.0000 \n -------------------------------------------------------------------------------------------------------\n\n The SURVEYMEANS Procedure\n\n Data Summary\n\n Number of Strata 15\n Number of Clusters 31\n Number of Observations 8591\n Sum of Weights 276536446\n\n\n Statistics\n\n Std Error Design\n Variable Mean of Mean Effect\n --------------------------------------------------------\n HI_CHOL 0.112143 0.005446 2.336725\n --------------------------------------------------------\n\n The SURVEYMEANS Procedure\n\n Statistics for race Domains\n\n Std Error Design\n race Variable Mean of Mean Effect\n ------------------------------------------------------------------------\n 1 HI_CHOL 0.101492 0.006246 1.082734\n 2 HI_CHOL 0.121649 0.006604 1.407822\n 3 HI_CHOL 0.078640 0.010385 2.091156\n 4 HI_CHOL 0.099679 0.024666 3.098290\n ------------------------------------------------------------------------\n```\n\n## Python\n\n\n::: {.cell}\n\n```{.python .cell-code}\nimport pandas as pd\nfrom samplics import TaylorEstimator\nfrom samplics.utils.types import PopParam\n\nnhanes = pd.read_csv(\"../data/nhanes.csv\")\n\nnhanes_design_kwargs = dict(\n psu=nhanes[\"SDMVPSU\"],\n 
stratum=nhanes[\"SDMVSTRA\"],\n samp_weight=nhanes[\"WTMEC2YR\"],\n remove_nan=True,\n)\n\n# Mean of HI_CHOL\nmean_estimator = TaylorEstimator(PopParam.mean)\nmean_estimator.estimate(nhanes[\"HI_CHOL\"], **nhanes_design_kwargs)\nhi_chol_means = mean_estimator.to_dataframe()\n\n# Sum of HI_CHOL\ntotal_estimator = TaylorEstimator(PopParam.total)\ntotal_estimator.estimate(nhanes[\"HI_CHOL\"], **nhanes_design_kwargs)\nhi_chol_totals = total_estimator.to_dataframe()\n\n# Ratio of HI_CHOL / RIAGENDR\nratio_estimator = TaylorEstimator(PopParam.ratio)\nratio_estimator.estimate(\n y=nhanes[\"HI_CHOL\"], x=nhanes[\"RIAGENDR\"], **nhanes_design_kwargs\n)\nhi_chol_ratio = ratio_estimator.to_dataframe()\n\n# Proportion of different AGECAT values\nprop_estimator = TaylorEstimator(PopParam.prop)\nprop_estimator.estimate(nhanes[\"agecat\"], **nhanes_design_kwargs)\nagecat_prop = prop_estimator.to_dataframe()\n\n# Quantiles of HI_CHOL\n# NA\n\n# Domain analysis of mean of HI_CHOL by race, with design effect\nmean_estimator = TaylorEstimator(PopParam.mean)\nmean_estimator.estimate(\n nhanes[\"HI_CHOL\"],\n **nhanes_design_kwargs,\n domain=nhanes[\"race\"],\n deff=True, # Design effect param currently has no effect\n)\nhi_chol_domain_means = mean_estimator.to_dataframe()\n\n\nag_dict = agecat_prop.set_index(\"_level\").to_dict()\nhc_dict = hi_chol_domain_means.set_index(\"_domain\").to_dict()\n\nprint(\n f\"\"\"\n Mean of HI_CHOL: {hi_chol_means[\"_estimate\"][0]}\n SE of Mean HI_CHOL: {hi_chol_means[\"_stderror\"][0]}\n CL of Mean HI_CHOL: {(hi_chol_means[\"_lci\"][0], hi_chol_means[\"_uci\"][0])}\n Sum of HI_CHOL: {hi_chol_totals[\"_estimate\"][0]}\n SE of Sum HI_CHOL: {hi_chol_totals[\"_stderror\"][0]}\n CL of Sum HI_CHOL: {(hi_chol_totals[\"_lci\"][0], hi_chol_totals[\"_uci\"][0])}\n Ratio of HI_CHOL / RIAGENDR: {hi_chol_ratio[\"_estimate\"][0]}\n SE of Ratio HI_CHOL / RIAGENDR: {hi_chol_ratio[\"_stderror\"][0]}\n CL of Ratio HI_CHOL / RIAGENDR: {(hi_chol_ratio[\"_lci\"][0], 
hi_chol_ratio[\"_uci\"][0])}\n Proportion of AGECAT: {ag_dict[\"_estimate\"]}\n SE of Proportion AGECAT: {ag_dict[\"_stderror\"]}\n LCL of Proportion AGECAT: {ag_dict[\"_lci\"]}\n UCL of Proportion AGECAT: {ag_dict[\"_uci\"]}\n Quantiles of HI_CHOL: Not available\n Mean of HI_CHOL by race: {hc_dict[\"_estimate\"]}\n SE of HI_CHL by race: {hc_dict[\"_stderror\"]}\n LCL of HI_CHOL by race: {hc_dict[\"_lci\"]}\n UCL of HI_CHOL by race: {hc_dict[\"_uci\"]}\n Design Effect of HI_CHOL by race: Not available\n \"\"\"\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n Mean of HI_CHOL: 0.11214295634969222\n SE of Mean HI_CHOL: 0.005445839698954557\n CL of Mean HI_CHOL: (np.float64(0.1005982919131703), np.float64(0.12368762078621415))\n Sum of HI_CHOL: 28635245.254672\n SE of Sum HI_CHOL: 2020710.7436996205\n CL of Sum HI_CHOL: (np.float64(24351529.84091034), np.float64(32918960.668433655))\n Ratio of HI_CHOL / RIAGENDR: 0.07422209323594066\n SE of Ratio HI_CHOL / RIAGENDR: 0.0037147278931070065\n CL of Ratio HI_CHOL / RIAGENDR: (np.float64(0.06634722189017901), np.float64(0.0820969645817023))\n Proportion of AGECAT: {'(0,19]': 0.2077494937870972, '(19,39]': 0.29340788818591346, '(39,59]': 0.30328958320385285, '(59,Inf]': 0.19555303482313666}\n SE of Proportion AGECAT: {'(0,19]': 0.006129950336419631, '(19,39]': 0.009560691634608896, '(39,59]': 0.004519462827363183, '(59,Inf]': 0.008092578243976422}\n LCL of Proportion AGECAT: {'(0,19]': 0.19505410930097866, '(19,39]': 0.27355685874096586, '(39,59]': 0.2937950591158628, '(59,Inf]': 0.1789647230500222}\n UCL of Proportion AGECAT: {'(0,19]': 0.2210442684297426, '(19,39]': 0.3140766293472951, '(39,59]': 0.31295496708023285, '(59,Inf]': 0.21327950895208636}\n Quantiles of HI_CHOL: Not available\n Mean of HI_CHOL by race: {1: 0.10149166545397208, 2: 0.12164920535593333, 3: 0.07864006039908408, 4: 0.09967860947712034}\n SE of HI_CHL by race: {1: 0.006245843308749599, 2: 0.006604133623532979, 3: 0.010384645000548863, 4: 
0.024666226871851268}\n LCL of HI_CHOL by race: {1: 0.0882510691256497, 2: 0.10764906749064211, 3: 0.056625596431891564, 4: 0.04738854441969514}\n UCL of HI_CHOL by race: {1: 0.11473226178229445, 2: 0.13564934322122454, 3: 0.1006545243662766, 4: 0.15196867453454554}\n Design Effect of HI_CHOL by race: Not available\n \n```\n\n\n:::\n:::\n\n:::\n\n# Differences\n\n## Quantiles {#quantiles}\n\n`samplics` in Python does not have a method for calculating quantiles, and in R and SAS the available methods lead to different results. To demonstrate the differences in calculating quantiles, we will use the `apisrs` dataset from the `survey` package in R [@API_2000].\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(survey)\n\ndata(\"api\")\n\nhead(apisrs) |>\n gt::gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n \n
cdsstypenamesnamesnumdnamednumcnamecnumflagpcttestapi00api99targetgrowthsch.widecomp.impbothawardsmealsellyr.rndmobilityacs.k3acs.46acs.corepct.respnot.hsghsgsome.colcol.gradgrad.schavg.edfullemerenrollapi.stupwfpc
15739081534155HMcFarland HighMcFarland High1039McFarland Unified432Kern14NA984624481814NoYesNoNo4431NA6NANA2482443412731.91713547742930.976194
19642126066716EStowers (Cecil Stowers (Cecil B.) Elementary1124ABC Unified1Los Angeles18NA100878831NA47YesYesYesYes825NA151930NA974102343213.66901047842030.976194
30664493030640HBrea-Olinda HigBrea-Olinda High2868Brea-Olinda Unified79Orange29NA987347423-8NoNoNoNo1010NA7NANA2895592141243.7183181410128730.976194
19644516012744EAlameda ElementAlameda Elementary1273Downey Unified187Los Angeles18NA997726577115YesYesYesYes7025NA2323NANA100374014811.96851834229130.976194
40688096043293ESunnyside ElemeSunnyside Elementary4926San Luis Coastal Unified640San Luis Obispo39NA99739719420YesYesYesYes4312NA122029NA918212734103.17100021718930.976194
19734456014278ELos Molinos EleLos Molinos Elementary2463Hacienda la Puente Unif284Los Angeles18NA93835822NA13YesYesYesNo1619NA131929NA71182038343.96752025821130.976194
\n
\n```\n\n:::\n:::\n\n\nIn SAS, PROC SURVEYMEANS will calculate quantiles of specific probabilities as you request them, using Woodruff's method for intervals and a custom quantile method [@SAS_2018, pp. 9834]. The quantile method does not match any of the available `qrules` in R, and although the default `interval.types` in the R `survey::svyquantile` function also uses Woodruff's method, it is a different implementation.\n\nThe method and results from SAS are as follows:\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc surveymeans data=apisrs total=6194 quantile=(0.025 0.5 0.975);\n var growth;\nrun;\n```\n:::\n\n\n``` default\n The SURVEYMEANS Procedure\n\n Data Summary\n\n Number of Observations 200\n\n\n\n\n Quantiles\n\n Std\n Variable Percentile Estimate Error 95% Confidence Limits\n ---------------------------------------------------------------------------------\n growth 2.5 -16.500000 1.755916 -19.962591 -13.037409\n 50 Median 26.500000 1.924351 22.705263 30.294737\n 97.5 99.000000 16.133827 67.184794 130.815206\n ---------------------------------------------------------------------------------\n```\n\nIf in R we use the default `qrule=\"math\"` (equivalent to `qrule=\"hf1\"` and matches `type=1` in the `quantile` function for unweighted data) along with the default `interval.type=\"mean\"`, we get the following results:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsrs_design <- survey::svydesign(data = apisrs, id = ~1, fpc = ~fpc, )\n\nsurvey::svyquantile(\n ~growth,\n srs_design,\n quantiles = c(0.025, 0.5, 0.975),\n ci = TRUE,\n se = TRUE\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n$growth\n quantile ci.2.5 ci.97.5 se\n0.025 -16 -21 -12 2.281998\n0.5 27 24 31 1.774887\n0.975 103 93 189 24.341307\n\nattr(,\"hasci\")\n[1] TRUE\nattr(,\"class\")\n[1] \"newsvyquantile\"\n```\n\n\n:::\n:::\n\n\nHere we can see that the quantiles, confidence intervals, and standard errors do not match SAS. 
From testing, none of the available `qrule` methods match SAS for the quantile values, so it is recommended to use the default values unless you have need of some of the other properties of different quantile definitions - see [`vignette(\"qrule\", package=\"survey\")`](https://cran.r-project.org/web/packages/survey/vignettes/qrule.pdf) for more detail. If an exact match to SAS is required, then the `svyquantile` function allows for passing a custom function to the `qrule` argument to define your own method for calculating quantiles. Below is an example that will match SAS:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsas_qrule <- function(x, w, p) {\n # Custom qrule to match SAS, based on survey::oldsvyquantile's internal method\n if (any(is.na(x))) {\n return(NA * p)\n }\n w <- rowsum(w, x, reorder = TRUE)\n x <- sort(unique(x))\n cum.w <- cumsum(w) / sum(w)\n cdf <- approxfun(\n cum.w,\n x,\n method = \"linear\",\n f = 1,\n yleft = min(x),\n yright = max(x),\n ties = min\n )\n cdf(p)\n}\n\n\nsas_quants <- survey::svyquantile(\n ~growth,\n srs_design,\n quantiles = c(0.025, 0.5, 0.975),\n qrule = sas_qrule,\n ci = TRUE,\n se = TRUE\n)\n\nsas_quants\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n$growth\n quantile ci.2.5 ci.97.5 se\n0.025 -16.5 -22.00000 -15.07482 1.755916\n0.5 26.5 23.03563 30.62510 1.924351\n0.975 99.0 83.70616 147.33657 16.133827\n\nattr(,\"hasci\")\n[1] TRUE\nattr(,\"class\")\n[1] \"newsvyquantile\"\n```\n\n\n:::\n:::\n\n\nNote that although the quantiles and standard errors match, the confidence intervals still do not match SAS. 
For this another custom calculation is required, based on the formula used in SAS:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsas_quantile_confint <- function(newsvyquantile, level = 0.05, df = Inf) {\n q <- coef(newsvyquantile)\n se <- survey::SE(newsvyquantile)\n ci <- cbind(\n q,\n q + se * qt(level / 2, df),\n q - se * qt(1 - level / 2, df),\n se\n )\n colnames(ci) <- c(\n \"quantile\",\n paste0(\"ci.\", c(100 * level / 2, 100 * (1 - level / 2))),\n \"se\"\n )\n\n ci\n}\n\nsas_quantile_confint(sas_quants, df = survey::degf(srs_design))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n quantile ci.2.5 ci.97.5 se\ngrowth.0.025 -16.5 -19.96259 -19.96259 1.755916\ngrowth.0.5 26.5 22.70526 22.70526 1.924351\ngrowth.0.975 99.0 67.18479 67.18479 16.133827\n```\n\n\n:::\n:::\n\n\n## Other considerations\n\n### Degrees of Freedom\n\nSome of the functions in R require the degrees of freedom to be specified when calculating confidence intervals, otherwise it assumes a normal distribution. This can be done easily by using the `survey::degf` function, which calculates the degrees of freedom for a survey design object.\n\n### Single PSU Strata\n\nAlthough it was not apparent with the examples used here, if there is only one PSU from a stratum then R will by default error, whereas SAS will remove that stratum from the variance calculation. This can be changed in R by setting the `options(survey.lonely.psu=\"certainty\")` to match SAS and have it make no contribution to the variance. In `samplics`, this behaviour can be configured using the `single_psu` argument to the `estimate` method, and can be set to to match SAS using `SinglePSUEst.certainty`. This should be considered carefully however, in R and Python there are additional methods of handling single PSUs that may be more appropriate for your use-case.\n\n### Documentation Differences\n\nOne key consideration when choosing a statistical package is the documentation available. 
In this case, both the `survey` package in R and the survey procedures in SAS have a much more comprehensive set of documentation and examples than `samplics` in Python. This includes both detailed examples, as well as the underlying theory and methods used in the calculations including references to the literature.\n\n# Summary and Recommendations\n\nThe `{survey}` package in R and the survey procedures in SAS/STAT both provide similar functionality for calculating survey summary statistics. In most cases in both our tests and others, the results are identical ([@2017_YRBS], [@so2020modelling], [@adamico_2009]). Where differences do occur, primarily in calculating quantiles, the methods in R are more varied and well-documented.\n\nIn contrast, the `samplics` package in Python is still early in development, and although it does provide some functionality there are still major limitations in both basic statistics (i.e. quantiles) and in more complex methods that were beyond the scope of this document, and the methods are much less well-documented.\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-24\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P survey * 4.4-8 2025-08-28 [?] 
RSPM\n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n\n * ── Packages attached to the search path.\n P ── Loaded and on-disk path mismatch.\n\n─ External software ──────────────────────────────────────────────────────────\n setting value\n SAS 9.04.01M7P080520\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n\n\n::: {.cell}\n\n:::\n\n:::", + "markdown": "---\ntitle: \"R vs SAS vs Python Survey Summary Statistics\"\nbibliography: survey-stats-summary.bib\n---\n\nThis document will compare the survey summary statistics functionality in SAS (available through SAS/STAT), R (available from the [`{survey}`](%5B%60%7Bsurvey%7D%60%5D(https://r-survey.r-forge.r-project.org/survey/html/api.html)) package), and Python (available from the [`samplics`](https://samplics-org.github.io/samplics/) package), highlighting differences in methods and results. Only the default Taylor series linearisation method for calculating variances is used in all languages. A more detailed comparison between R and SAS for specific methods and use-cases is available in [@2017_YRBS], [@so2020modelling], or [@adamico_2009]. For a general guide to survey statistics, which has companion guides for both R and SAS, see [@Lohr_2022].\n\n# Result Comparison\n\nThe following table shows different survey summary statistics, the capabilities of each language, and whether or not the results match. 
Each analysis also includes calculating the standard error and confidence intervals.\n\n| Analysis | Supported in R | Supported in SAS | Supported in Python | Results Match\\* | Notes |\n|------------|------------|------------|------------|------------|------------|\n| Mean | [Yes](../R/survey-stats-summary.html#Mean) | [Yes](../SAS/survey-stats-summary.html#Mean) | [Yes](../Python/survey-stats-summary.html#Mean) | Yes | Must specify degrees of freedom in R for confidence limits |\n| Total | [Yes](../R/survey-stats-summary.html#Total) | [Yes](../SAS/survey-stats-summary.html#Total) | [Yes](../Python/survey-stats-summary.html#Total) | Yes | Must specify degrees of freedom in R for confidence limits |\n| Ratios | [Yes](../R/survey-stats-summary.html#Ratios) | [Yes](../SAS/survey-stats-summary.html#Ratios) | [Yes](../Python/survey-stats-summary.html#Ratios) | Yes | Must specify degrees of freedom in R for confidence limits |\n| Proportions | [Yes](../R/survey-stats-summary.html#Proportions) | [Yes](../SAS/survey-stats-summary.html##Proportions) | [Yes](../Python/survey-stats-summary.html#Proportions) | Yes\\*\\* | In Python, the confidence limits of proportions only match to 1 or 2 s.f. This is due to a different method being used, which is undocumented. 
|\n| Quantiles | [Yes](../R/survey-stats-summary.html#Quantiles) | [Yes](../SAS/survey-stats-summary.html#Quantiles) | No | [No](#Quantiles) | Different methods for calculating quantiles |\n| Domain Analysis | [Yes](../R/survey-stats-summary.html#Summary%20Statistics%20on%20Complex%20Survey%20Designs) | [Yes](../SAS/survey-stats-summary.html##Summary%20Statistics%20on%20Complex%20Survey%20Designs) | [Yes](../Python/survey-stats-summary.html#Domain%20Estimations) | Yes | |\n| Design Effect | [Yes](../R/survey-stats-summary.html#Summary%20Statistics%20on%20Complex%20Survey%20Designs) | [Yes](../SAS/survey-stats-summary.html##Summary%20Statistics%20on%20Complex%20Survey%20Designs) | No | Yes | Set `deff=\"replace\"` in R to match SAS exactly |\n\n*\\*Results match where feature is available*\n\n*\\*\\*For confidence limits of proportions near 0 and 1, `survey::svyciprop` can be more accurate than `confint` in R, but does not match other software.*\n\nFor the full R, SAS, and Python code and results used for this comparison, see below:\n\n::: {.callout-note collapse=\"true\" appearance=\"minimal\" title=\"Show Code\"}\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(survey)\n\ndata(\"nhanes\")\n\nnhanes_design <- survey::svydesign(\n data = nhanes,\n id = ~SDMVPSU, # Specify the PSU/cluster column\n strata = ~SDMVSTRA, # The stratification column\n weights = ~WTMEC2YR, # The weighting column\n nest = TRUE # Allows for PSUs with the same name nested within different strata\n)\n\n# Mean of HI_CHOL\nhi_chol_mean <- survey::svymean(~HI_CHOL, nhanes_design, na.rm = TRUE)\n\n# Sum of HI_CHOL\nhi_chol_sum <- survey::svytotal(~HI_CHOL, nhanes_design, na.rm = TRUE)\n\n# Ratio of HI_CHOL / RIAGENDR\nhi_chol_ratio <- survey::svyratio(\n numerator = ~HI_CHOL,\n denominator = ~RIAGENDR,\n nhanes_design,\n na.rm = TRUE,\n ci = TRUE,\n se = TRUE,\n separate = FALSE\n)\n\n# Proportion of different AGECAT values\nagecat_props <- survey::svymean(~agecat, nhanes_design, na.rm = 
TRUE)\n\n# Quantiles of HI_CHOL\nhi_chol_quart <- survey::svyquantile(\n ~HI_CHOL,\n nhanes_design,\n quantiles = c(0.025, 0.5, 0.975),\n na.rm = TRUE,\n ci = TRUE\n)\n\n# Domain analysis of mean of HI_CHOL by race, with design effect\nhi_chol_mean_by_race <- survey::svyby(\n ~HI_CHOL,\n ~race,\n nhanes_design,\n svymean,\n na.rm = TRUE,\n deff = \"replace\"\n)\n\nprint(list(\n \"Mean of HI_CHOL\" = coef(hi_chol_mean),\n \"SE of Mean HI_CHOL\" = survey::SE(hi_chol_mean),\n \"CL of Mean HI_CHOL\" = confint(\n hi_chol_mean,\n df = survey::degf(nhanes_design)\n ),\n \"Sum of HI_CHOL\" = coef(hi_chol_sum),\n \"SE of Sum HI_CHOL\" = survey::SE(hi_chol_sum),\n \"CL of Sum HI_CHOL\" = confint(hi_chol_sum, df = survey::degf(nhanes_design)),\n \"Ratio of HI_CHOL / RIAGENDR\" = coef(hi_chol_ratio),\n \"SE of Ratio HI_CHOL / RIAGENDR\" = survey::SE(hi_chol_ratio),\n \"CL of Ratio HI_CHOL / RIAGENDR\" = confint(\n hi_chol_ratio,\n df = survey::degf(nhanes_design)\n ),\n \"Proportion of AGECAT\" = coef(agecat_props),\n \"SE of Proportion AGECAT\" = survey::SE(agecat_props),\n \"CL of Proportion AGECAT\" = confint(\n agecat_props,\n df = survey::degf(nhanes_design)\n ),\n \"Quantiles of HI_CHOL\" = coef(hi_chol_quart),\n \"SE of Quantiles HI_CHOL\" = survey::SE(hi_chol_quart),\n \"CL of Quantiles HI_CHOL\" = confint(\n hi_chol_quart,\n df = survey::degf(nhanes_design)\n ),\n \"Mean of HI_CHOL by race\" = coef(hi_chol_mean_by_race),\n \"SE of HI_CHOL by race\" = survey::SE(hi_chol_mean_by_race),\n \"CL of HI_CHOL by race\" = confint(\n hi_chol_mean_by_race,\n df = survey::degf(nhanes_design)\n ),\n \"Design Effect of HI_CHOL by race\" = hi_chol_mean_by_race$DEff.HI_CHOL\n))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n$`Mean of HI_CHOL`\n HI_CHOL \n0.112143 \n\n$`SE of Mean HI_CHOL`\n HI_CHOL\nHI_CHOL 0.00544584\n\n$`CL of Mean HI_CHOL`\n 2.5 % 97.5 %\nHI_CHOL 0.1005983 0.1236876\n\n$`Sum of HI_CHOL`\n HI_CHOL \n28635245 \n\n$`SE of Sum HI_CHOL`\n HI_CHOL\nHI_CHOL 
2020711\n\n$`CL of Sum HI_CHOL`\n 2.5 % 97.5 %\nHI_CHOL 24351530 32918961\n\n$`Ratio of HI_CHOL / RIAGENDR`\nHI_CHOL/RIAGENDR \n 0.07422209 \n\n$`SE of Ratio HI_CHOL / RIAGENDR`\nHI_CHOL/RIAGENDR \n 0.003714728 \n\n$`CL of Ratio HI_CHOL / RIAGENDR`\n 2.5 % 97.5 %\nHI_CHOL/RIAGENDR 0.06634722 0.08209696\n\n$`Proportion of AGECAT`\n agecat(0,19] agecat(19,39] agecat(39,59] agecat(59,Inf] \n 0.2077495 0.2934079 0.3032896 0.1955530 \n\n$`SE of Proportion AGECAT`\n agecat(0,19] agecat(19,39] agecat(39,59] agecat(59,Inf] \n 0.006129950 0.009560692 0.004519463 0.008092578 \n\n$`CL of Proportion AGECAT`\n 2.5 % 97.5 %\nagecat(0,19] 0.1947546 0.2207444\nagecat(19,39] 0.2731401 0.3136756\nagecat(39,59] 0.2937088 0.3128704\nagecat(59,Inf] 0.1783975 0.2127085\n\n$`Quantiles of HI_CHOL`\nHI_CHOL.0.025 HI_CHOL.0.5 HI_CHOL.0.975 \n 0 0 1 \n\n$`SE of Quantiles HI_CHOL`\nHI_CHOL.0.025 HI_CHOL.0.5 HI_CHOL.0.975 \n 0.2358596 0.2358596 0.0000000 \n\n$`CL of Quantiles HI_CHOL`\n l u\nHI_CHOL.0.025 0 1\nHI_CHOL.0.5 0 1\nHI_CHOL.0.975 1 1\n\n$`Mean of HI_CHOL by race`\n 1 2 3 4 \n0.10149167 0.12164921 0.07864006 0.09967861 \n\n$`SE of HI_CHOL by race`\n[1] 0.006245843 0.006604134 0.010384645 0.024666227\n\n$`CL of HI_CHOL by race`\n 2.5 % 97.5 %\n1 0.08825107 0.1147323\n2 0.10764907 0.1356493\n3 0.05662560 0.1006545\n4 0.04738854 0.1519687\n\n$`Design Effect of HI_CHOL by race`\n[1] 1.082734 1.407822 2.091156 3.098290\n```\n\n\n:::\n:::\n\n\n## SAS\n\n```sas\n* Mean, sum quantile of HI_CHOL;\nproc surveymeans data=nhanes mean sum clm quantile=(0.025 0.5 0.975);\n cluster SDMVPSU;\n strata SDMVSTRA;\n weight WTMEC2YR;\n var HI_CHOL;\nrun;\n\n* Ratio of HI_CHOL / RIAGENDR;\nproc surveymeans data=nhanes;\n cluster SDMVPSU;\n strata SDMVSTRA;\n weight WTMEC2YR;\n ratio HI_CHOL / RIAGENDR;\nrun;\n\n* Proportions of agecat;\nproc surveyfreq data=nhanes;\n cluster SDMVPSU;\n strata SDMVSTRA;\n weight WTMEC2YR;\n table agecat / cl;\nrun;\n\n* Mean and DEFF of HI_CHOL by race;\nproc surveymeans 
data=nhanes mean deff;\n cluster SDMVPSU;\n strata SDMVSTRA;\n weight WTMEC2YR;\n domain race;\n var HI_CHOL;\nrun;\n```\n\n``` default\n\n The SURVEYMEANS Procedure\n\n Data Summary\n\n Number of Strata 15\n Number of Clusters 31\n Number of Observations 8591\n Sum of Weights 276536446\n\n\n Statistics\n\n Std Error Std Error\n Variable Mean of Mean 95% CL for Mean Sum of Sum 95% CL for Sum\n --------------------------------------------------------------------------------------------------------------------------\n HI_CHOL 0.112143 0.005446 0.10059829 0.12368762 28635245 2020711 24351529.8 32918960.7\n --------------------------------------------------------------------------------------------------------------------------\n\n\n Quantiles\n\n Std\n Variable Percentile Estimate Error 95% Confidence Limits\n ---------------------------------------------------------------------------------\n HI_CHOL 2.5 0 0.024281 -0.0514730 0.05147298\n 50 Median 0 0.024281 -0.0514730 0.05147298\n 97.5 0.777070 0.024281 0.7255973 0.82854324\n ---------------------------------------------------------------------------------\n\n The SURVEYMEANS Procedure\n\n Data Summary\n\n Number of Strata 15\n Number of Clusters 31\n Number of Observations 8591\n Sum of Weights 276536446\n\n\n Statistics\n\n Std Error\n Variable N Mean of Mean 95% CL for Mean\n ---------------------------------------------------------------------------------\n HI_CHOL 7846 0.112143 0.005446 0.10059829 0.12368762\n RIAGENDR 8591 1.512019 0.005302 1.50077977 1.52325807\n ---------------------------------------------------------------------------------\n\n\n Ratio Analysis\n\n Std\n Numerator Denominator N Ratio Error 95% CL for Ratio\n ----------------------------------------------------------------------------------------------\n HI_CHOL RIAGENDR 7846 0.074222 0.003715 0.06634722 0.08209696\n ----------------------------------------------------------------------------------------------\n\n The SURVEYFREQ 
Procedure\n\n Data Summary\n\n Number of Strata 15\n Number of Clusters 31\n Number of Observations 8591\n Sum of Weights 276536446\n\n\n Table of agecat\n\n Weighted Std Err of Std Err of 95% Confidence Limits\n agecat Frequency Frequency Wgt Freq Percent Percent for Percent\n -------------------------------------------------------------------------------------------------------\n (0,19] 2532 57450307 3043819 20.7749 0.6130 19.4755 22.0744\n (19,39] 2033 81137975 3692818 29.3408 0.9561 27.3140 31.3676\n (39,59] 2021 83870623 4853936 30.3290 0.4519 29.3709 31.2870\n (59,Inf] 2005 54077541 4284296 19.5553 0.8093 17.8398 21.2709\n\n Total 8591 276536446 13935730 100.0000 \n -------------------------------------------------------------------------------------------------------\n\n The SURVEYMEANS Procedure\n\n Data Summary\n\n Number of Strata 15\n Number of Clusters 31\n Number of Observations 8591\n Sum of Weights 276536446\n\n\n Statistics\n\n Std Error Design\n Variable Mean of Mean Effect\n --------------------------------------------------------\n HI_CHOL 0.112143 0.005446 2.336725\n --------------------------------------------------------\n\n The SURVEYMEANS Procedure\n\n Statistics for race Domains\n\n Std Error Design\n race Variable Mean of Mean Effect\n ------------------------------------------------------------------------\n 1 HI_CHOL 0.101492 0.006246 1.082734\n 2 HI_CHOL 0.121649 0.006604 1.407822\n 3 HI_CHOL 0.078640 0.010385 2.091156\n 4 HI_CHOL 0.099679 0.024666 3.098290\n ------------------------------------------------------------------------\n```\n\n## Python\n\n\n::: {.cell}\n\n```{.python .cell-code}\nimport pandas as pd\nfrom samplics import TaylorEstimator\nfrom samplics.utils.types import PopParam\n\nnhanes = pd.read_csv(\"../data/nhanes.csv\")\n\nnhanes_design_kwargs = dict(\n psu=nhanes[\"SDMVPSU\"],\n stratum=nhanes[\"SDMVSTRA\"],\n samp_weight=nhanes[\"WTMEC2YR\"],\n remove_nan=True,\n)\n\n# Mean of HI_CHOL\nmean_estimator = 
TaylorEstimator(PopParam.mean)\nmean_estimator.estimate(nhanes[\"HI_CHOL\"], **nhanes_design_kwargs)\nhi_chol_means = mean_estimator.to_dataframe()\n\n# Sum of HI_CHOL\ntotal_estimator = TaylorEstimator(PopParam.total)\ntotal_estimator.estimate(nhanes[\"HI_CHOL\"], **nhanes_design_kwargs)\nhi_chol_totals = total_estimator.to_dataframe()\n\n# Ratio of HI_CHOL / RIAGENDR\nratio_estimator = TaylorEstimator(PopParam.ratio)\nratio_estimator.estimate(\n y=nhanes[\"HI_CHOL\"], x=nhanes[\"RIAGENDR\"], **nhanes_design_kwargs\n)\nhi_chol_ratio = ratio_estimator.to_dataframe()\n\n# Proportion of different AGECAT values\nprop_estimator = TaylorEstimator(PopParam.prop)\nprop_estimator.estimate(nhanes[\"agecat\"], **nhanes_design_kwargs)\nagecat_prop = prop_estimator.to_dataframe()\n\n# Quantiles of HI_CHOL\n# NA\n\n# Domain analysis of mean of HI_CHOL by race, with design effect\nmean_estimator = TaylorEstimator(PopParam.mean)\nmean_estimator.estimate(\n nhanes[\"HI_CHOL\"],\n **nhanes_design_kwargs,\n domain=nhanes[\"race\"],\n deff=True, # Design effect param currently has no effect\n)\nhi_chol_domain_means = mean_estimator.to_dataframe()\n\n\nag_dict = agecat_prop.set_index(\"_level\").to_dict()\nhc_dict = hi_chol_domain_means.set_index(\"_domain\").to_dict()\n\nprint(\n f\"\"\"\n Mean of HI_CHOL: {hi_chol_means[\"_estimate\"][0]}\n SE of Mean HI_CHOL: {hi_chol_means[\"_stderror\"][0]}\n CL of Mean HI_CHOL: {(hi_chol_means[\"_lci\"][0], hi_chol_means[\"_uci\"][0])}\n Sum of HI_CHOL: {hi_chol_totals[\"_estimate\"][0]}\n SE of Sum HI_CHOL: {hi_chol_totals[\"_stderror\"][0]}\n CL of Sum HI_CHOL: {(hi_chol_totals[\"_lci\"][0], hi_chol_totals[\"_uci\"][0])}\n Ratio of HI_CHOL / RIAGENDR: {hi_chol_ratio[\"_estimate\"][0]}\n SE of Ratio HI_CHOL / RIAGENDR: {hi_chol_ratio[\"_stderror\"][0]}\n CL of Ratio HI_CHOL / RIAGENDR: {(hi_chol_ratio[\"_lci\"][0], hi_chol_ratio[\"_uci\"][0])}\n Proportion of AGECAT: {ag_dict[\"_estimate\"]}\n SE of Proportion AGECAT: {ag_dict[\"_stderror\"]}\n 
LCL of Proportion AGECAT: {ag_dict[\"_lci\"]}\n UCL of Proportion AGECAT: {ag_dict[\"_uci\"]}\n Quantiles of HI_CHOL: Not available\n Mean of HI_CHOL by race: {hc_dict[\"_estimate\"]}\n SE of HI_CHL by race: {hc_dict[\"_stderror\"]}\n LCL of HI_CHOL by race: {hc_dict[\"_lci\"]}\n UCL of HI_CHOL by race: {hc_dict[\"_uci\"]}\n Design Effect of HI_CHOL by race: Not available\n \"\"\"\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n Mean of HI_CHOL: 0.11214295634969222\n SE of Mean HI_CHOL: 0.005445839698954557\n CL of Mean HI_CHOL: (np.float64(0.1005982919131703), np.float64(0.12368762078621415))\n Sum of HI_CHOL: 28635245.254672\n SE of Sum HI_CHOL: 2020710.7436996205\n CL of Sum HI_CHOL: (np.float64(24351529.84091034), np.float64(32918960.668433655))\n Ratio of HI_CHOL / RIAGENDR: 0.07422209323594066\n SE of Ratio HI_CHOL / RIAGENDR: 0.0037147278931070065\n CL of Ratio HI_CHOL / RIAGENDR: (np.float64(0.06634722189017901), np.float64(0.0820969645817023))\n Proportion of AGECAT: {'(0,19]': 0.2077494937870972, '(19,39]': 0.29340788818591346, '(39,59]': 0.30328958320385285, '(59,Inf]': 0.19555303482313666}\n SE of Proportion AGECAT: {'(0,19]': 0.006129950336419631, '(19,39]': 0.009560691634608896, '(39,59]': 0.004519462827363183, '(59,Inf]': 0.008092578243976422}\n LCL of Proportion AGECAT: {'(0,19]': 0.19505410930097866, '(19,39]': 0.27355685874096586, '(39,59]': 0.2937950591158628, '(59,Inf]': 0.1789647230500222}\n UCL of Proportion AGECAT: {'(0,19]': 0.2210442684297426, '(19,39]': 0.3140766293472951, '(39,59]': 0.31295496708023285, '(59,Inf]': 0.21327950895208636}\n Quantiles of HI_CHOL: Not available\n Mean of HI_CHOL by race: {1: 0.10149166545397208, 2: 0.12164920535593333, 3: 0.07864006039908408, 4: 0.09967860947712034}\n SE of HI_CHL by race: {1: 0.006245843308749599, 2: 0.006604133623532979, 3: 0.010384645000548863, 4: 0.024666226871851268}\n LCL of HI_CHOL by race: {1: 0.0882510691256497, 2: 0.10764906749064211, 3: 0.056625596431891564, 4: 
0.04738854441969514}\n UCL of HI_CHOL by race: {1: 0.11473226178229445, 2: 0.13564934322122454, 3: 0.1006545243662766, 4: 0.15196867453454554}\n Design Effect of HI_CHOL by race: Not available\n \n```\n\n\n:::\n:::\n\n:::\n\n# Differences\n\n## Quantiles {#quantiles}\n\n`samplics` in Python does not have a method for calculating quantiles, and in R and SAS the available methods lead to different results. To demonstrate the differences in calculating quantiles, we will use the `apisrs` dataset from the `survey` package in R [@API_2000].\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(survey)\n\ndata(\"api\")\n\nhead(apisrs) |>\n gt::gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n \n
cdsstypenamesnamesnumdnamednumcnamecnumflagpcttestapi00api99targetgrowthsch.widecomp.impbothawardsmealsellyr.rndmobilityacs.k3acs.46acs.corepct.respnot.hsghsgsome.colcol.gradgrad.schavg.edfullemerenrollapi.stupwfpc
15739081534155HMcFarland HighMcFarland High1039McFarland Unified432Kern14NA984624481814NoYesNoNo4431NA6NANA2482443412731.91713547742930.976194
19642126066716EStowers (Cecil Stowers (Cecil B.) Elementary1124ABC Unified1Los Angeles18NA100878831NA47YesYesYesYes825NA151930NA974102343213.66901047842030.976194
30664493030640HBrea-Olinda HigBrea-Olinda High2868Brea-Olinda Unified79Orange29NA987347423-8NoNoNoNo1010NA7NANA2895592141243.7183181410128730.976194
19644516012744EAlameda ElementAlameda Elementary1273Downey Unified187Los Angeles18NA997726577115YesYesYesYes7025NA2323NANA100374014811.96851834229130.976194
40688096043293ESunnyside ElemeSunnyside Elementary4926San Luis Coastal Unified640San Luis Obispo39NA99739719420YesYesYesYes4312NA122029NA918212734103.17100021718930.976194
19734456014278ELos Molinos EleLos Molinos Elementary2463Hacienda la Puente Unif284Los Angeles18NA93835822NA13YesYesYesNo1619NA131929NA71182038343.96752025821130.976194
\n
\n```\n\n:::\n:::\n\n\nIn SAS, PROC SURVEYMEANS will calculate quantiles of specific probabilities as you request them, using Woodruff's method for intervals and a custom quantile method [@SAS_2018, pp. 9834]. The quantile method does not match any of the available `qrules` in R, and although the default `interval.types` in the R `survey::svyquantile` function also uses Woodruff's method, it is a different implementation.\n\nThe method and results from SAS are as follows:\n\n```sas\nproc surveymeans data=apisrs total=6194 quantile=(0.025 0.5 0.975);\n var growth;\nrun;\n```\n\n``` default\n The SURVEYMEANS Procedure\n\n Data Summary\n\n Number of Observations 200\n\n\n\n\n Quantiles\n\n Std\n Variable Percentile Estimate Error 95% Confidence Limits\n ---------------------------------------------------------------------------------\n growth 2.5 -16.500000 1.755916 -19.962591 -13.037409\n 50 Median 26.500000 1.924351 22.705263 30.294737\n 97.5 99.000000 16.133827 67.184794 130.815206\n ---------------------------------------------------------------------------------\n```\n\nIf in R we use the default `qrule=\"math\"` (equivalent to `qrule=\"hf1\"` and matches `type=1` in the `quantile` function for unweighted data) along with the default `interval.type=\"mean\"`, we get the following results:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsrs_design <- survey::svydesign(data = apisrs, id = ~1, fpc = ~fpc, )\n\nsurvey::svyquantile(\n ~growth,\n srs_design,\n quantiles = c(0.025, 0.5, 0.975),\n ci = TRUE,\n se = TRUE\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n$growth\n quantile ci.2.5 ci.97.5 se\n0.025 -16 -21 -12 2.281998\n0.5 27 24 31 1.774887\n0.975 99 84 189 26.623305\n\nattr(,\"hasci\")\n[1] TRUE\nattr(,\"class\")\n[1] \"newsvyquantile\"\n```\n\n\n:::\n:::\n\n\nHere we can see that the quantiles, confidence intervals, and standard errors do not match SAS. 
From testing, none of the available `qrule` methods match SAS for the quantile values, so it is recommended to use the default values unless you have need of some of the other properties of different quantile definitions - see [`vignette(\"qrule\", package=\"survey\")`](https://cran.r-project.org/web/packages/survey/vignettes/qrule.pdf) for more detail. If an exact match to SAS is required, then the `svyquantile` function allows for passing a custom function to the `qrule` argument to define your own method for calculating quantiles. Below is an example that will match SAS:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsas_qrule <- function(x, w, p) {\n # Custom qrule to match SAS, based on survey::oldsvyquantile's internal method\n if (any(is.na(x))) {\n return(NA * p)\n }\n w <- rowsum(w, x, reorder = TRUE)\n x <- sort(unique(x))\n cum.w <- cumsum(w) / sum(w)\n cdf <- approxfun(\n cum.w,\n x,\n method = \"linear\",\n f = 1,\n yleft = min(x),\n yright = max(x),\n ties = min\n )\n cdf(p)\n}\n\n\nsas_quants <- survey::svyquantile(\n ~growth,\n srs_design,\n quantiles = c(0.025, 0.5, 0.975),\n qrule = sas_qrule,\n ci = TRUE,\n se = TRUE\n)\n\nsas_quants\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n$growth\n quantile ci.2.5 ci.97.5 se\n0.025 -16.5 -22.00000 -15.07482 1.755916\n0.5 26.5 23.03563 30.62510 1.924351\n0.975 99.0 83.70616 147.33657 16.133827\n\nattr(,\"hasci\")\n[1] TRUE\nattr(,\"class\")\n[1] \"newsvyquantile\"\n```\n\n\n:::\n:::\n\n\nNote that although the quantiles and standard errors match, the confidence intervals still do not match SAS. 
For this another custom calculation is required, based on the formula used in SAS:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsas_quantile_confint <- function(newsvyquantile, level = 0.05, df = Inf) {\n q <- coef(newsvyquantile)\n se <- survey::SE(newsvyquantile)\n ci <- cbind(\n q,\n q + se * qt(level / 2, df),\n q - se * qt(1 - level / 2, df),\n se\n )\n colnames(ci) <- c(\n \"quantile\",\n paste0(\"ci.\", c(100 * level / 2, 100 * (1 - level / 2))),\n \"se\"\n )\n\n ci\n}\n\nsas_quantile_confint(sas_quants, df = survey::degf(srs_design))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n quantile ci.2.5 ci.97.5 se\ngrowth.0.025 -16.5 -19.96259 -19.96259 1.755916\ngrowth.0.5 26.5 22.70526 22.70526 1.924351\ngrowth.0.975 99.0 67.18479 67.18479 16.133827\n```\n\n\n:::\n:::\n\n\n## Other considerations\n\n### Degrees of Freedom\n\nSome of the functions in R require the degrees of freedom to be specified when calculating confidence intervals, otherwise it assumes a normal distribution. This can be done easily by using the `survey::degf` function, which calculates the degrees of freedom for a survey design object.\n\n### Single PSU Strata\n\nAlthough it was not apparent with the examples used here, if there is only one PSU from a stratum then R will by default error, whereas SAS will remove that stratum from the variance calculation. This can be changed in R by setting the `options(survey.lonely.psu=\"certainty\")` to match SAS and have it make no contribution to the variance. In `samplics`, this behaviour can be configured using the `single_psu` argument to the `estimate` method, and can be set to to match SAS using `SinglePSUEst.certainty`. This should be considered carefully however, in R and Python there are additional methods of handling single PSUs that may be more appropriate for your use-case.\n\n### Documentation Differences\n\nOne key consideration when choosing a statistical package is the documentation available. 
In this case, both the `survey` package in R and the survey procedures in SAS have a much more comprehensive set of documentation and examples than `samplics` in Python. This includes both detailed examples, as well as the underlying theory and methods used in the calculations including references to the literature.\n\n# Summary and Recommendations\n\nThe `{survey}` package in R and the survey procedures in SAS/STAT both provide similar functionality for calculating survey summary statistics. In most cases in both our tests and others, the results are identical ([@2017_YRBS], [@so2020modelling], [@adamico_2009]). Where differences do occur, primarily in calculating quantiles, the methods in R are more varied and well-documented.\n\nIn contrast, the `samplics` package in Python is still early in development, and although it does provide some functionality there are still major limitations in both basic statistics (i.e. quantiles) and in more complex methods that were beyond the scope of this document, and the methods are much less well-documented.\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.3 (2026-03-11)\n os Ubuntu 24.04.4 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-26\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P survey * 4.4-8 2025-08-28 [?] 
RSPM (R 4.5.0)\n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.3/lib/R/library\n\n P ── Loaded and on-disk path mismatch.\n\n─ External software ──────────────────────────────────────────────────────────\n setting value\n SAS 9.04.01M7P080520\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Python configuration ────────────────────────────────────────────────────────\n Python 3.12.3 (main, Mar 3 2026, 12:15:18) [GCC 13.3.0]\n samplics 0.4.22\n```\n\n\n:::\n:::\n\n:::", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/Comp/r-sas-summary-stats/execute-results/html.json b/_freeze/Comp/r-sas-summary-stats/execute-results/html.json index 238ae3190..bcc16576f 100644 --- a/_freeze/Comp/r-sas-summary-stats/execute-results/html.json +++ b/_freeze/Comp/r-sas-summary-stats/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "d816264f52fcea7d8df6824cfcd3cba2", + "hash": "bc048ecc2f50f63adfadaa9f44329ef6", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Deriving Quantiles or Percentiles in R vs SAS\"\neval: false\n---\n\n### Data\n\nThe following data will be used show the differences between the default percentile definitions used by SAS and R:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nc(10, 20, 30, 40, 150, 160, 170, 180, 190, 200)\n```\n:::\n\n\n### SAS Code\n\nAssuming the data above is stored in the variable `aval` within the dataset `adlb`, the 25th and 40th percentiles could be calculated using the following code.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc univariate data=adlb;\n var aval;\n output out=stats pctlpts=25 40 pctlpre=p;\nrun;\n```\n:::\n\n\nThis procedure creates the dataset `stats` containing the variables `p25` and `p40`.\n\n\n::: {.cell layout-align=\"center\"}\n\n:::\n\n\nThe procedure has the option `PCTLDEF` which 
allows for five different percentile definitions to be used. The default is `PCTLDEF=5`.\n\n### R code\n\nThe 25th and 40th percentiles of `aval` can be calculated using the `quantile` function.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nquantile(adlb$aval, probs = c(0.25, 0.4))\n```\n:::\n\n\nThis gives the following output.\n\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n 25% 40% \n 32.5 106.0 \n```\n\n\n:::\n:::\n\n\nThe function has the argument `type` which allows for nine different percentile definitions to be used. The default is `type = 7`.\n\n### Comparison\n\nThe default percentile definition used by the UNIVARIATE procedure in SAS finds the 25th and 40th percentiles to be 30 and 95. The default definition used by R finds these percentiles to be 32.5 and 106.\n\nIt is possible to get the quantile function in R to use the same definition as the default used in SAS, by specifying `type=2`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nalquantile(adlb$aval, probs = c(0.25, 0.4), type = 2)\n```\n:::\n\n\nThis gives the following output.\n\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n25% 40% \n 30 95 \n```\n\n\n:::\n:::\n\n\nIt is not possible to get the UNIVARIATE procedure in SAS to use the same definition as the default used in R.\n\nRick Wicklin provided a [blog post](https://blogs.sas.com/content/iml/2017/05/24/definitions-sample-quantiles.html) showing how SAS has built in support for calculations using 5 of the 9 percentile definitions available in R, and also demonstrated how you can use a SAS/IML function to calculate percentiles using the other 4 definitions.\n\nMore information about quantile derivation can be found in the [SAS blog](https://blogs.sas.com/content/iml/2021/07/26/compare-quantiles-sas-r-python.html).\n\n### Key references:\n\n[Compare the default definitions for sample quantiles in SAS, R, and Python](https://blogs.sas.com/content/iml/2021/07/26/compare-quantiles-sas-r-python.html)\n\n[Sample quantiles: A 
comparison of 9 definitions](https://blogs.sas.com/content/iml/2017/05/24/definitions-sample-quantiles.html)\n\n[Hyndman, R. J., & Fan, Y. (1996). Sample quantiles in statistical packages. The American Statistician, 50(4), 361-365.](https://www.jstor.org/stable/2684934)\n", + "markdown": "---\ntitle: \"Deriving Quantiles or Percentiles in R vs SAS\"\n---\n\n### Data\n\nThe following data will be used show the differences between the default percentile definitions used by SAS and R:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nc(10, 20, 30, 40, 150, 160, 170, 180, 190, 200)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n [1] 10 20 30 40 150 160 170 180 190 200\n```\n\n\n:::\n:::\n\n\n### SAS Code\n\nAssuming the data above is stored in the variable `aval` within the dataset `adlb`, the 25th and 40th percentiles could be calculated using the following code.\n\n```sas\nproc univariate data=adlb;\n var aval;\n output out=stats pctlpts=25 40 pctlpre=p;\nrun;\n```\n\nThis procedure creates the dataset `stats` containing the variables `p25` and `p40`.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/summarystats/sas-percentiles-output.jpg){fig-align='center' width=15%}\n:::\n:::\n\n\nThe procedure has the option `PCTLDEF` which allows for five different percentile definitions to be used. The default is `PCTLDEF=5`.\n\n### R code\n\nThe 25th and 40th percentiles of `aval` can be calculated using the `quantile` function.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nquantile(adlb$aval, probs = c(0.25, 0.4))\n```\n:::\n\n\nThis gives the following output.\n\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n 25% 40% \n 32.5 106.0 \n```\n\n\n:::\n:::\n\n\nThe function has the argument `type` which allows for nine different percentile definitions to be used. The default is `type = 7`.\n\n### Comparison\n\nThe default percentile definition used by the UNIVARIATE procedure in SAS finds the 25th and 40th percentiles to be 30 and 95. 
The default definition used by R finds these percentiles to be 32.5 and 106.\n\nIt is possible to get the quantile function in R to use the same definition as the default used in SAS, by specifying `type=2`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nalquantile(adlb$aval, probs = c(0.25, 0.4), type = 2)\n```\n:::\n\n\nThis gives the following output.\n\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n25% 40% \n 30 95 \n```\n\n\n:::\n:::\n\n\nIt is not possible to get the UNIVARIATE procedure in SAS to use the same definition as the default used in R.\n\nRick Wicklin provided a [blog post](https://blogs.sas.com/content/iml/2017/05/24/definitions-sample-quantiles.html) showing how SAS has built in support for calculations using 5 of the 9 percentile definitions available in R, and also demonstrated how you can use a SAS/IML function to calculate percentiles using the other 4 definitions.\n\nMore information about quantile derivation can be found in the [SAS blog](https://blogs.sas.com/content/iml/2021/07/26/compare-quantiles-sas-r-python.html).\n\n### Key references:\n\n[Compare the default definitions for sample quantiles in SAS, R, and Python](https://blogs.sas.com/content/iml/2021/07/26/compare-quantiles-sas-r-python.html)\n\n[Sample quantiles: A comparison of 9 definitions](https://blogs.sas.com/content/iml/2017/05/24/definitions-sample-quantiles.html)\n\n[Hyndman, R. J., & Fan, Y. (1996). Sample quantiles in statistical packages. 
The American Statistician, 50(4), 361-365.](https://www.jstor.org/stable/2684934)\n", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/Comp/r-sas-wilcoxon-ranksum_hl/execute-results/html.json b/_freeze/Comp/r-sas-wilcoxon-ranksum_hl/execute-results/html.json index 2d46d3412..77e759e09 100644 --- a/_freeze/Comp/r-sas-wilcoxon-ranksum_hl/execute-results/html.json +++ b/_freeze/Comp/r-sas-wilcoxon-ranksum_hl/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "859f2601b50b8ddc83fcda0363baa68b", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"R vs SAS Wilcoxon Rank-Sum Test\"\n---\n\n\n\n## Introduction\n\nThis page compares the Wilcoxon rank-sum test, Hodges-Lehmann estimator, and estimation of the Mann-Whitney parameter in R and SAS.\n\n\n### Example Data\n\nFor this example we are using a dataset of birth weights for smoking and non-smoking mothers (*Data source: Table 30.4, Kirkwood BR. and Sterne JAC. Essentials of medical statistics. Second Edition. ISBN 978-0-86542-871-3*). 
This dataset is both small (so an exact test is recommended) and has ties in it.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbw_ns <- c(3.99, 3.89, 3.6, 3.73, 3.31, \n 3.7, 4.08, 3.61, 3.83, 3.41, \n 4.13, 3.36, 3.54, 3.51, 2.71)\nbw_s <- c(3.18, 2.74, 2.9, 3.27, 3.65, \n 3.42, 3.23, 2.86, 3.6, 3.65, \n 3.69, 3.53, 2.38, 2.34)\n\nsmk_data <- data.frame(\n value = c(bw_ns, bw_s), \n smoke = as.factor(rep(c(\"non\", \"smoke\"), c(length(bw_ns), length(bw_s))))\n) \n# Relevel the factors to make it smoker - non-smokers \nsmk_data$smoke <- forcats::fct_relevel(smk_data$smoke, \"smoke\")\nhead(smk_data)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n value smoke\n1 3.99 non\n2 3.89 non\n3 3.60 non\n4 3.73 non\n5 3.31 non\n6 3.70 non\n```\n\n\n:::\n:::\n\n\nTo view the code implementations, see the [SAS](../SAS/ranksum.qmd) and [R](../R/nonpara_wilcoxon_ranksum.qmd) pages, respectively.\n\n\n## Comparison\n\n### Software Capabilities\n\nThe following table provides an overview of the supported analyses between R and SAS. 
A specific comparison of the results and whether they match are provided below.\n\n| Analysis | Supported in R {stats} | Supported in R {coin} | Supported in R {asht} | Supported in SAS | Notes |\n|----------|------------------------|-----------------------|-----------------------|------------------|-------|\n| Wilcoxon Rank-Sum -- Normal approximation **with** continuity correction | Yes | No | Yes | Yes | In {coin}, one can add `correct=TRUE`, but note that no error is given and the results of a normal approximation approach **without** continuity correction are provided.\n| Wilcoxon Rank-Sum -- Normal approximation **without** continuity correction | Yes | Yes | Yes | Yes |\n| Wilcoxon Rank-Sum -- Exact | Partly | Yes | Partly | Yes | In {stats}, one can only do the exact method when no ties are present.; In {asht}, exact test is possible but the run time is long for larger sample size.\n| Wilcoxon Rank-Sum -- Approximative (Monte Carlo simulation) | No | Yes | Yes | No |\n| Hodges-Lehmann estimator -- Asymptotic | Yes | No | No | Yes |\n| Hodges-Lehmann estimator -- Exact | Partly | Yes | No | Yes | In {stats}, one can only do the exact method when no ties are present.\n| Hodges-Lehmann estimator -- Approximative (Monte Carlo simulation) | No | Yes | No | No |\n| Mann-Whitney parameter | No | No | Yes | No | In {asht}, confidence intervals can be obtained using asymptotic approximation, Monte Carlo simulations, or exact methods (for small sample size)\n\n\n### Wilcoxon Rank Sum test\n\nIn the below table the p-values of the Wilcoxon Rank Sum Test with different options are compared.\n\n| Analysis | R {stats} | R {coin} | R {asht} | SAS | Match | Notes |\n|----------|-----------|----------|----------|-----|-------|-------|\n| Wilcoxon Rank-Sum -- Normal approximation **with** continuity correction | 0.0100 | / | 0.0100 | 0.0100 | Yes | Not possible with {coin}\n| Wilcoxon Rank-Sum -- Normal approximation **without** continuity correction | 0.0094 | 0.0094 | 
0.0094 | 0.0094 | Yes | \n| Wilcoxon Rank-Sum -- Exact | / | 0.0082 | / | 0.0082 | Yes | Not possible with {stats} since there are ties.; In {asht} run-time very long. \n| Wilcoxon Rank-Sum -- Approximative (Monte Carlo simulation) | / | 0.0083 | 0.0083 | / | Yes | With 100,000 simulations\n\n\n### Hodges-Lehmann estimator\n\nIn the below table the Hodges-Lehmann estimate and 95% confidence intervals are compared.\n\n| Analysis | R {stats} | R {coin} | R {asht} | SAS | Match | Notes |\n|----------|-----------|----------|----------|-----|-------|-------|\n| Hodges-Lehmann estimator -- Asymptotic | -0.426 (-0.770 to -0.090) | -0.426 (-0.760 to -0.100) | / | -0.425 (-0.770 to -0.090) | No | In {coin}, the CI is the exact CI. The CIs match between {stats} and SAS.\n| Hodges-Lehmann estimator -- Exact | / | -0.425 (-0.760 to -0.100) | / | -0.425 (-0.760 to -0.100) | Yes | Not possible with {stats} since there are ties; In {asht} run-time very long.\n| Hodges-Lehmann estimator -- Approximative (Monte Carlo simulation) | / | -0.425 (-0.760 to -0.100) | / | /| / | With 500,000 simulations\n\n\n### Mann-Whitney Parameter\nThe estimation of the Mann-Whitney parameter is only possible in R `asht` package.\n\n\n\n## Special considerations for one-sided p-values\n\nIt is important to note that in SAS you can get an *unexpected* one-sided p-value. In the SAS documentation for [PROC NPAR1WAY](https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.4/statug/statug_npar1way_details04.htm) it is stated that: \n\n*\"PROC NPAR1WAY computes one-sided and two-sided asymptotic p-values for each two-sample linear rank test. When the test statistic z is greater than its null hypothesis expected value of 0, PROC NPAR1WAY computes the right-sided p-value, which is the probability of a larger value of the statistic occurring under the null hypothesis. 
When the test statistic is less than or equal to 0, PROC NPAR1WAY computes the left-sided p-value, which is the probability of a smaller value of the statistic occurring under the null hypothesis\"* (similar for the exact p-value). \n\nThus SAS reports the one-sided p-value in the direction of the test statistic. This can cause an *unexpected* one-sided p-value, if your data provides a test statistic in the other direction of the pre-specified one-sided hypothesis.\n\nConsider the following data example to showcase this:\n\n::: {.cell}\n\n```{.r .cell-code}\ndat_used <- data.frame(\n ID = c(\"001\", \"002\", \"003\", \"004\", \"005\", \"006\", \"007\", \"008\", \"009\", \"010\",\n \"011\", \"012\", \"013\", \"014\", \"015\", \"016\", \"017\", \"018\", \"019\", \"020\",\n \"021\", \"022\", \"023\", \"024\", \"025\", \"026\", \"027\", \"028\", \"029\", \"030\"),\n ARM = c(\"Placebo\", \"Placebo\", \"Placebo\", \"Placebo\", \"Placebo\", \"Placebo\", \"Placebo\", \"Placebo\", \"Placebo\", \"Placebo\",\n \"Low\", \"Low\", \"Low\", \"Low\", \"Low\", \"Low\", \"Low\", \"Low\", \"Low\", \"Low\",\n \"High\", \"High\", \"High\", \"High\", \"High\", \"High\", \"High\", \"High\", \"High\", \"High\"),\n Y = c(8.5, 8.9, 8.2, 8.1, 7.1, 7.4, 6.0, 6.5, 7.0, 7.0,\n 6.5, 9.4, 8.9, 8.8, 9.6, 8.3, 8.9, 7.0, 9.1, 6.9,\n 8.0, 7.3, 7.1, 6.2, 4.7, 4.7, 4.2, 4.1, 3.4, 3.9)\n)\ndat_used\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n ID ARM Y\n1 001 Placebo 8.5\n2 002 Placebo 8.9\n3 003 Placebo 8.2\n4 004 Placebo 8.1\n5 005 Placebo 7.1\n6 006 Placebo 7.4\n7 007 Placebo 6.0\n8 008 Placebo 6.5\n9 009 Placebo 7.0\n10 010 Placebo 7.0\n11 011 Low 6.5\n12 012 Low 9.4\n13 013 Low 8.9\n14 014 Low 8.8\n15 015 Low 9.6\n16 016 Low 8.3\n17 017 Low 8.9\n18 018 Low 7.0\n19 019 Low 9.1\n20 020 Low 6.9\n21 021 High 8.0\n22 022 High 7.3\n23 023 High 7.1\n24 024 High 6.2\n25 025 High 4.7\n26 026 High 4.7\n27 027 High 4.2\n28 028 High 4.1\n29 029 High 3.4\n30 030 High 
3.9\n```\n\n\n:::\n:::\n\n\nSuppose we would have the following two hypothesis, where for both Low Dose and High Dose we expect smaller values (Y) than Placebo:\n\n- $H_{0}$: No difference between Placebo and Low Dose, vs $H_{1}$: Placebo has higher values (Y) than Low Dose\n\n- $H_{0}$: No difference between Placebo and High Dose, vs $H_{1}$: Placebo has higher values (Y) than High Dose\n\n\n### Asymptotic results without continuity correction\n\n**Placebo and High Dose group**\n\nLet us the {coin} package in R to compare the Placebo and High Dose group:\n\n::: {.cell}\n\n```{.r .cell-code}\n# Note: greater implies that H1 is Y1 - Y2 = Placebo - High > 0\ncoin::wilcox_test(\n Y ~ factor(ARM, levels = c(\"Placebo\", \"High\")),\n distribution = \"asymptotic\",\n alternative = \"greater\",\n data = dat_used %>% dplyr::filter(ARM %in% c(\"Placebo\", \"High\")))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tAsymptotic Wilcoxon-Mann-Whitney Test\n\ndata: Y by\n\t factor(ARM, levels = c(\"Placebo\", \"High\")) (Placebo, High)\nZ = 2.5352, p-value = 0.005619\nalternative hypothesis: true mu is greater than 0\n```\n\n\n:::\n:::\n\n\nIn SAS, the following results is obtained. 
As can be seen in both R and SAS the one-sided p-value is 0.0056.\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ranksum/SAS_one_sided_pvalue.png){fig-align='center' width=90%}\n:::\n:::\n\n\n\n**Placebo and Low Dose group**\n\nLet us the {coin} package in R to compare the Placebo and Low Dose group:\n\n::: {.cell}\n\n```{.r .cell-code}\n# Note: greater implies that H1 is Y1 - Y2 = Placebo - High > 0\ncoin::wilcox_test(\n Y ~ factor(ARM, levels = c(\"Placebo\", \"Low\")),\n distribution = \"asymptotic\",\n alternative = \"greater\",\n data = dat_used %>% dplyr::filter(ARM %in% c(\"Placebo\", \"Low\")))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tAsymptotic Wilcoxon-Mann-Whitney Test\n\ndata: Y by\n\t factor(ARM, levels = c(\"Placebo\", \"Low\")) (Placebo, Low)\nZ = -1.7066, p-value = 0.9561\nalternative hypothesis: true mu is greater than 0\n```\n\n\n:::\n:::\n\n\nIn SAS, the following results is obtained. The one-sided p-values clearly do not match ({coin} p-value = 0.9561; SAS p-value = 0.0439). As mentioned above, SAS reports the p-value in the direction of the test statistic. This can cause an *unexpected* one-sided p-value, if your data provides a test statistic in the other directiont than the pre-specified one-sided hypothesis. 
Do note that $1 - 0.9561 = 0.0439$.\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ranksum/SAS_one_sided_pvalue2.png){fig-align='center' width=90%}\n:::\n:::\n\n\n\n### Exact results\n\n**Placebo and High Dose group**\n\nLet us the {coin} package in R to compare the Placebo and High Dose group:\n\n::: {.cell}\n\n```{.r .cell-code}\n# Note: greater implies that H1 is Y1 - Y2 = Placebo - High > 0\ncoin::wilcox_test(\n Y ~ factor(ARM, levels = c(\"Placebo\", \"High\")),\n distribution = \"exact\",\n alternative = \"greater\",\n data = dat_used %>% dplyr::filter(ARM %in% c(\"Placebo\", \"High\")))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tExact Wilcoxon-Mann-Whitney Test\n\ndata: Y by\n\t factor(ARM, levels = c(\"Placebo\", \"High\")) (Placebo, High)\nZ = 2.5352, p-value = 0.004682\nalternative hypothesis: true mu is greater than 0\n```\n\n\n:::\n:::\n\n\nIn SAS (see above), the same one-sided p-value of 0.0047 is obtained.\n\n\n**Placebo and Low Dose group**\n\nLet us the {coin} package in R to compare the Placebo and Low Dose group:\n\n::: {.cell}\n\n```{.r .cell-code}\n# Note: greater implies that H1 is Y1 - Y2 = Placebo - High > 0\ncoin::wilcox_test(\n Y ~ factor(ARM, levels = c(\"Placebo\", \"Low\")),\n distribution = \"exact\",\n alternative = \"greater\",\n data = dat_used %>% dplyr::filter(ARM %in% c(\"Placebo\", \"Low\")))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tExact Wilcoxon-Mann-Whitney Test\n\ndata: Y by\n\t factor(ARM, levels = c(\"Placebo\", \"Low\")) (Placebo, Low)\nZ = -1.7066, p-value = 0.9574\nalternative hypothesis: true mu is greater than 0\n```\n\n\n:::\n:::\n\n\nPlease see above for the SAS result. The one-sided p-values clearly do not match ({coin} p-value = 0.9574; SAS p-value = 0.0455).\n\n\n\n## Summary and Recommendation\n\nWilcoxon Rank Sum test and the associated Hodges-Lehmann CI are able to be consistently computed in both SAS and R. 
The user needs to be aware of some small differences:\n\n- In SAS the `exact wilcoxon hl` statement is needed to get both the exact p-value and CI.\n\n- In {stats} exact values are only possible when there are no ties and the exact parameter is set to true (`exact = TRUE`). This will give the exact p-value and CI.\n\n- In {coin} it is not possible to do a normal approximation **with** continuity correction.\n\n- For the asymptotic Hodges-Lehmann estimator, {stats} and {coin} use an algorithm to define the estimate, whereas SAS provides the *traditional* Hodges-Lehmann estimator. \n\nIf you have a study where you would like to use R for the exact Wilcoxon Rank Sum test and there is the risk of ties, {coin} would be recommended.\n\n\n\n## Ties\n\nIn all presented R packages and SAS, when there are tied values, the average score method (mid-ranks) is used. This is done by first sorting the observations in ascending order and assigning ranks as if there were no ties. The procedure averages the scores for tied observations and assigns this average score to each of the tied observations. 
Thus, all tied data values have the same score value.\n\n\n## Additional References\n\nProvided are references and additional reading materials for both R and SAS documentation related to the analysis.\n\n**R Documentation:**\n\n- `wilcox.test` function: \n\n- `wilcox_test` function: \n\n**SAS Documentation:**\n\n- `PROC npar1way`: \n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-24\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P coin * 1.4-3 2023-09-27 [?] RSPM\n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n\n * ── Packages attached to the search path.\n P ── Loaded and on-disk path mismatch.\n\n─ External software ──────────────────────────────────────────────────────────\n setting value\n SAS 9.04.01M7P080520\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n:::\n", + "markdown": "---\ntitle: \"R vs SAS Wilcoxon Rank-Sum Test\"\n---\n\n\n\n## Introduction\n\nThis page compares the Wilcoxon rank-sum test, Hodges-Lehmann estimator, and estimation of the Mann-Whitney parameter in R and SAS.\n\n\n### Example Data\n\nFor this example we are using a dataset of birth weights for smoking and non-smoking mothers (*Data source: Table 30.4, Kirkwood BR. 
and Sterne JAC. Essentials of medical statistics. Second Edition. ISBN 978-0-86542-871-3*). This dataset is both small (so an exact test is recommended) and has ties in it.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbw_ns <- c(3.99, 3.89, 3.6, 3.73, 3.31, \n 3.7, 4.08, 3.61, 3.83, 3.41, \n 4.13, 3.36, 3.54, 3.51, 2.71)\nbw_s <- c(3.18, 2.74, 2.9, 3.27, 3.65, \n 3.42, 3.23, 2.86, 3.6, 3.65, \n 3.69, 3.53, 2.38, 2.34)\n\nsmk_data <- data.frame(\n value = c(bw_ns, bw_s), \n smoke = as.factor(rep(c(\"non\", \"smoke\"), c(length(bw_ns), length(bw_s))))\n) \n# Relevel the factors to make it smoker - non-smokers \nsmk_data$smoke <- forcats::fct_relevel(smk_data$smoke, \"smoke\")\nhead(smk_data)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n value smoke\n1 3.99 non\n2 3.89 non\n3 3.60 non\n4 3.73 non\n5 3.31 non\n6 3.70 non\n```\n\n\n:::\n:::\n\n\nTo view the code implementations, see the [SAS](../SAS/ranksum.qmd) and [R](../R/nonpara_wilcoxon_ranksum.qmd) pages, respectively.\n\n\n## Comparison\n\n### Software Capabilities\n\nThe following table provides an overview of the supported analyses between R and SAS. 
A specific comparison of the results and whether they match are provided below.\n\n| Analysis | Supported in R {stats} | Supported in R {coin} | Supported in R {asht} | Supported in SAS | Notes |\n|----------|------------------------|-----------------------|-----------------------|------------------|-------|\n| Wilcoxon Rank-Sum -- Normal approximation **with** continuity correction | Yes | No | Yes | Yes | In {coin}, one can add `correct=TRUE`, but note that no error is given and the results of a normal approximation approach **without** continuity correction are provided.\n| Wilcoxon Rank-Sum -- Normal approximation **without** continuity correction | Yes | Yes | Yes | Yes |\n| Wilcoxon Rank-Sum -- Exact | Partly | Yes | Partly | Yes | In {stats}, one can only do the exact method when no ties are present.; In {asht}, exact test is possible but the run time is long for larger sample size.\n| Wilcoxon Rank-Sum -- Approximative (Monte Carlo simulation) | No | Yes | Yes | No |\n| Hodges-Lehmann estimator -- Asymptotic | Yes | No | No | Yes |\n| Hodges-Lehmann estimator -- Exact | Partly | Yes | No | Yes | In {stats}, one can only do the exact method when no ties are present.\n| Hodges-Lehmann estimator -- Approximative (Monte Carlo simulation) | No | Yes | No | No |\n| Mann-Whitney parameter | No | No | Yes | No | In {asht}, confidence intervals can be obtained using asymptotic approximation, Monte Carlo simulations, or exact methods (for small sample size)\n\n\n### Wilcoxon Rank Sum test\n\nIn the below table the p-values of the Wilcoxon Rank Sum Test with different options are compared.\n\n| Analysis | R {stats} | R {coin} | R {asht} | SAS | Match | Notes |\n|----------|-----------|----------|----------|-----|-------|-------|\n| Wilcoxon Rank-Sum -- Normal approximation **with** continuity correction | 0.0100 | / | 0.0100 | 0.0100 | Yes | Not possible with {coin}\n| Wilcoxon Rank-Sum -- Normal approximation **without** continuity correction | 0.0094 | 0.0094 | 
0.0094 | 0.0094 | Yes | \n| Wilcoxon Rank-Sum -- Exact | / | 0.0082 | / | 0.0082 | Yes | Not possible with {stats} since there are ties.; In {asht} run-time very long. \n| Wilcoxon Rank-Sum -- Approximative (Monte Carlo simulation) | / | 0.0083 | 0.0083 | / | Yes | With 100,000 simulations\n\n\n### Hodges-Lehmann estimator\n\nIn the below table the Hodges-Lehmann estimate and 95% confidence intervals are compared.\n\n| Analysis | R {stats} | R {coin} | R {asht} | SAS | Match | Notes |\n|----------|-----------|----------|----------|-----|-------|-------|\n| Hodges-Lehmann estimator -- Asymptotic | -0.426 (-0.770 to -0.090) | -0.426 (-0.760 to -0.100) | / | -0.425 (-0.770 to -0.090) | No | In {coin}, the CI is the exact CI. The CIs match between {stats} and SAS.\n| Hodges-Lehmann estimator -- Exact | / | -0.425 (-0.760 to -0.100) | / | -0.425 (-0.760 to -0.100) | Yes | Not possible with {stats} since there are ties; In {asht} run-time very long.\n| Hodges-Lehmann estimator -- Approximative (Monte Carlo simulation) | / | -0.425 (-0.760 to -0.100) | / | /| / | With 500,000 simulations\n\n\n### Mann-Whitney Parameter\nThe estimation of the Mann-Whitney parameter is only possible in R `asht` package.\n\n\n\n## Special considerations for one-sided p-values\n\nIt is important to note that in SAS you can get an *unexpected* one-sided p-value. In the SAS documentation for [PROC NPAR1WAY](https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.4/statug/statug_npar1way_details04.htm) it is stated that: \n\n*\"PROC NPAR1WAY computes one-sided and two-sided asymptotic p-values for each two-sample linear rank test. When the test statistic z is greater than its null hypothesis expected value of 0, PROC NPAR1WAY computes the right-sided p-value, which is the probability of a larger value of the statistic occurring under the null hypothesis. 
When the test statistic is less than or equal to 0, PROC NPAR1WAY computes the left-sided p-value, which is the probability of a smaller value of the statistic occurring under the null hypothesis\"* (similar for the exact p-value). \n\nThus SAS reports the one-sided p-value in the direction of the test statistic. This can cause an *unexpected* one-sided p-value, if your data provides a test statistic in the other direction of the pre-specified one-sided hypothesis.\n\nConsider the following data example to showcase this:\n\n::: {.cell}\n\n```{.r .cell-code}\ndat_used <- data.frame(\n ID = c(\"001\", \"002\", \"003\", \"004\", \"005\", \"006\", \"007\", \"008\", \"009\", \"010\",\n \"011\", \"012\", \"013\", \"014\", \"015\", \"016\", \"017\", \"018\", \"019\", \"020\",\n \"021\", \"022\", \"023\", \"024\", \"025\", \"026\", \"027\", \"028\", \"029\", \"030\"),\n ARM = c(\"Placebo\", \"Placebo\", \"Placebo\", \"Placebo\", \"Placebo\", \"Placebo\", \"Placebo\", \"Placebo\", \"Placebo\", \"Placebo\",\n \"Low\", \"Low\", \"Low\", \"Low\", \"Low\", \"Low\", \"Low\", \"Low\", \"Low\", \"Low\",\n \"High\", \"High\", \"High\", \"High\", \"High\", \"High\", \"High\", \"High\", \"High\", \"High\"),\n Y = c(8.5, 8.9, 8.2, 8.1, 7.1, 7.4, 6.0, 6.5, 7.0, 7.0,\n 6.5, 9.4, 8.9, 8.8, 9.6, 8.3, 8.9, 7.0, 9.1, 6.9,\n 8.0, 7.3, 7.1, 6.2, 4.7, 4.7, 4.2, 4.1, 3.4, 3.9)\n)\ndat_used\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n ID ARM Y\n1 001 Placebo 8.5\n2 002 Placebo 8.9\n3 003 Placebo 8.2\n4 004 Placebo 8.1\n5 005 Placebo 7.1\n6 006 Placebo 7.4\n7 007 Placebo 6.0\n8 008 Placebo 6.5\n9 009 Placebo 7.0\n10 010 Placebo 7.0\n11 011 Low 6.5\n12 012 Low 9.4\n13 013 Low 8.9\n14 014 Low 8.8\n15 015 Low 9.6\n16 016 Low 8.3\n17 017 Low 8.9\n18 018 Low 7.0\n19 019 Low 9.1\n20 020 Low 6.9\n21 021 High 8.0\n22 022 High 7.3\n23 023 High 7.1\n24 024 High 6.2\n25 025 High 4.7\n26 026 High 4.7\n27 027 High 4.2\n28 028 High 4.1\n29 029 High 3.4\n30 030 High 
3.9\n```\n\n\n:::\n:::\n\n\nSuppose we would have the following two hypothesis, where for both Low Dose and High Dose we expect smaller values (Y) than Placebo:\n\n- $H_{0}$: No difference between Placebo and Low Dose, vs $H_{1}$: Placebo has higher values (Y) than Low Dose\n\n- $H_{0}$: No difference between Placebo and High Dose, vs $H_{1}$: Placebo has higher values (Y) than High Dose\n\n\n### Asymptotic results without continuity correction\n\n**Placebo and High Dose group**\n\nLet us the {coin} package in R to compare the Placebo and High Dose group:\n\n::: {.cell}\n\n```{.r .cell-code}\n# Note: greater implies that H1 is Y1 - Y2 = Placebo - High > 0\ncoin::wilcox_test(\n Y ~ factor(ARM, levels = c(\"Placebo\", \"High\")),\n distribution = \"asymptotic\",\n alternative = \"greater\",\n data = dat_used %>% dplyr::filter(ARM %in% c(\"Placebo\", \"High\")))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tAsymptotic Wilcoxon-Mann-Whitney Test\n\ndata: Y by\n\t factor(ARM, levels = c(\"Placebo\", \"High\")) (Placebo, High)\nZ = 2.5352, p-value = 0.005619\nalternative hypothesis: true mu is greater than 0\n```\n\n\n:::\n:::\n\n\nIn SAS, the following results is obtained. 
As can be seen in both R and SAS the one-sided p-value is 0.0056.\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ranksum/SAS_one_sided_pvalue.png){fig-align='center' width=90%}\n:::\n:::\n\n\n\n**Placebo and Low Dose group**\n\nLet us the {coin} package in R to compare the Placebo and Low Dose group:\n\n::: {.cell}\n\n```{.r .cell-code}\n# Note: greater implies that H1 is Y1 - Y2 = Placebo - High > 0\ncoin::wilcox_test(\n Y ~ factor(ARM, levels = c(\"Placebo\", \"Low\")),\n distribution = \"asymptotic\",\n alternative = \"greater\",\n data = dat_used %>% dplyr::filter(ARM %in% c(\"Placebo\", \"Low\")))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tAsymptotic Wilcoxon-Mann-Whitney Test\n\ndata: Y by\n\t factor(ARM, levels = c(\"Placebo\", \"Low\")) (Placebo, Low)\nZ = -1.7066, p-value = 0.9561\nalternative hypothesis: true mu is greater than 0\n```\n\n\n:::\n:::\n\n\nIn SAS, the following results is obtained. The one-sided p-values clearly do not match ({coin} p-value = 0.9561; SAS p-value = 0.0439). As mentioned above, SAS reports the p-value in the direction of the test statistic. This can cause an *unexpected* one-sided p-value, if your data provides a test statistic in the other directiont than the pre-specified one-sided hypothesis. 
Do note that $1 - 0.9561 = 0.0439$.\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ranksum/SAS_one_sided_pvalue2.png){fig-align='center' width=90%}\n:::\n:::\n\n\n\n### Exact results\n\n**Placebo and High Dose group**\n\nLet us the {coin} package in R to compare the Placebo and High Dose group:\n\n::: {.cell}\n\n```{.r .cell-code}\n# Note: greater implies that H1 is Y1 - Y2 = Placebo - High > 0\ncoin::wilcox_test(\n Y ~ factor(ARM, levels = c(\"Placebo\", \"High\")),\n distribution = \"exact\",\n alternative = \"greater\",\n data = dat_used %>% dplyr::filter(ARM %in% c(\"Placebo\", \"High\")))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tExact Wilcoxon-Mann-Whitney Test\n\ndata: Y by\n\t factor(ARM, levels = c(\"Placebo\", \"High\")) (Placebo, High)\nZ = 2.5352, p-value = 0.004682\nalternative hypothesis: true mu is greater than 0\n```\n\n\n:::\n:::\n\n\nIn SAS (see above), the same one-sided p-value of 0.0047 is obtained.\n\n\n**Placebo and Low Dose group**\n\nLet us the {coin} package in R to compare the Placebo and Low Dose group:\n\n::: {.cell}\n\n```{.r .cell-code}\n# Note: greater implies that H1 is Y1 - Y2 = Placebo - High > 0\ncoin::wilcox_test(\n Y ~ factor(ARM, levels = c(\"Placebo\", \"Low\")),\n distribution = \"exact\",\n alternative = \"greater\",\n data = dat_used %>% dplyr::filter(ARM %in% c(\"Placebo\", \"Low\")))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tExact Wilcoxon-Mann-Whitney Test\n\ndata: Y by\n\t factor(ARM, levels = c(\"Placebo\", \"Low\")) (Placebo, Low)\nZ = -1.7066, p-value = 0.9574\nalternative hypothesis: true mu is greater than 0\n```\n\n\n:::\n:::\n\n\nPlease see above for the SAS result. The one-sided p-values clearly do not match ({coin} p-value = 0.9574; SAS p-value = 0.0455).\n\n\n\n## Summary and Recommendation\n\nWilcoxon Rank Sum test and the associated Hodges-Lehmann CI are able to be consistently computed in both SAS and R. 
The user needs to be aware of some small differences:\n\n- In SAS the `exact wilcoxon hl` statement is needed to get both the exact p-value and CI.\n\n- In {stats} exact values are only possible when there are no ties and the exact parameter is set to true (`exact = TRUE`). This will give the exact p-value and CI.\n\n- In {coin} it is not possible to do a normal approximation **with** continuity correction.\n\n- For the asymptotic Hodges-Lehmann estimator, {stats} and {coin} use an algorithm to define the estimate, whereas SAS provides the *traditional* Hodges-Lehmann estimator. \n\nIf you have a study where you would like to use R for the exact Wilcoxon Rank Sum test and there is the risk of ties, {coin} would be recommended.\n\n\n\n## Ties\n\nIn all presented R packages and SAS, when there are tied values, the average score method (mid-ranks) is used. This is done by first sorting the observations in ascending order and assigning ranks as if there were no ties. The procedure averages the scores for tied observations and assigns this average score to each of the tied observations. 
Thus, all tied data values have the same score value.\n\n\n## Additional References\n\nProvided are references and additional reading materials for both R and SAS documentation related to the analysis.\n\n**R Documentation:**\n\n- `wilcox.test` function: \n\n- `wilcox_test` function: \n\n**SAS Documentation:**\n\n- `PROC npar1way`: \n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os Ubuntu 24.04.3 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-17\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P coin * 1.4-3 2023-09-27 [?] 
RSPM (R 4.5.0)\n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.2/lib/R/library\n\n P ── Loaded and on-disk path mismatch.\n\n─ External software ──────────────────────────────────────────────────────────\n setting value\n SAS 9.04.01M7P080520\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n:::\n", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/Comp/r-sas_anova/execute-results/html.json b/_freeze/Comp/r-sas_anova/execute-results/html.json index 0ba37e414..06c18572f 100644 --- a/_freeze/Comp/r-sas_anova/execute-results/html.json +++ b/_freeze/Comp/r-sas_anova/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "79157f1cc7d0b4eae8e7d97d52ab4baf", + "hash": "413f591182708dfadf5464b6fb2b5ba2", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"R vs SAS Linear Models\"\nexecute: \n eval: false\n---\n\n# R vs. SAS ANOVA\n\n## Introduction\n\nThis section compares the implementation of analysis of variance (ANOVA) in R and SAS. ANOVA compares the mean of two or more groups to determine if at least one group is significantly different from the others.\n\nR and SAS give the same result for the linear model. But, there some differences with calculating sums of squares. If you are looking for type I sum of square that is available in base R `stats` package using the `anova()` function. Type II and Type III sum of squares are available in the `car` and the `rstatix` packages. 
`rstatix` uses the `car` package to calculate the sum of square, but can be considered easier to use as it handles the contrast for type III automatically.\n\n## General Comparison Table\n\nThe following table provides an overview of the support and results comparability between R and SAS for the new analysis point.\n\n+----------+----------------+------------------+---------------+------------------------------------------+\n| Analysis | Supported in R | Supported in SAS | Results Match | Notes |\n+==========+================+==================+===============+==========================================+\n| ANOVA | Yes ✅ | Yes ✅ | Mostly yes | R can't calculate type IV Sum of Squares |\n+----------+----------------+------------------+---------------+------------------------------------------+\n\n### Matching Contrasts: R and SAS {.unnumbered}\n\n### Scenario 1: Basic Functionality\n\n#### R Code Example\n\nIn order to get the ANOVA model fit and sum of squares you can use the `anova` function in the `stats` package.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(emmeans)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWelcome to emmeans.\nCaution: You lose important information if you filter this package's results.\nSee '? untidy'\n```\n\n\n:::\n\n```{.r .cell-code}\ndrug_trial <- read.csv(\"../data/drug_trial.csv\")\n\nlm_model <- lm(formula = post ~ pre + drug, data = drug_trial)\nlm_model |>\n anova()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nAnalysis of Variance Table\n\nResponse: post\n Df Sum Sq Mean Sq F value Pr(>F) \npre 1 802.94 802.94 50.0393 1.639e-07 ***\ndrug 2 68.55 34.28 2.1361 0.1384 \nResiduals 26 417.20 16.05 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n```\n\n\n:::\n:::\n\n\nIt is recommended to use the `emmeans` package to get the contrasts between R.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlm_model |>\n emmeans(\"drug\") |>\n contrast(\n method = list(\n \"C vs A\" = c(-1, 1, 0),\n \"E vs CA\" = c(-1, -1, 2)\n )\n )\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n contrast estimate SE df t.ratio p.value\n C vs A 0.109 1.80 26 0.061 0.9521\n E vs CA 6.783 3.28 26 2.067 0.0488\n```\n\n\n:::\n:::\n\n\nIn SAS, all contrasts must be manually defined, but the syntax is largely similar in both.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc glm data=work.mycsv;\n class drug;\n model post = pre drug / solution;\n estimate 'C vs A' drug -1 1 0;\n estimate 'E vs CA' drug -1 -1 2;\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/linear/clipboard-2025781059.png){fig-align='center' width=75%}\n:::\n\n::: {.cell-output-display}\n![](../images/linear/clipboard-1394032862.png){fig-align='center' width=75%}\n:::\n:::\n\n\n#### Results Comparison\n\nProvided below is a detailed comparison of the results obtained from both SAS and R.\n\n##### Sums of Squares\n\n+------------------------+-------------+-------------+-------------+\n| Statistic | R Result | SAS Result | Match |\n+========================+=============+=============+=============+\n| Sum of Square (Type I) | 802.94 | 802.94 | Yes |\n| | | | |\n| | 68.55 | 68.55 | |\n+------------------------+-------------+-------------+-------------+\n| Degrees of Freedom | 1 | 1 | Yes |\n| | | | |\n| | 2 | 2 | |\n+------------------------+-------------+-------------+-------------+\n| Mean Square | 802.94 | 802.94 | Yes |\n| | | | |\n| | 34.28 | 34.28 | |\n+------------------------+-------------+-------------+-------------+\n| F Value | 50.04 | 50.04 | Yes |\n| | | | |\n| | 2.14 | 2.14 | |\n+------------------------+-------------+-------------+-------------+\n| p-value | \\<0.0001 | \\<0.0001 | Yes |\n| | | | |\n| 
| 0.1384 | 0.1384 | |\n+------------------------+-------------+-------------+-------------+\n\n##### Contrasts\n\n| Statistic | R Result | SAS Result | Match |\n|---------------------------------|----------|------------|-------|\n| ***contrast estimate C vs A*** | 0.109 | 0.109 | Yes |\n| SE | 1.80 | 1.80 | Yes |\n| t-ratio | 0.06 | 0.06 | Yes |\n| p-value | 0.9521 | 0.9521 | Yes |\n| ***contrast estimate E vs CA*** | 6.783 | 6.783 | Yes |\n| SE | 3.28 | 3.28 | Yes |\n| t-ratio | 2.07 | 2.07 | Yes |\n| p-value | 0.0488 | 0.0488 | Yes |\n\nNote, however, that there are some cases where the scale of the parameter estimates between SAS and R is off, though the test statistics and p-values are identical. In these cases, we can adjust the SAS code to include a divisor. As far as we can tell, this difference only occurs when using the predefined Base R contrast methods like `contr.helmert`.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc glm data=work.mycsv;\n class drug;\n model post = pre drug / solution;\n estimate 'C vs A' drug -1 1 0 / divisor = 2;\n estimate 'E vs CA' drug -1 -1 2 / divisor = 6;\nrun;\n```\n:::\n\n\n## Summary and Recommendation\n\nThere were no major differences between the R emmeans package and the SAS PROC GLM step in conducting ANOVA on the clinical trial data. Both are robust software tools that generate mostly same results. Scaling for parameter coefficients need to be handled with care however as contrast estimates between R and S differed by a sign.\n\n## Additional References\n\nProvide references and additional reading materials for both R and SAS documentation related to the analysis.\n\n**R Documentation:**\n\n- `lm` function: \n- `emmeans` package: \n\n**SAS Documentation:**\n\n- `PROC GLM`: ", + "markdown": "---\ntitle: \"R vs SAS Linear Models\"\n---\n\n# R vs. SAS ANOVA\n\n## Introduction\n\nThis section compares the implementation of analysis of variance (ANOVA) in R and SAS. 
ANOVA compares the mean of two or more groups to determine if at least one group is significantly different from the others.\n\nR and SAS give the same result for the linear model. But, there some differences with calculating sums of squares. If you are looking for type I sum of square that is available in base R `stats` package using the `anova()` function. Type II and Type III sum of squares are available in the `car` and the `rstatix` packages. `rstatix` uses the `car` package to calculate the sum of square, but can be considered easier to use as it handles the contrast for type III automatically.\n\n## General Comparison Table\n\nThe following table provides an overview of the support and results comparability between R and SAS for the new analysis point.\n\n+----------+----------------+------------------+---------------+------------------------------------------+\n| Analysis | Supported in R | Supported in SAS | Results Match | Notes |\n+==========+================+==================+===============+==========================================+\n| ANOVA | Yes ✅ | Yes ✅ | Mostly yes | R can't calculate type IV Sum of Squares |\n+----------+----------------+------------------+---------------+------------------------------------------+\n\n### Matching Contrasts: R and SAS {.unnumbered}\n\n### Scenario 1: Basic Functionality\n\n#### R Code Example\n\nIn order to get the ANOVA model fit and sum of squares you can use the `anova` function in the `stats` package.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(emmeans)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWelcome to emmeans.\nCaution: You lose important information if you filter this package's results.\nSee '? 
untidy'\n```\n\n\n:::\n\n```{.r .cell-code}\ndrug_trial <- read.csv(\"../data/drug_trial.csv\")\n\nlm_model <- lm(formula = post ~ pre + drug, data = drug_trial)\nlm_model |>\n anova()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nAnalysis of Variance Table\n\nResponse: post\n Df Sum Sq Mean Sq F value Pr(>F) \npre 1 802.94 802.94 50.0393 1.639e-07 ***\ndrug 2 68.55 34.28 2.1361 0.1384 \nResiduals 26 417.20 16.05 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n```\n\n\n:::\n:::\n\n\nIt is recommended to use the `emmeans` package to get the contrasts between R.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlm_model |>\n emmeans(\"drug\") |>\n contrast(\n method = list(\n \"C vs A\" = c(-1, 1, 0),\n \"E vs CA\" = c(-1, -1, 2)\n )\n )\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n contrast estimate SE df t.ratio p.value\n C vs A 0.109 1.80 26 0.061 0.9521\n E vs CA 6.783 3.28 26 2.067 0.0488\n```\n\n\n:::\n:::\n\n\nIn SAS, all contrasts must be manually defined, but the syntax is largely similar in both.\n\n```sas\nproc glm data=work.mycsv;\n class drug;\n model post = pre drug / solution;\n estimate 'C vs A' drug -1 1 0;\n estimate 'E vs CA' drug -1 -1 2;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/linear/clipboard-2025781059.png){fig-align='center' width=75%}\n:::\n\n::: {.cell-output-display}\n![](../images/linear/clipboard-1394032862.png){fig-align='center' width=75%}\n:::\n:::\n\n\n#### Results Comparison\n\nProvided below is a detailed comparison of the results obtained from both SAS and R.\n\n##### Sums of Squares\n\n+------------------------+-------------+-------------+-------------+\n| Statistic | R Result | SAS Result | Match |\n+========================+=============+=============+=============+\n| Sum of Square (Type I) | 802.94 | 802.94 | Yes |\n| | | | |\n| | 68.55 | 68.55 | |\n+------------------------+-------------+-------------+-------------+\n| Degrees of Freedom | 1 
| 1 | Yes |\n| | | | |\n| | 2 | 2 | |\n+------------------------+-------------+-------------+-------------+\n| Mean Square | 802.94 | 802.94 | Yes |\n| | | | |\n| | 34.28 | 34.28 | |\n+------------------------+-------------+-------------+-------------+\n| F Value | 50.04 | 50.04 | Yes |\n| | | | |\n| | 2.14 | 2.14 | |\n+------------------------+-------------+-------------+-------------+\n| p-value | \\<0.0001 | \\<0.0001 | Yes |\n| | | | |\n| | 0.1384 | 0.1384 | |\n+------------------------+-------------+-------------+-------------+\n\n##### Contrasts\n\n| Statistic | R Result | SAS Result | Match |\n|---------------------------------|----------|------------|-------|\n| ***contrast estimate C vs A*** | 0.109 | 0.109 | Yes |\n| SE | 1.80 | 1.80 | Yes |\n| t-ratio | 0.06 | 0.06 | Yes |\n| p-value | 0.9521 | 0.9521 | Yes |\n| ***contrast estimate E vs CA*** | 6.783 | 6.783 | Yes |\n| SE | 3.28 | 3.28 | Yes |\n| t-ratio | 2.07 | 2.07 | Yes |\n| p-value | 0.0488 | 0.0488 | Yes |\n\nNote, however, that there are some cases where the scale of the parameter estimates between SAS and R is off, though the test statistics and p-values are identical. In these cases, we can adjust the SAS code to include a divisor. As far as we can tell, this difference only occurs when using the predefined Base R contrast methods like `contr.helmert`.\n\n```sas\nproc glm data=work.mycsv;\n class drug;\n model post = pre drug / solution;\n estimate 'C vs A' drug -1 1 0 / divisor = 2;\n estimate 'E vs CA' drug -1 -1 2 / divisor = 6;\nrun;\n```\n\n## Summary and Recommendation\n\nThere were no major differences between the R emmeans package and the SAS PROC GLM step in conducting ANOVA on the clinical trial data. Both are robust software tools that generate mostly same results. 
Scaling for parameter coefficients need to be handled with care however as contrast estimates between R and S differed by a sign.\n\n## Additional References\n\nProvide references and additional reading materials for both R and SAS documentation related to the analysis.\n\n**R Documentation:**\n\n- `lm` function: \n- `emmeans` package: \n\n**SAS Documentation:**\n\n- `PROC GLM`: ", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/Comp/r-sas_binomial_test/execute-results/html.json b/_freeze/Comp/r-sas_binomial_test/execute-results/html.json new file mode 100644 index 000000000..e7f522ef5 --- /dev/null +++ b/_freeze/Comp/r-sas_binomial_test/execute-results/html.json @@ -0,0 +1,15 @@ +{ + "hash": "a70159ae9225fe08bcbdad2e62fd2aa8", + "result": { + "engine": "knitr", + "markdown": "---\ntitle: \"r-sas_binomial\"\n---\n\n\n\n# Binomial Test Comparison.\n\n## One Sample Proportion\n\nThe following table shows the types of Binomial test analysis, the capabilities of each language, and whether or not the results from each language match.\n\n+-----------------------------------------------------+----------------------+--------------------+-----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| **Analysis** | **Supported in SAS** | **Supported in R** | **Match** | **Notes** 
|\n+-----------------------------------------------------+----------------------+--------------------+-----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Exact Binomial Test | Yes | Yes | Yes | Performed in Base R, using `binom.test()` function which executes the exact test of a single proportion based on exact binomial distribution. SAS uses `PROC FREQ` with binomial option and level to define category of success variable. |\n+-----------------------------------------------------+----------------------+--------------------+-----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Asymptotic Binomial Test (Wald test for proportion) | Yes | Yes | Yes | There is no base R function (library(help=\"stats\")) for one‑sample binomial proportion. The Wald test can be done manually based on z statistic formula; `BinomCI()` gives the confidence interval but does not perform the formal hypothesis test. In SAS, it is implemented by default. To implement continuity correction, apply correct syntax and use `level` to define category of success variable. 
|\n+-----------------------------------------------------+----------------------+--------------------+-----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Mid-P adjusted Exact Binomial Test | Yes | Yes | Yes | Not implemented by base R. Apply R package `exactci` for one sample proportion. SAS applies `PROC FREQ` with `EXACT BINOMIAL / MIDP` option. Mid-P binomial test is less conservative than the exact binomial test. |\n+-----------------------------------------------------+----------------------+--------------------+-----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Wilson score test | Yes | Yes | Yes | Implemented in base R by applying the inbuilt function `prop.test()` which can perform both one and two samples z-tests of proportions. In SAS, it is implemented using `PROC FREQ` with a binomial option and `CL=SCORE` for confidence interval. 
|\n+-----------------------------------------------------+----------------------+--------------------+-----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n\n# Comparison Results\n\nHere is a table of comparison values between `binom.test()` and SAS `PROC FREQ` with binomial option:\n\n1. **Binomial Test on coin flips.**\n\n $H_0 : p = 0.5$\n\n+---------------------------------------+------------------------+----------------------+--------------------------------+-------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Test | Statistic | Binom.test( ) | PROC FREQ with Binomial option | Match | Notes |\n+=======================================+========================+======================+================================+=======+==========================================================================================================================================================================================================================================================================================================================================+\n| Exact Binomial Test (Clopper-Pearson) | Probability of success | 0.52 | 0.52 | Yes | Binomial distributiion 
|\n+---------------------------------------+------------------------+----------------------+--------------------------------+-------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| | Confidence Interval | Lower: 0.4885 | Lower: 0.4885 | Yes | Constructed by inverting exact binomial test, with interval bounds obtained from beta distribution quantile. |\n| | | | | | |\n| | | Upper: 0.5513 | Upper: 0.5514 | | |\n+---------------------------------------+------------------------+----------------------+--------------------------------+-------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| | p- value | 0.2174 | 0.2174 | Yes | Two tailed p_value. R generates two tailed test by default. SAS generates both two sided and right sided p-value. 
|\n+---------------------------------------+------------------------+----------------------+--------------------------------+-------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Asymptotic Binomial Test( Wald test) | Probability of success | 0.52 | 0.52 | Yes | |\n+---------------------------------------+------------------------+----------------------+--------------------------------+-------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| | Confidence interval | Lower: 0.4890 | Lower: 0.4890 | Yes | Use normal approximation to the binomial distribution for confidence intervals and hypothesis tests. |\n| | | | | | |\n| | | Upper: 0.5510 | Upper: 0.5510 | | |\n+---------------------------------------+------------------------+----------------------+--------------------------------+-------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| | p- value | 0.2059 | 0.2059 | Yes | Two sided p_value. Since the Wald test is done manually in R based on z statistic formula, p_value can be calculated for either tail tests. SAS generates both two sided and right sided p-value. 
|\n+---------------------------------------+------------------------+----------------------+--------------------------------+-------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Mid-P adjusted Exact Binomial Test | Probability of success | 0.52 | 0.52 | Yes | Binomial distribution |\n+---------------------------------------+------------------------+----------------------+--------------------------------+-------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| | Confidence interval | Lower: 0.4890 | Lower: 0.4890 | Yes | |\n| | | | | | |\n| | | Upper: 0.5509 | Upper: 0.5509 | | |\n+---------------------------------------+------------------------+----------------------+--------------------------------+-------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| | p-value | Right sided : 0.1031 | Exact one sided: 0.1031 | Yes | SAS default is right sided p-value. R default is two sided p-value. A Request of a right sided test p-value in R matches that of SAS. This because both software apply same midpoint definition. 
|\n| | | | | | |\n| | | Two sided: 0.2061 | | | |\n| | | | | | |\n| | | Left sided: 0.8969 | | | |\n+---------------------------------------+------------------------+----------------------+--------------------------------+-------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Wilson Score Test | Probability of success | 0.52 | 0.52 | Yes | |\n+---------------------------------------+------------------------+----------------------+--------------------------------+-------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| | Confidence interval | Lower: 0.4890 | Lower: 0.4890 | Yes | Obtained by inverting the score (Pearson chi-square) test for a single binomial proportion. It's formed by a set of values that are not rejected by Pearson's chi-square score test. It is applicable for extreme data, for instance, when the number of successes (x)=0 which is unlikely for Wald interval since it collapses \\[0,0\\]. 
|\n| | | | | | |\n| | | Upper: 0.5508 | Upper: 0.5508 | | |\n+---------------------------------------+------------------------+----------------------+--------------------------------+-------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| | p-value | 0.2059 | 0.2059 | Yes | Two sided p-value. R generates two tailed test by default. SAS generates both two sided and right sided p-value. |\n+---------------------------------------+------------------------+----------------------+--------------------------------+-------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n\n2. 
**Binomial Test with Clinical Trial Data.**\n\n$H_0 : p = 0.19$\n\n+----------------------------------------+------------------------+--------------------------+--------------------------------+-------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Test | Statistic | Binom.test( ) | PROC FREQ with binomial option | Match | Notes |\n+========================================+========================+==========================+================================+=======+==========================================================================================================================================================================================================================================================================================================================================+\n| Exact Binomial Test ( Clopper-Pearson) | Probability of success | 0.2763 | 0.2763 | Yes | Binomial distribution |\n+----------------------------------------+------------------------+--------------------------+--------------------------------+-------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| | Confidence interval | Lower:0.2193 | Lower:0.2193 | Yes | Constructed by inverting exact binomial test, with interval bounds obtained from beta distribution quantile. 
|\n| | | | | | |\n| | | Upper:0.3392 | Upper:0.3392 | | |\n+----------------------------------------+------------------------+--------------------------+--------------------------------+-------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| | p-value | 0.0017 | 0.0019 | Yes | Two tailed p-value. R generates two tailed test by default. SAS generates both two sided and right sided p-value. |\n+----------------------------------------+------------------------+--------------------------+--------------------------------+-------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Asymptotic Binomial Test (Wald Test) | Probability of success | 0.2763 | 0.2763 | Yes | |\n+----------------------------------------+------------------------+--------------------------+--------------------------------+-------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| | Confidence interval | Lower: 0.2183 | Lower: 0.2183 | Yes | Use normal approximation to the binomial distribution for confidence intervals and hypothesis tests. 
|\n| | | | | | |\n| | | Upper: 0.3344 | Upper: 0.3344 | | |\n+----------------------------------------+------------------------+--------------------------+--------------------------------+-------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| | p-value | 0.0009 | 0.0009 | Yes | Two sided p_value. Since the Wald test is done manually in R based on z statistic formula, p_value can be calculated for either tail tests. SAS generates both two sided and right sided p-value. |\n+----------------------------------------+------------------------+--------------------------+--------------------------------+-------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Mid-P adjusted Exact Binomial Test | Probability of success | 0.2763 | 0.2763 | Yes | Binomial distribution |\n+----------------------------------------+------------------------+--------------------------+--------------------------------+-------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| | Confidence interval | Lower: 0.2212 | Lower: 0.2212 | Yes | |\n| | | | | | |\n| | | Upper: 0.3371 | Upper: 0.3371 | | 
|\n+----------------------------------------+------------------------+--------------------------+--------------------------------+-------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| | p-value | one-tailed Upper: 0.0008 | exact one sided: 0.0008 | Yes | SAS default is right sided p-value. R default is two sided p-value. A Request of a right sided test p-value in R matches that of SAS. This because both software apply same midpoint definition. |\n| | | | | | |\n| | | Two tailed: 0.0015 | | | |\n| | | | | | |\n| | | one-tailed Lower: 0.9992 | | | |\n+----------------------------------------+------------------------+--------------------------+--------------------------------+-------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Wilson Score Test | Probability of success | 0.2763 | 0.2763 | Yes | |\n+----------------------------------------+------------------------+--------------------------+--------------------------------+-------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| | Confidence interval | Lower: 0.2223 | Lower: 0.2223 | Yes | Obtained by inverting the score (Pearson chi-square) test for a single binomial 
proportion. It's formed by a set of values that are not rejected by Pearson's chi-square score test. It is applicable for extreme data, for instance, when the number of successes (x)=0 which is unlikely for Wald interval since it collapses \\[0,0\\]. |\n| | | | | | |\n| | | Upper: 0.3377 | Upper: 0.3377 | | |\n+----------------------------------------+------------------------+--------------------------+--------------------------------+-------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| | p-value | 0.0009 | 0.0009 | Yes | Two sided p-value. R generates two tailed test by default. SAS generates both two sided and right sided p-value. |\n+----------------------------------------+------------------------+--------------------------+--------------------------------+-------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n\n# Summary and Recommendation\n\nTest results for the two example data are identical in both R and SAS for every instance. For the Exact Binomial test in both software, `binom.test()` and the `PROC FREQ` procedure with Binomial option offer Clopper-Pearson confidence intervals. Default is two sided alternative. Exact test is based on binomial distribution.\n\nAsymptotic Binomial Test uses normal approximation to the binomial distribution for confidence intervals and hypothesis tests, which is suitable for large samples. Default tests are typically two-sided in both R and SAS. 
SAS use normal approximation for binomial proportion test in `PROC FREQ`. Since asymptotic method assumes large samples, it is therefore not reliable for small samples or for proportions closer to 0 or 1. The confidence interval can also go beyond \\[0,1\\].\n\nFor one sample case in R , Mid-P adjusted Exact Binomial Test implements `tsmethod = ' central'` by default which gives Garwood(1936) exact central intervals. They are obtained by inverting the mid-p value function. `PROC FREQ` provides exact mid-p-values if you specify `MIDP` option in the `EXACT` statement `CL=MIDP` for confidence interval.\n\n`prop.test()` by default applies Yates continuity correction (for this case, it was not applied: `correction=FALSE`). Wilson interval corresponds to Pearson's chi-square test. If Yates' continuity correction is implemented to the chi-square test, the resultant confidence interval is that of continuity corrected Wilson interval. It works well for small number of trials(n) and probability of success(p) as well as offering better coverage.\n\nMore detailed information around CIs for proportions can be found [here](https://psiaims.github.io/CAMIS/Comp/r-sas_ci_for_prop.html)\n\n\n# References\n\n`binom.test()` documentation: \n\n`Package 'exactci'` documentation: \n\n`PROC FREQ with binomial option` documentation: \n\n`PROC FREQ with EXACT statement` documentation: \n\n`Binomial proportion confidence interval` documentation: \n", + "supporting": [], + "filters": [ + "rmarkdown/pagebreak.lua" + ], + "includes": {}, + "engineDependencies": {}, + "preserve": {}, + "postProcess": true + } +} \ No newline at end of file diff --git a/_freeze/Comp/r-sas_chi-sq/execute-results/html.json b/_freeze/Comp/r-sas_chi-sq/execute-results/html.json index 964f836df..d588702da 100644 --- a/_freeze/Comp/r-sas_chi-sq/execute-results/html.json +++ b/_freeze/Comp/r-sas_chi-sq/execute-results/html.json @@ -1,11 +1,9 @@ { - "hash": "d40115fc633c8f5301f8f1830e2c0b97", + "hash": 
"b4473d2b236e52cb06c418e4f5450b7c", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"R/SAS Chi-Squared and Fisher's Exact Comparision\"\nexecute: \n eval: false\n---\n\n# Chi-Squared Test\n\nChi-Squared test is a hypothesis test for independent contingency tables, dependent on rows and column totals. The test assumes:\n\n- observations are independent of each other\n\n- all values are 1 or more and at least 80% of the cells are greater than 5.\n\n- data should be categorical\n\nThe Chi-Squared statistic is found by:\n\n$$\n\\chi^2=\\frac{\\sum(O-E)^2}{E}\n$$\n\nWhere O is the observed and E is the expected.\\\nFor an r x c table (where r is the number of rows and c the number of columns), the Chi-squared distribution's degrees of freedom is (r-1)\\*(c-1). The resultant statistic with correct degrees of freedom follows this distribution when its expected values are aligned with the assumptions of the test, under the null hypothesis. The resultant p value informs the magnitude of disagreement with the null hypothesis and not the magnitude of association\n\nFor this example we will use data about cough symptoms and history of bronchitis.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbronch <- matrix(c(26, 247, 44, 1002), ncol = 2)\nrow.names(bronch) <- c(\"cough\", \"no cough\")\ncolnames(bronch) <- c(\"bronchitis\", \"no bronchitis\")\nbronch\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n bronchitis no bronchitis\ncough 26 44\nno cough 247 1002\n```\n\n\n:::\n:::\n\n\nTo a chi-squared test in R you will use the following code.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nstats::chisq.test(bronch)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tPearson's Chi-squared test with Yates' continuity correction\n\ndata: bronch\nX-squared = 11.145, df = 1, p-value = 0.0008424\n```\n\n\n:::\n:::\n\n\nTo run a chi-squared test in SAS you used the following code.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc freq data=proj1.bronchitis;\ntables 
Cough*Bronchitis / chisq;\nrun;\n```\n:::\n\n\nThe result in the \"Chi-Square\" section of the results table in SAS will not match R, in this case it is 12.1804 with a p-value of 0.0005. This is because by default R does a Yates continuity adjustment for 2x2 tables. To change this set `correct` to false.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nstats::chisq.test(bronch, correct = FALSE)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tPearson's Chi-squared test\n\ndata: bronch\nX-squared = 12.18, df = 1, p-value = 0.0004829\n```\n\n\n:::\n:::\n\n\nAlternatively, SAS also produces the adjusted chi-square value by default. It is the \"Continuity Adj. Chi-Square\" value in the results table.\n\n# Egon Pearson Chi-Squared Test\n\nThe 'N-1' corrected $\\chi^2$ test for association in a 2x2 table (for matching a Miettinen-Nurminen confidence interval) is not available in SAS PROC FREQ, but can be obtained from the `%SCORECI` macro (see [this page](https://psiaims.github.io/CAMIS/SAS/ci_for_2indep_prop.html) for details). It is also produced by the corresponding {ratesci} package for R.\n\n# Fisher's Exact Test\n\nComparison between the Fisher's Exact Test in both R and SAS shows that the two software match on the p-value and confidence intervals. The odd ratio does not match. The reason the odds ratio does not match is because R uses an \"exact\" odds ratio based on the hypergeomtric distribution, while SAS uses a standard AD/BC odds ratio. Note that R always uses an \"exact\" Fisher test. Therefore, when trying to match SAS, you must use the \"exact\" statement on the PROC FREQ.\n", - "supporting": [ - "r-sas_chi-sq_files" - ], + "markdown": "---\ntitle: \"R/SAS Chi-Squared and Fisher's Exact Comparision\"\n---\n\n# Chi-Squared Test\n\nChi-Squared test is a hypothesis test for independent contingency tables, dependent on rows and column totals. 
The test assumes:\n\n- observations are independent of each other\n\n- all values are 1 or more and at least 80% of the cells are greater than 5.\n\n- data should be categorical\n\nThe Chi-Squared statistic is found by:\n\n$$\n\\chi^2=\\frac{\\sum(O-E)^2}{E}\n$$\n\nWhere O is the observed and E is the expected.\\\nFor an r x c table (where r is the number of rows and c the number of columns), the Chi-squared distribution's degrees of freedom is (r-1)\\*(c-1). The resultant statistic with correct degrees of freedom follows this distribution when its expected values are aligned with the assumptions of the test, under the null hypothesis. The resultant p value informs the magnitude of disagreement with the null hypothesis and not the magnitude of association\n\nFor this example we will use data about cough symptoms and history of bronchitis.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbronch <- matrix(c(26, 247, 44, 1002), ncol = 2)\nrow.names(bronch) <- c(\"cough\", \"no cough\")\ncolnames(bronch) <- c(\"bronchitis\", \"no bronchitis\")\nbronch\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n bronchitis no bronchitis\ncough 26 44\nno cough 247 1002\n```\n\n\n:::\n:::\n\n\nTo a chi-squared test in R you will use the following code.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nstats::chisq.test(bronch)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tPearson's Chi-squared test with Yates' continuity correction\n\ndata: bronch\nX-squared = 11.145, df = 1, p-value = 0.0008424\n```\n\n\n:::\n:::\n\n\nTo run a chi-squared test in SAS you used the following code.\n\n```sas\nproc freq data=proj1.bronchitis;\ntables Cough*Bronchitis / chisq;\nrun;\n```\n\nThe result in the \"Chi-Square\" section of the results table in SAS will not match R, in this case it is 12.1804 with a p-value of 0.0005. This is because by default R does a Yates continuity adjustment for 2x2 tables. 
To change this set `correct` to false.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nstats::chisq.test(bronch, correct = FALSE)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tPearson's Chi-squared test\n\ndata: bronch\nX-squared = 12.18, df = 1, p-value = 0.0004829\n```\n\n\n:::\n:::\n\n\nAlternatively, SAS also produces the adjusted chi-square value by default. It is the \"Continuity Adj. Chi-Square\" value in the results table.\n\n# Egon Pearson 'N-1' Chi-Squared Test\n\nThe 'N-1' corrected $\\chi^2$ test for association in a 2x2 table (for matching a Miettinen-Nurminen confidence interval) is not available in SAS PROC FREQ, but can be obtained from the `%SCORECI` macro (see [this page](https://psiaims.github.io/CAMIS/SAS/ci_for_2indep_prop.html) for details). It is also produced by the corresponding {ratesci} package for R (and in the form of a z-statistic from `gsDesign::testBinomial(..., adj=1)`).\n\n# Fisher's Exact Test\n\nComparison between the Fisher's Exact Test in both R and SAS shows that the two software match on the p-value and confidence intervals. The odd ratio does not match. The reason the odds ratio does not match is because R uses an \"exact\" odds ratio based on the hypergeomtric distribution, while SAS uses a standard AD/BC odds ratio. Note that R always uses an \"exact\" Fisher test. 
Therefore, when trying to match SAS, you must use the \"exact\" statement on the PROC FREQ.\n", + "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" ], diff --git a/_freeze/Comp/r-sas_ci_for_2indep_prop/execute-results/html.json b/_freeze/Comp/r-sas_ci_for_2indep_prop/execute-results/html.json index 60f8a24b6..e274eeac4 100644 --- a/_freeze/Comp/r-sas_ci_for_2indep_prop/execute-results/html.json +++ b/_freeze/Comp/r-sas_ci_for_2indep_prop/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "4642c76b25ae306cf6f7b01f2c4ac8bb", + "hash": "5080ebdd1c477e41bade0831507c968d", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"R vs SAS Confidence Intervals for Independent Proportions\"\n---\n\n## Introduction\n\nSee the [summary page](../method_summary/ci_for_prop_intro.html) for general introductory information on confidence intervals for proportions, including the principles underlying the most common methods.\n\nFor more technical derivation of methods for comparing independent proportions, see the corresponding [SAS page](https://psiaims.github.io/CAMIS/SAS/ci_for_2indep_prop.html).\n\nThe tables below provide an overview of findings from R & SAS, for calculation of CIs for 2 independent sample proportions.\n\n## General Comparison Table For Two Independent Samples Proportions\n\n+--------------------------------------------------------------------+--------------------------------------------------------------------+------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Analysis of Two Independant Sample Proportions | Supported in R | Supported in SAS | Results Match 
|\n+====================================================================+====================================================================+==================+===============================================================================================================================================================================================================================================+\n| Normal approximation (Wald Method) | Yes {DescTools} | Yes (default) | Yes and results match by hand calculation |\n| | | | |\n| | `BinomDiffCI(..,method=c(\"wald\"))` | | **Warning** The documentation for stats::prop.test which is used in {cardx} says it's using newcombe method. However, the results match the Normal Approximation (wald) method. Hence it is reccomended to use {DescTools} instead of {cardx} |\n| | | | |\n| | Yes {cardx} `ard_stats_prop_test function` uses `stats::prop.test` | | |\n+--------------------------------------------------------------------+--------------------------------------------------------------------+------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Normal approximation (Wald Method) with continuity correction | Yes {DescTools} | Yes | Yes |\n| | | | |\n| | `BinomDiffCI(..,method=c(\"waldcc\"))` | | **Warning** that documentation for stats::prop.test says it's using newcombe method. However, the results match the Normal Approximation (wald) method. 
|\n| | | | |\n| | Yes {cardx} as per above but with correct=TRUE | | |\n+--------------------------------------------------------------------+--------------------------------------------------------------------+------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Wilson (Score, Altman, Newcombe) method | Yes {DescTools} | Yes | Yes and results match by hand calculation |\n| | | | |\n| | `BinomDiffCI(..,method=c(\"score\"))` | | |\n+--------------------------------------------------------------------+--------------------------------------------------------------------+------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Wilson (Score, Altman, Newcombe) method with continuity correction | Yes {DescTools} | Yes | Yes |\n| | | | |\n| | `BinomDiffCI(..,method=c(\"scorecc\"))` | | |\n+--------------------------------------------------------------------+--------------------------------------------------------------------+------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Agresti-Caffo | Yes {DescTools} | Yes | Yes |\n| | | | |\n| | `BinomDiffCI(..,method=c(\"ac\"))` | | 
|\n+--------------------------------------------------------------------+--------------------------------------------------------------------+------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Hauck-Anderson | Yes {DescTools} | Yes | Yes |\n| | | | |\n| | `BinomDiffCI(..,method=c(\"ha\"))` | | |\n+--------------------------------------------------------------------+--------------------------------------------------------------------+------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Miettinen-Nurminen | Yes {DescTools} | Yes | Yes |\n| | | | |\n| | `BinomDiffCI(..,method=c(\"mn\"))` | | |\n+--------------------------------------------------------------------+--------------------------------------------------------------------+------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n\n## Prerequisites: R Packages\n\nSee the [R page](https://psiaims.github.io/CAMIS/R/ci_for_prop.html) for more detail.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Example R packages required\nlibrary(cardx)\nlibrary(ratesci)\n```\n:::\n\n", + "markdown": "---\ntitle: \"R vs SAS Confidence Intervals for Independent Proportions\"\n---\n\n# Introduction\n\nSee the [summary page](../method_summary/ci_for_prop_intro.html) for general introductory information on confidence intervals for proportions, including the principles underlying the most common methods.\n\nFor more 
technical derivation of methods for comparing independent proportions, see the corresponding [SAS page](https://psiaims.github.io/CAMIS/SAS/ci_for_2indep_prop.html).\n\nThe tables below provide an overview of findings from R & SAS, for calculation of CIs for comparisons of 2 independent proportions.\n\nNote that if the number of responses in both groups is zero, SAS fails to produce confidence intervals for the difference.\n\n# Proportion Difference\n\n+---------------------------------------------------------------+---------------------------------------+--------------------------------+-------------------------------------------------------------------------------------------+\n| Analysis of Two Independent Sample Proportions | Supported in R | Supported in SAS | Results Match |\n+===============================================================+=======================================+================================+===========================================================================================+\n| Normal approximation (Wald Method) | Yes {ratesci} `rdci()` | Yes (default) | Yes and results match by hand calculation |\n| | | | |\n| | Yes {DescTools} | | |\n| | | | |\n| | `BinomDiffCI(..,method = c(\"wald\"))` | | |\n+---------------------------------------------------------------+---------------------------------------+--------------------------------+-------------------------------------------------------------------------------------------+\n| Agresti-Caffo | Yes {ratesci} `rdci()` | Yes | Yes |\n| | | | |\n| | Yes {DescTools} | | |\n| | | | |\n| | `BinomDiffCI(..,method=c(\"ac\"))` | | |\n+---------------------------------------------------------------+---------------------------------------+--------------------------------+-------------------------------------------------------------------------------------------+\n| MOVER Wilson (Newcombe hybrid score) method | Yes {ratesci} `rdci()` | Yes ('Newcombe') | Yes and results match by hand calculation. 
|\n| | | | |\n| | Yes {DescTools} | | **Warning**: {DescTools} 'method=score' not to be confused with Asymptotic Score methods. |\n| | | | |\n| | `BinomDiffCI(..,method=c(\"score\"))` | | |\n+---------------------------------------------------------------+---------------------------------------+--------------------------------+-------------------------------------------------------------------------------------------+\n| MOVER Jeffreys | Yes {ratesci} `rdci()` | No | NA |\n+---------------------------------------------------------------+---------------------------------------+--------------------------------+-------------------------------------------------------------------------------------------+\n| Miettinen-Nurminen Asymptotic Score | Yes {ratesci} `rdci()` | Yes | Yes |\n| | | | |\n| | Yes {DescTools} | | |\n| | | | |\n| | `BinomDiffCI(..,method=c(\"mn\"))` | | |\n+---------------------------------------------------------------+---------------------------------------+--------------------------------+-------------------------------------------------------------------------------------------+\n| Mee Asymptotic Score | Yes {ratesci} `rdci()` | Yes ('Miettinen-Nurminen-Mee') | Yes |\n| | | | |\n| | Yes {DescTools} | | |\n| | | | |\n| | `BinomDiffCI(..,method=c(\"mn\"))` | | |\n+---------------------------------------------------------------+---------------------------------------+--------------------------------+-------------------------------------------------------------------------------------------+\n| Skewness-corrected Asymptotic Score | Yes {ratesci} `rdci()` | No | NA |\n| | | | |\n| | Yes {ratesci} | | |\n| | | | |\n| | `scasci(..,contrast=\"RD\")` | | |\n+---------------------------------------------------------------+---------------------------------------+--------------------------------+-------------------------------------------------------------------------------------------+\n| | | | 
|\n+---------------------------------------------------------------+---------------------------------------+--------------------------------+-------------------------------------------------------------------------------------------+\n| Normal approximation (Wald Method) with continuity correction | Yes {ratesci} `rdci(..., cc=TRUE)` | Yes | Yes |\n| | | | |\n| | Yes {DescTools} | | |\n| | | | |\n| | `BinomDiffCI(..,method=c(\"waldcc\"))` | | |\n+---------------------------------------------------------------+---------------------------------------+--------------------------------+-------------------------------------------------------------------------------------------+\n| Hauck-Anderson continuity adjusted | Yes {ratesci} `rdci(..., cc=TRUE)` | Yes | Yes |\n| | | | |\n| | Yes {DescTools} | | |\n| | | | |\n| | `BinomDiffCI(..,method=c(\"ha\"))` | | |\n+---------------------------------------------------------------+---------------------------------------+--------------------------------+-------------------------------------------------------------------------------------------+\n| MOVER Wilson (Newcombe) method with continuity adjustment | Yes {ratesci} `rdci(..., cc=TRUE)` | Yes ('Newcombe (Corrected)') | Yes |\n| | | | |\n| | Yes {DescTools} | | |\n| | | | |\n| | `BinomDiffCI(..,method=c(\"scorecc\"))` | | |\n+---------------------------------------------------------------+---------------------------------------+--------------------------------+-------------------------------------------------------------------------------------------+\n| MOVER Jeffreys with continuity adjustment | Yes {ratesci} `rdci(..., cc=TRUE)` | No | NA |\n+---------------------------------------------------------------+---------------------------------------+--------------------------------+-------------------------------------------------------------------------------------------+\n| Asymptotic Score methods with continuity adjustment | Yes {ratesci} `rdci(..., cc=TRUE)` | No | NA 
|\n+---------------------------------------------------------------+---------------------------------------+--------------------------------+-------------------------------------------------------------------------------------------+\n| 'Exact' methods | No | Yes | NA |\n+---------------------------------------------------------------+---------------------------------------+--------------------------------+-------------------------------------------------------------------------------------------+\n\n# Relative Risk\n\n(Continuity-adjusted methods omitted for brevity)\n\n+------------------------------------------------+-----------------------------------------+--------------------------+---------------+\n| Analysis of Two Independent Sample Proportions | Supported in R | Supported in SAS | Results Match |\n+================================================+=========================================+==========================+===============+\n| Normal approximation (Wald/Katz log Method) | Yes {ratesci} `rrci()` | Yes (default) | Yes |\n| | | | |\n| | Yes {DescTools} | | |\n| | | | |\n| | `BinomRatioCI(..,method=c(\"katz.log\"))` | | |\n+------------------------------------------------+-----------------------------------------+--------------------------+---------------+\n| Likelihood ratio | No | Yes | NA |\n| | | | |\n| | | `CL=LR` | |\n+------------------------------------------------+-----------------------------------------+--------------------------+---------------+\n| MOVER-R Wilson | Yes {ratesci} `rrci()` | No | NA |\n+------------------------------------------------+-----------------------------------------+--------------------------+---------------+\n| MOVER-R Jeffreys | Yes {ratesci} `rrci()` | No | NA |\n+------------------------------------------------+-----------------------------------------+--------------------------+---------------+\n| Miettinen-Nurminen Asymptotic Score | Yes {ratesci} `rrci()` | Yes ('Score') | Yes |\n| | | | |\n| | Yes 
{DescTools} | `CL=score` | |\n| | | | |\n| | `BinomRatioCI(..,method=c(\"mn\"))` | | |\n+------------------------------------------------+-----------------------------------------+--------------------------+---------------+\n| Koopman Asymptotic Score | Yes {ratesci} `rrci()` | Yes | Yes |\n| | | | |\n| | Yes {DescTools} | `CL=(score(correct=no))` | |\n| | | | |\n| | `BinomDiffCI(..,method=c(\"mn\"))` | | |\n+------------------------------------------------+-----------------------------------------+--------------------------+---------------+\n| Skewness-corrected Asymptotic Score | Yes {ratesci} `rrci()` | No | NA |\n+------------------------------------------------+-----------------------------------------+--------------------------+---------------+\n| Continuity adjusted methods | Yes {ratesci} `rrci(..., cc=TRUE)` | No | NA |\n+------------------------------------------------+-----------------------------------------+--------------------------+---------------+\n\n# Odds Ratio\n\n(Continuity-adjusted methods omitted for brevity)\n\n+------------------------------------------------+---------------------------+--------------------------+---------------+\n| Analysis of Two Independent Sample Proportions | Supported in R | Supported in SAS | Results Match |\n+================================================+===========================+==========================+===============+\n| Normal approximation (Wald/Woolf logit Method) | Yes {ratesci} `orci()` | Yes (default) | Yes |\n| | | | |\n| | Yes {contingencytables} | | |\n| | | | |\n| | `Woolf_logit_CI_2x2()` | | |\n+------------------------------------------------+---------------------------+--------------------------+---------------+\n| Likelihood ratio | No | Yes | NA |\n| | | | |\n| | | `CL=LR` | |\n+------------------------------------------------+---------------------------+--------------------------+---------------+\n| Mid-P | Yes {contingencytables} | Yes | Yes |\n| | | | |\n| | `Cornfield_midP_CI_2x2()` | | 
|\n+------------------------------------------------+---------------------------+--------------------------+---------------+\n| MOVER-R Wilson | Yes {ratesci} `orci()` | No | NA |\n+------------------------------------------------+---------------------------+--------------------------+---------------+\n| MOVER-R Jeffreys | Yes {ratesci} `orci()` | No | NA |\n+------------------------------------------------+---------------------------+--------------------------+---------------+\n| Miettinen-Nurminen Asymptotic Score | Yes {ratesci} `orci()` | Yes ('Score') | Yes |\n| | | | |\n| | | `CL=score` | |\n+------------------------------------------------+---------------------------+--------------------------+---------------+\n| Uncorrected Asymptotic Score | Yes {ratesci} `orci()` | Yes | Yes |\n| | | | |\n| | | `CL=(score(correct=no))` | |\n+------------------------------------------------+---------------------------+--------------------------+---------------+\n| Skewness-corrected Asymptotic Score | Yes {ratesci} `orci()` | No | NA |\n+------------------------------------------------+---------------------------+--------------------------+---------------+\n| Continuity adjusted methods | Yes {ratesci} | No | NA |\n| | | | |\n| | `orci(..., cc=TRUE)` | | |\n+------------------------------------------------+---------------------------+--------------------------+---------------+\n\n## Prerequisites: R Packages\n\nSee the [R page](https://psiaims.github.io/CAMIS/R/ci_for_prop.html) for more detail.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Example R packages required\n# pak::pak(\"petelaud/ratesci\") # development version from GitHub\nlibrary(ratesci)\n```\n:::\n\n", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/Comp/r-sas_ci_for_prop/execute-results/html.json b/_freeze/Comp/r-sas_ci_for_prop/execute-results/html.json index 2f467c8cd..d460fbd5b 100644 --- a/_freeze/Comp/r-sas_ci_for_prop/execute-results/html.json +++ 
b/_freeze/Comp/r-sas_ci_for_prop/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "f75cd805c47f03a16db15cc840fad2ff", + "hash": "1203d113f7679a0f3f459b38184d4705", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"R vs SAS Confidence Intervals for a Proportion\"\n---\n\n## Introduction\n\nSee the [summary page](../method_summary/ci_for_prop_intro.html) for general introductory information on confidence intervals for proportions, including the principles underlying the most common methods.\n\nFor more technical derivation of methods for a single proportion, see the corresponding [SAS page](https://psiaims.github.io/CAMIS/SAS/ci_for_prop.html).\n\nThe tables below provide an overview of findings from R & SAS, for calculation of CIs for a Single Sample Proportion.\n\n## General Comparison Table For Single Sample Proportions\n\nSee the corresponding [SAS page](https://psiaims.github.io/CAMIS/SAS/ci_for_prop.html) and [R page](https://psiaims.github.io/CAMIS/R/ci_for_prop.html) for results showing a single set of data which has been run through both SAS and R.\n\n| Analysis of One Sample Proportion | Supported in R | Supported in SAS | Results Match |\n|----------------------------|-----------------|--------------|----------------|\n| Normal approximation (Wald Method) | Yes {cardx} / {ratesci} | Yes (default) | Yes |\n| Wilson (Score) method | Yes {cardx} / {ratesci} | Yes | Yes |\n| Agresti-Coull | Yes {cardx} / {ratesci} | Yes | Yes |\n| Jeffreys Bayesian 'equal-tailed' | Yes {cardx} / {ratesci} | Yes | Yes |\n| mid-P | Yes {ratesci} | Yes | Yes |\n| Wilson Stratified score | Yes {cardx} / {ratesci} | No | NA |\n| 'exact' and continuity adjusted methods: | | | |\n| Clopper-Pearson 'Exact' | Yes {cardx} / {ratesci} | Yes (default) | Yes |\n| Blaker 'exact' | Yes {ratesci} | Yes | Yes |\n| Normal approximation (Wald Method) with continuity correction | Yes {cardx} / {ratesci} | Yes | Yes |\n| Wilson (Score) method with continuity correction | Yes {cardx} 
/ {ratesci} | Yes | Yes |\n\n## Prerequisites: R Packages\n\nSee the [R page](https://psiaims.github.io/CAMIS/R/ci_for_prop.html) for more detail.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Example R packages required\nlibrary(cardx)\nlibrary(ratesci)\n```\n:::\n\n", + "markdown": "---\ntitle: \"R vs SAS Confidence Intervals for a Proportion\"\n---\n\n## Introduction\n\nSee the [summary page](../method_summary/ci_for_prop_intro.html) for general introductory information on confidence intervals for proportions, including the principles underlying the most common methods.\n\nFor more technical derivation of methods for a single proportion, see the corresponding [SAS page](https://psiaims.github.io/CAMIS/SAS/ci_for_prop.html).\n\nThe tables below provide an overview of findings from R & SAS, for calculation of CIs for a Single Sample Proportion.\n\n## General Comparison Table For Single Sample Proportions\n\nSee the corresponding [SAS page](https://psiaims.github.io/CAMIS/SAS/ci_for_prop.html) and [R page](https://psiaims.github.io/CAMIS/R/ci_for_prop.html) for results showing a single set of data which has been run through both SAS and R.\n\n| Analysis of One Sample Proportion | Supported in R | Supported in SAS | Results Match |\n|----|----|----|----|\n| Normal approximation (Wald Method) | Yes {cardx} / {ratesci} | Yes (default) | Yes |\n| Wilson (Score) method | Yes {cardx} / {ratesci} | Yes | Yes |\n| Skewness-Corrected Asymptotic Score (SCAS) method | Yes {ratesci} | No | NA |\n| Agresti-Coull | Yes {cardx} / {ratesci} | Yes | Yes |\n| Jeffreys Bayesian 'equal-tailed' | Yes {cardx} / {ratesci} | Yes | Yes |\n| mid-P | Yes {ratesci} | Yes | Yes |\n| Wilson Stratified score | Yes {cardx} / {ratesci} | No | NA |\n| 'exact' and continuity adjusted methods: | | | |\n| Clopper-Pearson 'Exact' | Yes {cardx} / {ratesci} | Yes (default) | Yes |\n| Blaker 'exact' | Yes {ratesci} | Yes | Yes |\n| Normal approximation (Wald Method) with continuity adjustment | Yes {cardx} / 
{ratesci} | Yes | Yes |\n| Wilson (Score) method with continuity adjustment | Yes {cardx} / {ratesci} | Yes | Yes |\n| SCAS method with continuity adjustment | Yes {ratesci} | No | NA |\n\n## Prerequisites: R Packages\n\nSee the [R page](https://psiaims.github.io/CAMIS/R/ci_for_prop.html) for more detail.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Example R packages required\nlibrary(cardx)\nlibrary(ratesci)\n```\n:::\n\n", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/Comp/r-sas_cmh/execute-results/html.json b/_freeze/Comp/r-sas_cmh/execute-results/html.json index a85271d5b..feb692c9f 100644 --- a/_freeze/Comp/r-sas_cmh/execute-results/html.json +++ b/_freeze/Comp/r-sas_cmh/execute-results/html.json @@ -2,10 +2,8 @@ "hash": "3bc26257ed656f9afc53b637fac77f36", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"R vs SAS CMH\"\necho: false\n---\n\n\n\n# Cochran-Mantel-Haenszel Test\n\nThe CMH procedure tests for conditional independence in partial contingency tables for a 2 x 2 x K design. However, it can be generalized to tables of X x Y x K dimensions.\n\n\n::: {.cell layout-align=\"center\"}\n\n```{.r .cell-code}\nknitr::include_graphics('../images/cmh/img.png')\n```\n\n::: {.cell-output-display}\n![](../images/cmh/img.png){fig-align='center' width=162}\n:::\n:::\n\n\n## Naming Convention\n\nFor the remainder of this document, we adopt the following naming convention when referring to variables of a contingency table:\n\n- X = exposure (often the treatment variable)\n\n- Y = response (the variable of interest)\n\n- K = control/strata (often a potential confounder you want to control for)\n\n## Scale\n\nThe `scale` of the exposure (X) and response (Y) variables dictate which test statistic is computed for the contingency table. 
Each test statistic is evaluated on different degrees of freedom (df):\n\n- `General association` statistic (X and Y both nominal) results in `(X-1) * (Y-1) dfs`\n\n- `Row mean` scores statistic (X is nominal and Y is ordinal) results in `X-1 dfs`\n\n- `Nonzero correlation` statistic (X and Y both ordinal) results in `1 df`\n\n# Testing Strategy\n\n## Data\n\nTo begin investigating the differences in the SAS and R implementations of the CMH test, we decided to use the CDISC Pilot data set, which is publicly available on the PHUSE Test Data Factory repository. We applied very basic filtering conditions upfront (see below) and this data set served as the basis of the examples to follow.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndata <- read.csv(\"../data/adcibc.csv\")\nhead(data)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n X STUDYID SITEID SITEGR1 USUBJID TRTSDT TRTEDT\n1 1 CDISCPILOT01 701 701 01-701-1015 2014-01-02 2014-07-02\n2 2 CDISCPILOT01 701 701 01-701-1023 2012-08-05 2012-09-01\n3 3 CDISCPILOT01 701 701 01-701-1028 2013-07-19 2014-01-14\n4 4 CDISCPILOT01 701 701 01-701-1033 2014-03-18 2014-03-31\n5 5 CDISCPILOT01 701 701 01-701-1034 2014-07-01 2014-12-30\n6 6 CDISCPILOT01 701 701 01-701-1047 2013-02-12 2013-03-09\n TRTP TRTPN AGE AGEGR1 AGEGR1N RACE RACEN SEX ITTFL EFFFL\n1 Placebo 0 63 <65 1 WHITE 1 F Y Y\n2 Placebo 0 64 <65 1 WHITE 1 M Y Y\n3 Xanomeline High Dose 81 71 65-80 2 WHITE 1 M Y Y\n4 Xanomeline Low Dose 54 74 65-80 2 WHITE 1 M Y Y\n5 Xanomeline High Dose 81 77 65-80 2 WHITE 1 F Y Y\n6 Placebo 0 85 >80 3 WHITE 1 F Y Y\n COMP24FL AVISIT AVISITN VISIT VISITNUM ADY ADT PARAMCD\n1 Y Week 8 8 WEEK 8 8 63 2014-03-05 CIBICVAL\n2 N Week 8 8 WEEK 4 5 29 2012-09-02 CIBICVAL\n3 Y Week 8 8 WEEK 8 8 54 2013-09-10 CIBICVAL\n4 N Week 8 8 WEEK 4 5 28 2014-04-14 CIBICVAL\n5 Y Week 8 8 WEEK 8 8 57 2014-08-26 CIBICVAL\n6 N Week 8 8 AMBUL ECG REMOVAL 6 46 2013-03-29 CIBICVAL\n PARAM PARAMN AVAL ANL01FL DTYPE AWRANGE AWTARGET AWTDIFF AWLO AWHI AWU\n1 CIBIC 
Score 1 4 Y NA 2-84 56 7 2 84 DAYS\n2 CIBIC Score 1 3 Y NA 2-84 56 27 2 84 DAYS\n3 CIBIC Score 1 4 Y NA 2-84 56 2 2 84 DAYS\n4 CIBIC Score 1 4 Y NA 2-84 56 28 2 84 DAYS\n5 CIBIC Score 1 4 Y NA 2-84 56 1 2 84 DAYS\n6 CIBIC Score 1 4 Y NA 2-84 56 10 2 84 DAYS\n QSSEQ\n1 6001\n2 6001\n3 6001\n4 6001\n5 6001\n6 6001\n```\n\n\n:::\n:::\n\n\n## Schemes\n\nIn order to follow a systematic approach to testing, and to cover variations in the CMH test, we considered the traditional 2 x 2 x K design as well as scenarios where the generalized CMH test is employed (e.g. 5 x 3 x 3).\n\nWe present 6 test scenarios, some of which have sparse data.\n\n| Number | Schema (X x Y x K) | Variables | Relevant Test | Description |\n|---------------|---------------|---------------|---------------|---------------|\n| 1 | 2x2x2 | X = TRTP, Y = SEX, K = AGEGR1 | General Association | TRTP and AGEGR1 were limited to two categories (removing the low dose and \\>80 year group), overall the the groups were rather balanced |\n| 2 | 3x2x3 | X = TRTP, Y = SEX, K = AGEGR1 | General Association | TRTP and AGEGR1 each have 3 levels, SEX has 2 levels, overall the the groups were rather balanced |\n| 3 | 3x2x3 | X = TRTP, Y = SEX, K = RACE | General Association | One stratum of RACE has only n=1 |\n| 6 | 2x5x2 | X = TRTP, Y = AVAL, K = SEX | Row Means | Compare Row Means results because Y is ordinal |\n| 9 | 3x5x17 | X = TRTP, Y = AVAL, K = SITEID | Row Means | SITEID has many strata and provokes sparse groups; AVAL is ordinal, therefore row means statistic applies here |\n| 10 | 5x3x3 | X = AVAL, Y = AGEGR1N, K = TRTP | Correlation | X and Y are ordinal variables and therefore the correlation statistics can be used |\n\n# Results\n\n## CMH Statistics\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsas_results <- tribble(\n ~Scenario, ~Test, ~Chisq, ~Df, ~Prob,\n 1L ,\"Correlation\", 0.2166, 1, 0.6417,\n 1L ,\"Row Means\", 0.2166, 1, 0.6417,\n 1L ,\"General Association\", 0.2166, 1, 0.6417,\n \n 2L 
,\"Correlation\", 0.0009, 1, 0.9765,\n 2L ,\"Row Means\", 2.4820, 1, 0.2891,\n 2L ,\"General Association\", 2.4820, 1, 0.2891,\n \n 3L ,\"Correlation\", 0.0028, 1, 0.9579,\n 3L ,\"Row Means\", 2.3861, 2, 0.3033, \n 3L ,\"General Association\", 2.3861, 2, 0.3033,\n \n 6L ,\"Correlation\", 1.7487, 1, 0.1860,\n 6L ,\"Row Means\", 1.7487, 1, 0.1860,\n 6L ,\"General Association\", 8.0534, 4, 0.0896,\n \n 9L ,\"Correlation\", 0.0854, 1, 0.7701,\n 9L ,\"Row Means\", 2.4763, 2, 0.2899,\n 9L ,\"General Association\", 7.0339, 8, 0.5330,\n \n 10L ,\"Correlation\", 1.6621, 1, 0.1973,\n 10L ,\"Row Means\", 2.2980, 4, 0.6811,\n 10L ,\"General Association\",5.7305, 8, 0.6774\n) |>\n mutate(lang = \"SAS\")\n```\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(vcdExtra)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nLoading required package: vcd\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nLoading required package: grid\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nLoading required package: gnm\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nRegistered S3 method overwritten by 'vcdExtra':\n method from\n print.Kappa vcd \n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\n\nAttaching package: 'vcdExtra'\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nThe following object is masked from 'package:vcd':\n\n woolf_test\n```\n\n\n:::\n\n```{.r .cell-code}\ndata2 <- data |>\n filter(TRTPN != \"54\" & AGEGR1 != \">80\")\n\ns1 <- vcdExtra::CMHtest(\n Freq ~ TRTP + SEX | AGEGR1,\n data = data2,\n overall = TRUE\n)$ALL$table\n\ns2 <- vcdExtra::CMHtest(\n Freq ~ TRTP + SEX | AGEGR1,\n data = data,\n overall = TRUE\n)$ALL$table\n\ns3 <- vcdExtra::CMHtest(\n Freq ~ TRTP + SEX | RACE,\n data = data,\n overall = TRUE\n)$ALL$table\n\ns6 <- vcdExtra::CMHtest(\n Freq ~ TRTP + AVAL | SEX,\n data = data2,\n overall = TRUE\n)$ALL$table\n\n## Unable to run: For large sparse table (many strata) CMHTest will 
occasionally throw an error in solve.default(AVA) because of singularity\n# s9 <- vcdExtra::CMHtest(\n# Freq ~ TRTP + AVAL | SITEID,\n# data = data, overall = TRUE\n# )$ALL$table\n\ns10 <- vcdExtra::CMHtest(\n Freq ~ AVAL + AGEGR1N | TRTP,\n data = data,\n overall = TRUE\n)$ALL$table\n\n\n# Summarize the results\nr_results <- list(s1, s2, s3, s6, s10) |>\n map(function(x) {\n as_tibble(x) |>\n mutate(across(everything(), unlist), Test = rownames(x))\n }) |>\n reduce(bind_rows) |>\n mutate(\n Scenario = rep(c(1, 2, 3, 6, 10), each = 4),\n Test = case_when(\n Test == \"cor\" ~ \"Correlation\",\n Test == \"rmeans\" ~ \"Row Means\",\n Test == \"general\" ~ \"General Association\"\n ),\n lang = \"R\"\n ) |>\n filter(!is.na(Test))\n```\n:::\n\n\nAs can be seen, there are 2 scenarios where `vcdExtra` in R does not provide any results:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(gt)\n\nbind_rows(sas_results, r_results) |>\n arrange(Scenario) |>\n pivot_wider(names_from = lang, values_from = c(\"Chisq\", \"Df\", \"Prob\")) |>\n gt(\n groupname_col = \"Scenario\"\n ) |>\n tab_spanner(\n label = \"Chi-Square\",\n columns = starts_with(\"Chisq\")\n ) |>\n tab_spanner(\n label = \"df\",\n columns = starts_with(\"Df\")\n ) |>\n tab_spanner(\n label = \"p-value\",\n columns = starts_with(\"Prob\")\n ) |>\n cols_label(\n Chisq_SAS = \"SAS\",\n Chisq_R = \"R\",\n Df_SAS = \"SAS\",\n Df_R = \"R\",\n Prob_SAS = \"SAS\",\n Prob_R = \"R\"\n ) |>\n tab_options(row_group.as_column = TRUE) |>\n tab_footnote(\n footnote = md(\n \"**Reason for NaN in scenario 3**: Stratum k = AMERICAN INDIAN OR ALASKA NATIVE has n=1.\"\n ),\n cells_row_groups(groups = \"3\"),\n placement = \"right\"\n ) |>\n tab_footnote(\n footnote = md(\n \"**Reason for in scenario 9:** For large sparse table (many strata) CMHTest will throw an error in solve.default(AVA) because of singularity\"\n ),\n cells_row_groups(groups = \"9\"),\n placement = \"right\"\n ) |>\n opt_footnote_marks(marks = 
\"standard\")\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n \n \n \n \n \n \n \n \n
Test\n
Chi-Square
\n
\n
df
\n
\n
p-value
\n
SASRSASRSASR
1Correlation0.21660.2165549886110.64170.64167748
Row Means0.21660.2165549886110.64170.64167748
General Association0.21660.2165549886110.64170.64167748
2Correlation0.00090.0008689711110.97650.97648311
Row Means2.48202.4820278527120.28910.28909095
General Association2.48202.4820278527120.28910.28909095
3*Correlation0.00280.0027871297110.95790.95789662
Row Means2.38612.3860698467220.30330.30329938
General Association2.38612.3860698467220.30330.30329938
6Correlation1.74871.7487003723110.18600.18604020
Row Means1.74871.7487003723110.18600.18604020
General Association8.05348.0533878514440.08960.08964199
9Correlation0.0854NA1NA0.7701NA
Row Means2.4763NA2NA0.2899NA
General Association7.0339NA8NA0.5330NA
10Correlation1.66211.6620500937110.19730.19732675
Row Means2.29802.2980213984440.68110.68112931
General Association5.73055.7305381934880.67740.67738613
* Reason for NaN in scenario 3: Stratum k = AMERICAN INDIAN OR ALASKA NATIVE has n=1.
Reason for in scenario 9: For large sparse table (many strata) CMHTest will throw an error in solve.default(AVA) because of singularity
\n
\n```\n\n:::\n:::\n\n\n# Summary and Recommendation\n\nHaving explored the available R packages to calculate the CMH statistics, the base `stats::mantelhaen.test()` function can be recommended for the classic CMH test in the 2 x 2 x K scenarios. The `vcdExtra` package shows matching results with SAS, however in cases with sparse data the `vcdExtra` package will not provide results.\n\nThe base `stats::mantelhaen.test()` function does return results in cases of sparse data (required n \\> 1 in each strata).\n\n# References\n\nAccessible Summary: \n\nAn Introduction to Categorical Data Analysis 2nd Edition (Agresti): \n\nSAS documentation (Specification): \n\nSAS documentation (Theoretical Basis + Formulas): \n\nOriginal Paper 1: \n\nOriginal Paper 2: ", - "supporting": [ - "r-sas_cmh_files" - ], + "markdown": "---\ntitle: \"R vs SAS CMH\"\necho: false\n---\n\n\n\n# Cochran-Mantel-Haenszel Test\n\nThe CMH procedure tests for conditional independence in partial contingency tables for a 2 x 2 x K design. However, it can be generalized to tables of X x Y x K dimensions.\n\n\n::: {.cell layout-align=\"center\"}\n\n```{.r .cell-code}\nknitr::include_graphics('../images/cmh/img.png')\n```\n\n::: {.cell-output-display}\n![](../images/cmh/img.png){fig-align='center' width=162}\n:::\n:::\n\n\n## Naming Convention\n\nFor the remainder of this document, we adopt the following naming convention when referring to variables of a contingency table:\n\n- X = exposure (often the treatment variable)\n\n- Y = response (the variable of interest)\n\n- K = control/strata (often a potential confounder you want to control for)\n\n## Scale\n\nThe `scale` of the exposure (X) and response (Y) variables dictate which test statistic is computed for the contingency table. 
Each test statistic is evaluated on different degrees of freedom (df):\n\n- `General association` statistic (X and Y both nominal) results in `(X-1) * (Y-1) dfs`\n\n- `Row mean` scores statistic (X is nominal and Y is ordinal) results in `X-1 dfs`\n\n- `Nonzero correlation` statistic (X and Y both ordinal) results in `1 df`\n\n# Testing Strategy\n\n## Data\n\nTo begin investigating the differences in the SAS and R implementations of the CMH test, we decided to use the CDISC Pilot data set, which is publicly available on the PHUSE Test Data Factory repository. We applied very basic filtering conditions upfront (see below) and this data set served as the basis of the examples to follow.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndata <- read.csv(\"../data/adcibc.csv\")\nhead(data)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n X STUDYID SITEID SITEGR1 USUBJID TRTSDT TRTEDT\n1 1 CDISCPILOT01 701 701 01-701-1015 2014-01-02 2014-07-02\n2 2 CDISCPILOT01 701 701 01-701-1023 2012-08-05 2012-09-01\n3 3 CDISCPILOT01 701 701 01-701-1028 2013-07-19 2014-01-14\n4 4 CDISCPILOT01 701 701 01-701-1033 2014-03-18 2014-03-31\n5 5 CDISCPILOT01 701 701 01-701-1034 2014-07-01 2014-12-30\n6 6 CDISCPILOT01 701 701 01-701-1047 2013-02-12 2013-03-09\n TRTP TRTPN AGE AGEGR1 AGEGR1N RACE RACEN SEX ITTFL EFFFL\n1 Placebo 0 63 <65 1 WHITE 1 F Y Y\n2 Placebo 0 64 <65 1 WHITE 1 M Y Y\n3 Xanomeline High Dose 81 71 65-80 2 WHITE 1 M Y Y\n4 Xanomeline Low Dose 54 74 65-80 2 WHITE 1 M Y Y\n5 Xanomeline High Dose 81 77 65-80 2 WHITE 1 F Y Y\n6 Placebo 0 85 >80 3 WHITE 1 F Y Y\n COMP24FL AVISIT AVISITN VISIT VISITNUM ADY ADT PARAMCD\n1 Y Week 8 8 WEEK 8 8 63 2014-03-05 CIBICVAL\n2 N Week 8 8 WEEK 4 5 29 2012-09-02 CIBICVAL\n3 Y Week 8 8 WEEK 8 8 54 2013-09-10 CIBICVAL\n4 N Week 8 8 WEEK 4 5 28 2014-04-14 CIBICVAL\n5 Y Week 8 8 WEEK 8 8 57 2014-08-26 CIBICVAL\n6 N Week 8 8 AMBUL ECG REMOVAL 6 46 2013-03-29 CIBICVAL\n PARAM PARAMN AVAL ANL01FL DTYPE AWRANGE AWTARGET AWTDIFF AWLO AWHI AWU\n1 CIBIC 
Score 1 4 Y NA 2-84 56 7 2 84 DAYS\n2 CIBIC Score 1 3 Y NA 2-84 56 27 2 84 DAYS\n3 CIBIC Score 1 4 Y NA 2-84 56 2 2 84 DAYS\n4 CIBIC Score 1 4 Y NA 2-84 56 28 2 84 DAYS\n5 CIBIC Score 1 4 Y NA 2-84 56 1 2 84 DAYS\n6 CIBIC Score 1 4 Y NA 2-84 56 10 2 84 DAYS\n QSSEQ\n1 6001\n2 6001\n3 6001\n4 6001\n5 6001\n6 6001\n```\n\n\n:::\n:::\n\n\n## Schemes\n\nIn order to follow a systematic approach to testing, and to cover variations in the CMH test, we considered the traditional 2 x 2 x K design as well as scenarios where the generalized CMH test is employed (e.g. 5 x 3 x 3).\n\nWe present 6 test scenarios, some of which have sparse data.\n\n| Number | Schema (X x Y x K) | Variables | Relevant Test | Description |\n|---------------|---------------|---------------|---------------|---------------|\n| 1 | 2x2x2 | X = TRTP, Y = SEX, K = AGEGR1 | General Association | TRTP and AGEGR1 were limited to two categories (removing the low dose and \\>80 year group), overall the the groups were rather balanced |\n| 2 | 3x2x3 | X = TRTP, Y = SEX, K = AGEGR1 | General Association | TRTP and AGEGR1 each have 3 levels, SEX has 2 levels, overall the the groups were rather balanced |\n| 3 | 3x2x3 | X = TRTP, Y = SEX, K = RACE | General Association | One stratum of RACE has only n=1 |\n| 6 | 2x5x2 | X = TRTP, Y = AVAL, K = SEX | Row Means | Compare Row Means results because Y is ordinal |\n| 9 | 3x5x17 | X = TRTP, Y = AVAL, K = SITEID | Row Means | SITEID has many strata and provokes sparse groups; AVAL is ordinal, therefore row means statistic applies here |\n| 10 | 5x3x3 | X = AVAL, Y = AGEGR1N, K = TRTP | Correlation | X and Y are ordinal variables and therefore the correlation statistics can be used |\n\n# Results\n\n## CMH Statistics\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsas_results <- tribble(\n ~Scenario, ~Test, ~Chisq, ~Df, ~Prob,\n 1L ,\"Correlation\", 0.2166, 1, 0.6417,\n 1L ,\"Row Means\", 0.2166, 1, 0.6417,\n 1L ,\"General Association\", 0.2166, 1, 0.6417,\n \n 2L 
,\"Correlation\", 0.0009, 1, 0.9765,\n 2L ,\"Row Means\", 2.4820, 1, 0.2891,\n 2L ,\"General Association\", 2.4820, 1, 0.2891,\n \n 3L ,\"Correlation\", 0.0028, 1, 0.9579,\n 3L ,\"Row Means\", 2.3861, 2, 0.3033, \n 3L ,\"General Association\", 2.3861, 2, 0.3033,\n \n 6L ,\"Correlation\", 1.7487, 1, 0.1860,\n 6L ,\"Row Means\", 1.7487, 1, 0.1860,\n 6L ,\"General Association\", 8.0534, 4, 0.0896,\n \n 9L ,\"Correlation\", 0.0854, 1, 0.7701,\n 9L ,\"Row Means\", 2.4763, 2, 0.2899,\n 9L ,\"General Association\", 7.0339, 8, 0.5330,\n \n 10L ,\"Correlation\", 1.6621, 1, 0.1973,\n 10L ,\"Row Means\", 2.2980, 4, 0.6811,\n 10L ,\"General Association\",5.7305, 8, 0.6774\n) |>\n mutate(lang = \"SAS\")\n```\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(vcdExtra)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nLoading required package: vcd\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nLoading required package: grid\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nLoading required package: gnm\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nRegistered S3 method overwritten by 'vcdExtra':\n method from\n print.Kappa vcd \n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\n\nAttaching package: 'vcdExtra'\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nThe following object is masked from 'package:vcd':\n\n woolf_test\n```\n\n\n:::\n\n```{.r .cell-code}\ndata2 <- data |>\n filter(TRTPN != \"54\" & AGEGR1 != \">80\")\n\ns1 <- vcdExtra::CMHtest(\n Freq ~ TRTP + SEX | AGEGR1,\n data = data2,\n overall = TRUE\n)$ALL$table\n\ns2 <- vcdExtra::CMHtest(\n Freq ~ TRTP + SEX | AGEGR1,\n data = data,\n overall = TRUE\n)$ALL$table\n\ns3 <- vcdExtra::CMHtest(\n Freq ~ TRTP + SEX | RACE,\n data = data,\n overall = TRUE\n)$ALL$table\n\ns6 <- vcdExtra::CMHtest(\n Freq ~ TRTP + AVAL | SEX,\n data = data2,\n overall = TRUE\n)$ALL$table\n\n## Unable to run: For large sparse table (many strata) CMHTest will 
occasionally throw an error in solve.default(AVA) because of singularity\n# s9 <- vcdExtra::CMHtest(\n# Freq ~ TRTP + AVAL | SITEID,\n# data = data, overall = TRUE\n# )$ALL$table\n\ns10 <- vcdExtra::CMHtest(\n Freq ~ AVAL + AGEGR1N | TRTP,\n data = data,\n overall = TRUE\n)$ALL$table\n\n\n# Summarize the results\nr_results <- list(s1, s2, s3, s6, s10) |>\n map(function(x) {\n as_tibble(x) |>\n mutate(across(everything(), unlist), Test = rownames(x))\n }) |>\n reduce(bind_rows) |>\n mutate(\n Scenario = rep(c(1, 2, 3, 6, 10), each = 4),\n Test = case_when(\n Test == \"cor\" ~ \"Correlation\",\n Test == \"rmeans\" ~ \"Row Means\",\n Test == \"general\" ~ \"General Association\"\n ),\n lang = \"R\"\n ) |>\n filter(!is.na(Test))\n```\n:::\n\n\nAs can be seen, there are 2 scenarios where `vcdExtra` in R does not provide any results:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(gt)\n\nbind_rows(sas_results, r_results) |>\n arrange(Scenario) |>\n pivot_wider(names_from = lang, values_from = c(\"Chisq\", \"Df\", \"Prob\")) |>\n gt(\n groupname_col = \"Scenario\"\n ) |>\n tab_spanner(\n label = \"Chi-Square\",\n columns = starts_with(\"Chisq\")\n ) |>\n tab_spanner(\n label = \"df\",\n columns = starts_with(\"Df\")\n ) |>\n tab_spanner(\n label = \"p-value\",\n columns = starts_with(\"Prob\")\n ) |>\n cols_label(\n Chisq_SAS = \"SAS\",\n Chisq_R = \"R\",\n Df_SAS = \"SAS\",\n Df_R = \"R\",\n Prob_SAS = \"SAS\",\n Prob_R = \"R\"\n ) |>\n tab_options(row_group.as_column = TRUE) |>\n tab_footnote(\n footnote = md(\n \"**Reason for NaN in scenario 3**: Stratum k = AMERICAN INDIAN OR ALASKA NATIVE has n=1.\"\n ),\n cells_row_groups(groups = \"3\"),\n placement = \"right\"\n ) |>\n tab_footnote(\n footnote = md(\n \"**Reason for in scenario 9:** For large sparse table (many strata) CMHTest will throw an error in solve.default(AVA) because of singularity\"\n ),\n cells_row_groups(groups = \"9\"),\n placement = \"right\"\n ) |>\n opt_footnote_marks(marks = 
\"standard\")\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n \n \n \n \n \n \n \n \n
Test\n
Chi-Square
\n
\n
df
\n
\n
p-value
\n
SASRSASRSASR
1Correlation0.21660.2165549886110.64170.64167748
Row Means0.21660.2165549886110.64170.64167748
General Association0.21660.2165549886110.64170.64167748
2Correlation0.00090.0008689711110.97650.97648311
Row Means2.48202.4820278527120.28910.28909095
General Association2.48202.4820278527120.28910.28909095
3*Correlation0.00280.0027871297110.95790.95789662
Row Means2.38612.3860698467220.30330.30329938
General Association2.38612.3860698467220.30330.30329938
6Correlation1.74871.7487003723110.18600.18604020
Row Means1.74871.7487003723110.18600.18604020
General Association8.05348.0533878514440.08960.08964199
9Correlation0.0854NA1NA0.7701NA
Row Means2.4763NA2NA0.2899NA
General Association7.0339NA8NA0.5330NA
10Correlation1.66211.6620500937110.19730.19732675
Row Means2.29802.2980213984440.68110.68112931
General Association5.73055.7305381934880.67740.67738613
* Reason for NaN in scenario 3: Stratum k = AMERICAN INDIAN OR ALASKA NATIVE has n=1.
Reason for in scenario 9: For large sparse table (many strata) CMHTest will throw an error in solve.default(AVA) because of singularity
\n
\n```\n\n:::\n:::\n\n\n# Summary and Recommendation\n\nHaving explored the available R packages to calculate the CMH statistics, the base `stats::mantelhaen.test()` function can be recommended for the classic CMH test in the 2 x 2 x K scenarios. The `vcdExtra` package shows matching results with SAS, however in cases with sparse data the `vcdExtra` package will not provide results.\n\nThe base `stats::mantelhaen.test()` function does return results in cases of sparse data (required n \\> 1 in each strata).\n\n# References\n\nAccessible Summary: \n\nAn Introduction to Categorical Data Analysis 2nd Edition (Agresti): \n\nSAS documentation (Specification): \n\nSAS documentation (Theoretical Basis + Formulas): \n\nOriginal Paper 1: \n\nOriginal Paper 2: ", + "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" ], diff --git a/_freeze/Comp/r-sas_friedman/execute-results/html.json b/_freeze/Comp/r-sas_friedman/execute-results/html.json index 8525a8f85..d1695b250 100644 --- a/_freeze/Comp/r-sas_friedman/execute-results/html.json +++ b/_freeze/Comp/r-sas_friedman/execute-results/html.json @@ -1,9 +1,11 @@ { - "hash": "bf0d6a74a589870b80a36f17c7982235", + "hash": "c310d2c8900a292c24f3bbe1ff0d3865", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"R vs SAS Non-parametric Analysis - Friedman test\"\nexecute: \n eval: false\n---\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(tidyverse)\nlibrary(rstatix)\nlibrary(ggpubr)\n```\n:::\n\n\n## Data used\n\nFriedman's test is used when you have one within-subjects independent variable with two or more levels and a dependent variable that is not interval and normally distributed (but at least ordinal). To build such unreplicated blocked data, we'll create a data frame called  `df_bp` from random number. 
In  `df_bp` : dependent variable `bp` is randomly generated; Block: `subjid` ; Group: `time_point`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nset.seed(123)\n\ndf_bp = data.frame(bp = runif(n = 50, 138, 200)) |>\n mutate(\n subjid = as.factor(row_number() %% 5),\n time_point = as.factor((row_number() - 1) %/% 5 + 1)\n )\n\nhead(df_bp)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n bp subjid time_point\n1 155.8298 1 1\n2 186.8749 2 1\n3 163.3566 3 1\n4 192.7471 4 1\n5 196.3090 0 1\n6 140.8245 1 2\n```\n\n\n:::\n:::\n\n\nLet's see distribution of `df_bp`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nggpubr::ggboxplot(df_bp, x = \"time_point\", y = \"bp\", add = \"jitter\")\n```\n:::\n\n\n## Example Code using {rstatix}\n\nIn R, **friedman_test** can be used to compare multiple means of rank in `bp` grouped by `time_point`, stratified by `subjid`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nres.fried <- df_bp |>\n friedman_test(bp ~ time_point | subjid)\nres.fried\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 6\n .y. 
n statistic df p method \n* \n1 bp 5 10.9 9 0.284 Friedman test\n```\n\n\n:::\n:::\n\n\n## Example Code using {PROC FREQ}\n\nIn SAS, **CMH2** option of PROC FREQ is used to perform Friedman's test.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc freq data=data_bp;\n tables patient*dos*bp / \n cmh2 scores=rank noprint;\nrun;\n```\n:::\n\n\n## Comparison\n\nThe Row Mean Scores Differ statistic of SAS result is compared with statistic of R result, together with *p*-value.\n\n+---------------+----------------------------------------------------------------------------------+---------------------------------------------------------------------------+---------------+\n| Analysis | Supported in R | Supported in SAS | Results Match |\n+===============+==================================================================================+===========================================================================+===============+\n| Friedman Test | Yes | Yes![](../images/friedman/Friedman_SAS.png){fig-align=\"left\" width=\"216\"} | Yes |\n| | | | |\n| | ![](../images/friedman/Friedman_R.png){fig-align=\"left\" width=\"221\" height=\"31\"} | | |\n+---------------+----------------------------------------------------------------------------------+---------------------------------------------------------------------------+---------------+\n\n## Comparison Results from more data\n\nFriedman's chi-suqare approximation varies when the number of blocks or the number of groups in the randomized block design differs. Similar comparison is done when number of block `subjid` ranges from 4 to 20 and number of group `time_point` ranges from 2 to 6. All results yield exact match (Comparison criterion is set to the tenth decimal place).\n\n# Summary and Recommendation\n\nThe R friedman test is comparable to SAS. Comparison between SAS and R show identical results for the datasets tried. 
The **rstatix** package `friedman_test()` function is very similar to SAS in the output produced.\n\n# References\n\nR `friedman_test()` documentation: \n\nSAS `PROC FREQ` documentation: \n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-24\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P rstatix * 0.7.3 2025-10-18 [?] RSPM\n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n\n * ── Packages attached to the search path.\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n:::", - "supporting": [], + "markdown": "---\ntitle: \"R vs SAS Non-parametric Analysis - Friedman test\"\n---\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(tidyverse)\nlibrary(rstatix)\nlibrary(ggpubr)\n```\n:::\n\n\n## Data used\n\nFriedman's test is used when you have one within-subjects independent variable with two or more levels and a dependent variable that is not interval and normally distributed (but at least ordinal). To build such unreplicated blocked data, we'll create a data frame called  `df_bp` from random number. 
In  `df_bp` : dependent variable `bp` is randomly generated; Block: `subjid` ; Group: `time_point`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nset.seed(123)\n\ndf_bp = data.frame(bp = runif(n = 50, 138, 200)) |>\n mutate(\n subjid = as.factor(row_number() %% 5),\n time_point = as.factor((row_number() - 1) %/% 5 + 1)\n )\n\nhead(df_bp)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n bp subjid time_point\n1 155.8298 1 1\n2 186.8749 2 1\n3 163.3566 3 1\n4 192.7471 4 1\n5 196.3090 0 1\n6 140.8245 1 2\n```\n\n\n:::\n:::\n\n\nLet's see distribution of `df_bp`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nggpubr::ggboxplot(df_bp, x = \"time_point\", y = \"bp\", add = \"jitter\")\n```\n\n::: {.cell-output-display}\n![](r-sas_friedman_files/figure-html/unnamed-chunk-2-1.png){width=672}\n:::\n:::\n\n\n## Example Code using {rstatix}\n\nIn R, **friedman_test** can be used to compare multiple means of rank in `bp` grouped by `time_point`, stratified by `subjid`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nres.fried <- df_bp |>\n friedman_test(bp ~ time_point | subjid)\nres.fried\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 6\n .y. 
n statistic df p method \n* \n1 bp 5 10.9 9 0.284 Friedman test\n```\n\n\n:::\n:::\n\n\n## Example Code using {PROC FREQ}\n\nIn SAS, **CMH2** option of PROC FREQ is used to perform Friedman's test.\n\n```sas\nproc freq data=data_bp;\n tables patient*dos*bp / \n cmh2 scores=rank noprint;\nrun;\n```\n\n## Comparison\n\nThe Row Mean Scores Differ statistic of SAS result is compared with statistic of R result, together with *p*-value.\n\n+---------------+----------------------------------------------------------------------------------+---------------------------------------------------------------------------+---------------+\n| Analysis | Supported in R | Supported in SAS | Results Match |\n+===============+==================================================================================+===========================================================================+===============+\n| Friedman Test | Yes | Yes![](../images/friedman/Friedman_SAS.png){fig-align=\"left\" width=\"216\"} | Yes |\n| | | | |\n| | ![](../images/friedman/Friedman_R.png){fig-align=\"left\" width=\"221\" height=\"31\"} | | |\n+---------------+----------------------------------------------------------------------------------+---------------------------------------------------------------------------+---------------+\n\n## Comparison Results from more data\n\nFriedman's chi-suqare approximation varies when the number of blocks or the number of groups in the randomized block design differs. Similar comparison is done when number of block `subjid` ranges from 4 to 20 and number of group `time_point` ranges from 2 to 6. All results yield exact match (Comparison criterion is set to the tenth decimal place).\n\n# Summary and Recommendation\n\nThe R friedman test is comparable to SAS. Comparison between SAS and R show identical results for the datasets tried. 
The **rstatix** package `friedman_test()` function is very similar to SAS in the output produced.\n\n# References\n\nR `friedman_test()` documentation: \n\nSAS `PROC FREQ` documentation: \n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os Ubuntu 24.04.3 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-17\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P rstatix * 0.7.3 2025-10-18 [?] RSPM (R 4.5.0)\n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.2/lib/R/library\n\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n:::", + "supporting": [ + "r-sas_friedman_files" + ], "filters": [ "rmarkdown/pagebreak.lua" ], diff --git a/_freeze/Comp/r-sas_friedman/figure-html/unnamed-chunk-2-1.png b/_freeze/Comp/r-sas_friedman/figure-html/unnamed-chunk-2-1.png new file mode 100644 index 000000000..d5bf94aea Binary files /dev/null and b/_freeze/Comp/r-sas_friedman/figure-html/unnamed-chunk-2-1.png differ diff --git a/_freeze/Comp/r-sas_gee/execute-results/html.json b/_freeze/Comp/r-sas_gee/execute-results/html.json index 05676941a..9006d93e6 100644 --- a/_freeze/Comp/r-sas_gee/execute-results/html.json +++ b/_freeze/Comp/r-sas_gee/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "318fb4d6eaaafeae61c6078619944dab", + "hash": "c06fb311c7e741430f30f160116d67b8", "result": { "engine": "knitr", - "markdown": 
"---\ntitle: \"Generalized Estimating Equations (GEE) methods\"\nexecute: \n eval: false\n---\n\n# Comparison of SAS vs R\n\n# BINARY OUTCOME\n\nFor dichotomous response variables, the link functions is the probit (in case of rare events complementary log-log may be preferable). For outcomes with more than two categories, the cumulative link function is used in case of ordinal variables and generalized logit for nominal variables.\n\nIn SAS, PROC GEE or PROC GENMOD can be used to compute GEE models. In R, GEE models can be fitted using `geepack::geeglm` or `gee::gee.`\n\nEstimated probabilities and odds ratios (OR) can be obtained in SAS by adding an `LSMEANS` statement, and in R by using an additional function with results from `geepack::geeglm`. For models fitted with `gee::gee`, the `emmeans` package is not supported.\n\nThe table below summarizes options and details for each procedure/function:\n\n| **Procedure/Function** | geepack::geeglm | **gee:gee** | PROC GEE/GENMOD |\n|------------------|------------------|-------------------|------------------|\n| Outcome variable | Numeric (0, 1) | Factor | Numeric or character in `class` statement |\n| Correlation matrix (default) | Independence | Independence | Independence |\n| Correlation matrix (options) | independence, exchangeable, ar1, unstructured and user defined. | independence, fixed, stat_M_dep, non_stat_M_dep, exchangeable, AR-M and unstructured. | independence, ar, exchangeable, mdep, unstructured and user defined. 
|\n| Outcome variable | Numeric (0, 1) | Factor | Numeric or factor, in `class` statement |\n| Sandwich SE | By default | By default | By default |\n| Model-based (naive) SE | No | By default | `modelse` option in `repeated` statement |\n| Link functions | probit, logit | probit, logit | probit, logit, clogit and glogit |\n| Estimated probability of event | additional function `emmeans::emmeans` | `emmeans::emmeans` not supported | `LSMEANS` statement with `ilink` option |\n| Odds Ratio (OR) | additional function `emmeans::emmeans` | `emmeans::emmeans` not supported | `LSMEANS` statement with `exp` or `oddsraio`option |\n\nA comparison between SAS (using PROC GEE) and the R functions using data available in [\"Gee Model for Binary Data\"](https://documentation.sas.com/doc/en/statug/15.2/statug_code_genmex5.htm), and similar results were found across software using similar options (See R and SAS sections).\n\nNote small differences may be found in later decimal places (\\>9 decimal position) in results obtained with PROC GENMOD and PROC GEE. Similarly, differences in later decimals may be found across R functions.\n\n# OUTCOME WITH MORE THAN TWO CATEGORIES\n\nModels with cumulative logit link functions apply to ordinal data and generalized logit models are fit to nominal data.\n\nIn SAS, similar syntax used for GEE models can be applied by specifying a multinomial distribution and selecting the appropriate link function. 
In R, the `multgee`package provides two functions for estimating GEE models when the outcome has more than two categories: `ordLORgee` for ordinal variables and `nomLORgee` for nominal variables.\n\n| **Procedure/Function** | multgee:ordLORgee | multgee:nomLORgee | PROC GEE/PROC GENMOD | PROC GEE/GENMOD |\n|---------------|---------------|---------------|---------------|---------------|\n| Correlation matrix (default) | Exchangeable | Independence | Independence | |\n| Correlation matrix (options) | independence, uniform, exchangeable, time.exch, fixed | independence, exchangeable, RC or fixed | Independence^(1)^ | |\n| Link function | clogit (implicit - not user configurable) | glogit (implicit - not user configurable) | glogit, glogit^(2)^ | |\n| Model-based (naive) SE | No | By default | `modelse` option in `repeated` statement | |\n\n(1) For multinomial responses, SAS limits the correlation matrix type to `independent,` so other correlation matrix options are not supported.\n\n(2) Generalized logit is available in PROC GEE, but not in PROC GENMOD.\n\nThe same data analyzed in the corresponding section for SAS and R are analyzed to compare results using equivalent settings, i.e.: specifying 'independence' correlation matrix in R to match SAS.\n\nSAS results were stored in a dataset and reformatted to align with R output in terms of order and decimal precision, making visual comparison easier.\n\n### Ordinal variable - Example\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc genmod data=resp ;\nclass trtp(ref=\"P\") avisitn(ref='1') usubjid ; \nmodel respord=trtp avisitn trtp*avisitn / dist=multinomial link=cumlogit ; \nrepeated subject=usubjid /corr=ind; \nlsmeans trtp*avisitn/cl exp ilink oddsratio diff;\nods output GEEEmpPEst=GEEEmpPEst_ord ;\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/gee/r_sas_1_ordinal_variable.png){fig-align='center' width=50%}\n:::\n:::\n\n\n\n\n\n::: {.cell}\n\n```{.r 
.cell-code}\nmodel <- multgee::ordLORgee(formula = respord ~ trtp + avisitn + trtp*avisitn,\n data = resp,\n id = usubjid,\n repeated = avisitn,\n LORstr = \"independence\")\nsummary(model)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nGEE FOR ORDINAL MULTINOMIAL RESPONSES \nversion 1.6.0 modified 2017-07-10 \n\nLink : Cumulative logit \n\nLocal Odds Ratios:\nStructure: independence\n\ncall:\nmultgee::ordLORgee(formula = respord ~ trtp + avisitn + trtp * \n avisitn, data = resp, id = usubjid, repeated = avisitn, LORstr = \"independence\")\n\nSummary of residuals:\n Min. 1st Qu. Median Mean 3rd Qu. Max. \n-0.4698929 -0.3568933 -0.2853896 0.0002563 0.6339985 0.7277428 \n\nNumber of Iterations: 1 \n\nCoefficients:\n Estimate san.se san.z Pr(>|san.z|) \nbeta10 -0.55049 0.24303 -2.2652 0.02350 *\nbeta20 0.62726 0.25615 2.4488 0.01433 *\ntrtpA 0.42992 0.34205 1.2569 0.20880 \navisitn2 0.00108 0.36663 0.0030 0.99765 \navisitn3 0.11733 0.32268 0.3636 0.71616 \navisitn4 -0.15048 0.33263 -0.4524 0.65099 \ntrtpA:avisitn2 -0.27197 0.50959 -0.5337 0.59355 \ntrtpA:avisitn3 -0.58563 0.46208 -1.2674 0.20503 \ntrtpA:avisitn4 -0.31782 0.44813 -0.7092 0.47819 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n\nLocal Odds Ratios Estimates:\n [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8]\n[1,] 0 0 1 1 1 1 1 1\n[2,] 0 0 1 1 1 1 1 1\n[3,] 1 1 0 0 1 1 1 1\n[4,] 1 1 0 0 1 1 1 1\n[5,] 1 1 1 1 0 0 1 1\n[6,] 1 1 1 1 0 0 1 1\n[7,] 1 1 1 1 1 1 0 0\n[8,] 1 1 1 1 1 1 0 0\n\np-value of Null model: 0.68672 \n```\n\n\n:::\n:::\n\n\n### Nominal variable - Example\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc gee data=resp ;\nclass trtp(ref=\"P\") avisitn(ref='1') usubjid respnom(ref='Lung'); \nmodel respnom=trtp avisitn trtp*avisitn/ dist=multinomial link=glogit ; \nrepeated subject=usubjid /corr=ind /*modelse*/; \nlsmeans trtp*avisitn/cl exp ilink oddsratio diff;\nods output GEEEmpPEst=GEEEmpPEst_nom ;\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/gee/r_sas_2_nominal_variable.png){fig-align='center' width=50%}\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel <- multgee::nomLORgee(formula = respnom ~ trtp + avisitn + trtp*avisitn,\n data = resp,\n id = usubjid,\n repeated = avisitn,\n LORstr = \"independence\")\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWarning in vglm.fitter(x = x, y = y, w = w, offset = offset, Xm2 = Xm2, : some\nquantities such as z, residuals, SEs may be inaccurate due to convergence at a\nhalf-step\n```\n\n\n:::\n\n```{.r .cell-code}\nsummary(model)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nGEE FOR NOMINAL MULTINOMIAL RESPONSES \nversion 1.6.0 modified 2017-07-10 \n\nLink : Baseline Category Logit \n\nLocal Odds Ratios:\nStructure: independence\nHomogenous scores: TRUE\n\ncall:\nmultgee::nomLORgee(formula = respnom ~ trtp + avisitn + trtp * \n avisitn, data = resp, id = usubjid, repeated = avisitn, LORstr = \"independence\")\n\nSummary of residuals:\n Min. 1st Qu. Median Mean 3rd Qu. Max. 
\n-0.4444 -0.3684 -0.3333 0.0000 0.6111 0.7778 \n\nNumber of Iterations: 1 \n\nCoefficients:\n Estimate san.se san.z Pr(>|san.z|)\nbeta10 0.22314 0.33541 0.6653 0.5059\ntrtpA:1 -0.62861 0.50139 -1.2537 0.2099\navisitn2:1 0.18232 0.43095 0.4231 0.6722\navisitn3:1 0.01325 0.47862 0.0277 0.9779\navisitn4:1 -0.06899 0.48140 -0.1433 0.8860\ntrtpA:avisitn2:1 0.16252 0.63421 0.2563 0.7977\ntrtpA:avisitn3:1 0.95184 0.69786 1.3639 0.1726\ntrtpA:avisitn4:1 0.64631 0.70063 0.9225 0.3563\nbeta20 0.27193 0.33184 0.8195 0.4125\ntrtpA:2 0.01575 0.45535 0.0346 0.9724\navisitn2:2 0.18005 0.42536 0.4233 0.6721\navisitn3:2 0.15551 0.46598 0.3337 0.7386\navisitn4:2 -0.27193 0.50787 -0.5354 0.5924\ntrtpA:avisitn2:2 -0.25642 0.60253 -0.4256 0.6704\ntrtpA:avisitn3:2 0.11642 0.64694 0.1800 0.8572\ntrtpA:avisitn4:2 0.15610 0.63994 0.2439 0.8073\n\nLocal Odds Ratios Estimates:\n [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8]\n[1,] 0 0 1 1 1 1 1 1\n[2,] 0 0 1 1 1 1 1 1\n[3,] 1 1 0 0 1 1 1 1\n[4,] 1 1 0 0 1 1 1 1\n[5,] 1 1 1 1 0 0 1 1\n[6,] 1 1 1 1 0 0 1 1\n[7,] 1 1 1 1 1 1 0 0\n[8,] 1 1 1 1 1 1 0 0\n\np-value of Null model: 0.28295 \n```\n\n\n:::\n:::\n\n\n# REFERENCES\n\n\\[1\\] [SAS Institute Inc.. SAS Help Center. The GEE procedure.](https://documentation.sas.com/doc/en/statug/15.2/statug_gee_examples01.htm)\n\n\\[2\\] [SAS/STAT® 13.1 User's Guide The GEE Procedure.](https://support.sas.com/documentation/onlinedoc/stat/141/gee.pdf)\n\n\\[3\\] [SAS/STAT® 13.1 User's Guide The GENMOD Procedure.](https://support.sas.com/documentation/onlinedoc/stat/131/genmod.pdf)\n\n\\[4\\] [Generalized Estimating Equation Package](https://cran.r-project.org/web/packages/geepack/geepack.pdf)\n\n\\[5\\] [Generalized Estimation Equation Solver](https://cran.r-project.org/web/packages/gee/gee.pdf)\n\n\\[6\\] [Touloumis A. (2015). 
\"R Package multgee: A Generalized Estimating Equations Solver for Multinomial Responses.\" Journal of Statistical Software.](https://www.jstatsoft.org/article/view/v064i08)", + "markdown": "---\ntitle: \"Generalized Estimating Equations (GEE) methods\"\n---\n\n# Comparison of SAS vs R\n\n# BINARY OUTCOME\n\nFor dichotomous response variables, the link functions is the probit (in case of rare events complementary log-log may be preferable). For outcomes with more than two categories, the cumulative link function is used in case of ordinal variables and generalized logit for nominal variables.\n\nIn SAS, PROC GEE or PROC GENMOD can be used to compute GEE models. In R, GEE models can be fitted using `geepack::geeglm` or `gee::gee.`\n\nEstimated probabilities and odds ratios (OR) can be obtained in SAS by adding an `LSMEANS` statement, and in R by using an additional function with results from `geepack::geeglm`. For models fitted with `gee::gee`, the `emmeans` package is not supported.\n\nThe table below summarizes options and details for each procedure/function:\n\n| **Procedure/Function** | geepack::geeglm | **gee:gee** | PROC GEE/GENMOD |\n|------------------|------------------|-------------------|------------------|\n| Outcome variable | Numeric (0, 1) | Factor | Numeric or character in `class` statement |\n| Correlation matrix (default) | Independence | Independence | Independence |\n| Correlation matrix (options) | independence, exchangeable, ar1, unstructured and user defined. | independence, fixed, stat_M_dep, non_stat_M_dep, exchangeable, AR-M and unstructured. | independence, ar, exchangeable, mdep, unstructured and user defined. 
|\n| Outcome variable | Numeric (0, 1) | Factor | Numeric or factor, in `class` statement |\n| Sandwich SE | By default | By default | By default |\n| Model-based (naive) SE | No | By default | `modelse` option in `repeated` statement |\n| Link functions | probit, logit | probit, logit | probit, logit, clogit and glogit |\n| Estimated probability of event | additional function `emmeans::emmeans` | `emmeans::emmeans` not supported | `LSMEANS` statement with `ilink` option |\n| Odds Ratio (OR) | additional function `emmeans::emmeans` | `emmeans::emmeans` not supported | `LSMEANS` statement with `exp` or `oddsraio`option |\n\nA comparison between SAS (using PROC GEE) and the R functions using data available in [\"Gee Model for Binary Data\"](https://documentation.sas.com/doc/en/statug/15.2/statug_code_genmex5.htm), and similar results were found across software using similar options (See R and SAS sections).\n\nNote small differences may be found in later decimal places (\\>9 decimal position) in results obtained with PROC GENMOD and PROC GEE. Similarly, differences in later decimals may be found across R functions.\n\n# OUTCOME WITH MORE THAN TWO CATEGORIES\n\nModels with cumulative logit link functions apply to ordinal data and generalized logit models are fit to nominal data.\n\nIn SAS, similar syntax used for GEE models can be applied by specifying a multinomial distribution and selecting the appropriate link function. 
In R, the `multgee`package provides two functions for estimating GEE models when the outcome has more than two categories: `ordLORgee` for ordinal variables and `nomLORgee` for nominal variables.\n\n| **Procedure/Function** | multgee:ordLORgee | multgee:nomLORgee | PROC GEE/PROC GENMOD | PROC GEE/GENMOD |\n|---------------|---------------|---------------|---------------|---------------|\n| Correlation matrix (default) | Exchangeable | Independence | Independence | |\n| Correlation matrix (options) | independence, uniform, exchangeable, time.exch, fixed | independence, exchangeable, RC or fixed | Independence^(1)^ | |\n| Link function | clogit (implicit - not user configurable) | glogit (implicit - not user configurable) | glogit, glogit^(2)^ | |\n| Model-based (naive) SE | No | By default | `modelse` option in `repeated` statement | |\n\n(1) For multinomial responses, SAS limits the correlation matrix type to `independent,` so other correlation matrix options are not supported.\n\n(2) Generalized logit is available in PROC GEE, but not in PROC GENMOD.\n\nThe same data analyzed in the corresponding section for SAS and R are analyzed to compare results using equivalent settings, i.e.: specifying 'independence' correlation matrix in R to match SAS.\n\nSAS results were stored in a dataset and reformatted to align with R output in terms of order and decimal precision, making visual comparison easier.\n\n### Ordinal variable - Example\n\n```sas\nproc genmod data=resp ;\nclass trtp(ref=\"P\") avisitn(ref='1') usubjid ; \nmodel respord=trtp avisitn trtp*avisitn / dist=multinomial link=cumlogit ; \nrepeated subject=usubjid /corr=ind; \nlsmeans trtp*avisitn/cl exp ilink oddsratio diff;\nods output GEEEmpPEst=GEEEmpPEst_ord ;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/gee/r_sas_1_ordinal_variable.png){fig-align='center' width=50%}\n:::\n:::\n\n\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel <- multgee::ordLORgee(formula = 
respord ~ trtp + avisitn + trtp*avisitn,\n data = resp,\n id = usubjid,\n repeated = avisitn,\n LORstr = \"independence\")\nsummary(model)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nGEE FOR ORDINAL MULTINOMIAL RESPONSES \nversion 1.6.0 modified 2017-07-10 \n\nLink : Cumulative logit \n\nLocal Odds Ratios:\nStructure: independence\n\ncall:\nmultgee::ordLORgee(formula = respord ~ trtp + avisitn + trtp * \n avisitn, data = resp, id = usubjid, repeated = avisitn, LORstr = \"independence\")\n\nSummary of residuals:\n Min. 1st Qu. Median Mean 3rd Qu. Max. \n-0.4698929 -0.3568933 -0.2853896 0.0002563 0.6339985 0.7277428 \n\nNumber of Iterations: 1 \n\nCoefficients:\n Estimate san.se san.z Pr(>|san.z|) \nbeta10 -0.55049 0.24303 -2.2652 0.02350 *\nbeta20 0.62726 0.25615 2.4488 0.01433 *\ntrtpA 0.42992 0.34205 1.2569 0.20880 \navisitn2 0.00108 0.36663 0.0030 0.99765 \navisitn3 0.11733 0.32268 0.3636 0.71616 \navisitn4 -0.15048 0.33263 -0.4524 0.65099 \ntrtpA:avisitn2 -0.27197 0.50959 -0.5337 0.59355 \ntrtpA:avisitn3 -0.58563 0.46208 -1.2674 0.20503 \ntrtpA:avisitn4 -0.31782 0.44813 -0.7092 0.47819 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n\nLocal Odds Ratios Estimates:\n [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8]\n[1,] 0 0 1 1 1 1 1 1\n[2,] 0 0 1 1 1 1 1 1\n[3,] 1 1 0 0 1 1 1 1\n[4,] 1 1 0 0 1 1 1 1\n[5,] 1 1 1 1 0 0 1 1\n[6,] 1 1 1 1 0 0 1 1\n[7,] 1 1 1 1 1 1 0 0\n[8,] 1 1 1 1 1 1 0 0\n\np-value of Null model: 0.68672 \n```\n\n\n:::\n:::\n\n\n### Nominal variable - Example\n\n```sas\nproc gee data=resp ;\nclass trtp(ref=\"P\") avisitn(ref='1') usubjid respnom(ref='Lung'); \nmodel respnom=trtp avisitn trtp*avisitn/ dist=multinomial link=glogit ; \nrepeated subject=usubjid /corr=ind /*modelse*/; \nlsmeans trtp*avisitn/cl exp ilink oddsratio diff;\nods output GEEEmpPEst=GEEEmpPEst_nom ;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/gee/r_sas_2_nominal_variable.png){fig-align='center' width=50%}\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel <- multgee::nomLORgee(formula = respnom ~ trtp + avisitn + trtp*avisitn,\n data = resp,\n id = usubjid,\n repeated = avisitn,\n LORstr = \"independence\")\nsummary(model)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nGEE FOR NOMINAL MULTINOMIAL RESPONSES \nversion 1.6.0 modified 2017-07-10 \n\nLink : Baseline Category Logit \n\nLocal Odds Ratios:\nStructure: independence\nHomogenous scores: TRUE\n\ncall:\nmultgee::nomLORgee(formula = respnom ~ trtp + avisitn + trtp * \n avisitn, data = resp, id = usubjid, repeated = avisitn, LORstr = \"independence\")\n\nSummary of residuals:\n Min. 1st Qu. Median Mean 3rd Qu. Max. 
\n-0.4444 -0.3684 -0.3333 0.0000 0.6111 0.7778 \n\nNumber of Iterations: 1 \n\nCoefficients:\n Estimate san.se san.z Pr(>|san.z|)\nbeta10 0.22314 0.33541 0.6653 0.5059\ntrtpA:1 -0.62861 0.50139 -1.2537 0.2099\navisitn2:1 0.18232 0.43095 0.4231 0.6722\navisitn3:1 0.01325 0.47862 0.0277 0.9779\navisitn4:1 -0.06899 0.48140 -0.1433 0.8860\ntrtpA:avisitn2:1 0.16252 0.63421 0.2563 0.7977\ntrtpA:avisitn3:1 0.95184 0.69786 1.3639 0.1726\ntrtpA:avisitn4:1 0.64631 0.70063 0.9225 0.3563\nbeta20 0.27193 0.33184 0.8195 0.4125\ntrtpA:2 0.01575 0.45535 0.0346 0.9724\navisitn2:2 0.18005 0.42536 0.4233 0.6721\navisitn3:2 0.15551 0.46598 0.3337 0.7386\navisitn4:2 -0.27193 0.50787 -0.5354 0.5924\ntrtpA:avisitn2:2 -0.25642 0.60253 -0.4256 0.6704\ntrtpA:avisitn3:2 0.11642 0.64694 0.1800 0.8572\ntrtpA:avisitn4:2 0.15610 0.63994 0.2439 0.8073\n\nLocal Odds Ratios Estimates:\n [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8]\n[1,] 0 0 1 1 1 1 1 1\n[2,] 0 0 1 1 1 1 1 1\n[3,] 1 1 0 0 1 1 1 1\n[4,] 1 1 0 0 1 1 1 1\n[5,] 1 1 1 1 0 0 1 1\n[6,] 1 1 1 1 0 0 1 1\n[7,] 1 1 1 1 1 1 0 0\n[8,] 1 1 1 1 1 1 0 0\n\np-value of Null model: 0.28295 \n```\n\n\n:::\n:::\n\n\n# REFERENCES\n\n\\[1\\] [SAS Institute Inc.. SAS Help Center. The GEE procedure.](https://documentation.sas.com/doc/en/statug/15.2/statug_gee_examples01.htm)\n\n\\[2\\] [SAS/STAT® 13.1 User's Guide The GEE Procedure.](https://support.sas.com/documentation/onlinedoc/stat/141/gee.pdf)\n\n\\[3\\] [SAS/STAT® 13.1 User's Guide The GENMOD Procedure.](https://support.sas.com/documentation/onlinedoc/stat/131/genmod.pdf)\n\n\\[4\\] [Generalized Estimating Equation Package](https://cran.r-project.org/web/packages/geepack/geepack.pdf)\n\n\\[5\\] [Generalized Estimation Equation Solver](https://cran.r-project.org/web/packages/gee/gee.pdf)\n\n\\[6\\] [Touloumis A. (2015). 
\"R Package multgee: A Generalized Estimating Equations Solver for Multinomial Responses.\" Journal of Statistical Software.](https://www.jstatsoft.org/article/view/v064i08)", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/Comp/r-sas_kruskalwallis/execute-results/html.json b/_freeze/Comp/r-sas_kruskalwallis/execute-results/html.json index 9f4c7835c..a3bcc49b8 100644 --- a/_freeze/Comp/r-sas_kruskalwallis/execute-results/html.json +++ b/_freeze/Comp/r-sas_kruskalwallis/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "6b9825af98efb2959c72216e389e4422", + "hash": "c779ca1736a9b12ee359fbaafd26295a", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Kruskal Wallis R v SAS\"\nexecute: \n eval: false\n---\n\n## Kruskal-Wallis: R and SAS\n\nFrom the individual R and SAS pages, performing the Kruskal-Wallis test in R using:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nstats::kruskal.test(Sepal_Width ~ Species, data = iris_sub)\n```\n:::\n\n\nand in SAS using:\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc npar1way data=iris_sub wilcoxon;\n class Species;\n var Sepal_Width;\n exact;\nrun;\n```\n:::\n\n\nproduced the same results for the test statistic and asymptotic p-value.\n\nThere is a difference between languages in that SAS provides the EXACT option to easily output the exact p-value, where R does not seem to have an equivalent. A Monte Carlo permutation test may offer an alternative to the exact test on R. 
The `coin` package could help in implementing this.", + "markdown": "---\ntitle: \"Kruskal Wallis R v SAS\"\n---\n\n## Kruskal-Wallis: R and SAS\n\nFrom the individual R and SAS pages, performing the Kruskal-Wallis test in R using:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nstats::kruskal.test(Sepal_Width ~ Species, data = iris_sub)\n```\n:::\n\n\nand in SAS using:\n\n```sas\nproc npar1way data=iris_sub wilcoxon;\n class Species;\n var Sepal_Width;\n exact;\nrun;\n```\n\nproduced the same results for the test statistic and asymptotic p-value.\n\nThere is a difference between languages in that SAS provides the EXACT option to easily output the exact p-value, where R does not seem to have an equivalent. A Monte Carlo permutation test may offer an alternative to the exact test on R. The `coin` package could help in implementing this.", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/Comp/r-sas_logistic-regr/execute-results/html.json b/_freeze/Comp/r-sas_logistic-regr/execute-results/html.json index 2b15b538d..68387aef1 100644 --- a/_freeze/Comp/r-sas_logistic-regr/execute-results/html.json +++ b/_freeze/Comp/r-sas_logistic-regr/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "dc6f14f93f09543b9e1d6ea7ef5048bf", + "hash": "94ad018c8f0f6965b2339f41392c3b94", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"R vs SAS: Logistic Regression\"\ntoc: true\necho: true\neval: false\nkeep-hidden: true\n---\n\n# Summary\n\n## Goal\n\nComparison of results between SAS vs R for different applications of logistic regression; where possible we try to ensure the same statistical method or algorithm is specified. However, there are some underlying differences between the algorithms in SAS vs R that cannot be (at least not easily) \"tweaked\". 
The document also provides some remarks on what parameters to look out for and what could have caused the numerical differences.\n\n## Scope\n\n::::::: columns\n:::: {.column width=\"45%\"}\n::: {.callout-note appearance=\"minimal\" collapse=\"false\"}\n## Methodologies\n\n- Logistic regression\n- Firth's bias-reduced logistic regression\n- g-computation / standardization with covariate adjustment\n:::\n::::\n\n:::: {.column width=\"55%\"}\n::: {.callout-note appearance=\"minimal\" collapse=\"false\"}\n## Technical implementations\n\n- SAS: `PROC LOGISTIC` (with and without firth option) and `%margins` macro\\\n- R: `stats::glm`, `logistf::logistf` and `beeca::get_marginal_effect`\n:::\n::::\n:::::::\n\n## Findings\n\nBelow are summary of findings from a numerical comparison using example data, where possible we specify the same algorithm in R and SAS.\n\n::: {.callout-note appearance=\"minimal\" collapse=\"false\"}\n### Logistic regression\n\nMaximum Likelihood Estimates and p-values for the Model Parameters have an exact match (at 0.001 level) using `glm` in R vs `PROC LOGISTIC` procedure (without Firth option) in SAS.\n\nWhen using GLM parameterization (see [SAS page](https://psiaims.github.io/CAMIS/SAS/logistic-regr.html) for explanation of SAS parameterization types), the parameters estimates (and 95% CIs) can be exponentiated to provide odds ratios and 95% CIs for odds ratios.\n\nAs default for categorical variables, R uses the first category as reference see [R page](https://psiaims.github.io/CAMIS/R/logistic-regr.html), and SAS uses the last category as reference group. Check your design matrix in SAS, and `contr.` options in R to ensure interpretation of estimates from the model is correct, then results align.\n\nAn exact match (at 0.001 level) is obtained for the Odds ratios and CIs when the same method and same parameterization is used, however SAS Proc Logistic can only calculate Wald CI's. 
Profile likelihood CIs are not available.\n\nR using glm() function, can use the confint() function to calculate CI's using the profile likelihood method or the confint.default() function to calculate CIs using the Wald method.\n:::\n\n::: {.callout-note appearance=\"minimal\" collapse=\"false\"}\n### Firth logistic regression\n\nExact match cannot be obtained for all estimates using `logistf` vs `PROC LOGISTIC` procedure (with Firth option). More specifically:\\\n- Coefficient estimate and 95% CI matched at 0.001 level;\\\n- Standard error are not the same (e.g., 0.02023 for age in R vs 0.02065 in SAS);\\\n- p-value is not the same (0.6288 in R for age vs 0.6348 in SAS);\\\n:::\n\n::: {.callout-note appearance=\"minimal\" collapse=\"false\"}\n### g-computation with covariate adjustment\n\nExact match (at 0.001 level) can be obtained using `get_marginal_effect` in R vs `%margins` macro in SAS.\n:::\n\nIn the following sections, the parameterisation of logistic regression implementation (with an without Firth option) will be compared followed by numerical comparison using example data.\n\n# Prerequisites\n\n## R packages\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(tidyverse)\n```\n\n::: {.cell-output .cell-output-stderr .hidden}\n\n```\n── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──\n✔ dplyr 1.2.0 ✔ readr 2.1.6\n✔ forcats 1.0.1 ✔ stringr 1.6.0\n✔ ggplot2 4.0.2 ✔ tibble 3.3.1\n✔ lubridate 1.9.5 ✔ tidyr 1.3.2\n✔ purrr 1.2.1 \n── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──\n✖ dplyr::filter() masks stats::filter()\n✖ dplyr::lag() masks stats::lag()\nℹ Use the conflicted package () to force all conflicts to become errors\n```\n\n\n:::\n\n```{.r .cell-code}\nlibrary(survival) # for example data\nlibrary(logistf) # for firth regression\nlibrary(beeca) # for covariate adjustment\n```\n:::\n\n\n## Data\n\n### Logistic regressions\n\nWe use the `lung` dataset provided with {survival} R package. 
Initial data preparation involves generating a new binary outcome based on the weight change.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# the lung dataset is available in ./data/lung_cancer.csv\nlung2 <- survival::lung |>\n mutate(\n wt_grp = factor(wt.loss > 0, labels = c(\"weight loss\", \"weight gain\"))\n )\nglimpse(lung2)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nRows: 228\nColumns: 11\n$ inst 3, 3, 3, 5, 1, 12, 7, 11, 1, 7, 6, 16, 11, 21, 12, 1, 22, 16…\n$ time 306, 455, 1010, 210, 883, 1022, 310, 361, 218, 166, 170, 654…\n$ status 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, …\n$ age 74, 68, 56, 57, 60, 74, 68, 71, 53, 61, 57, 68, 68, 60, 57, …\n$ sex 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 2, 1, …\n$ ph.ecog 1, 0, 0, 1, 0, 1, 2, 2, 1, 2, 1, 2, 1, NA, 1, 1, 1, 2, 2, 1,…\n$ ph.karno 90, 90, 90, 90, 100, 50, 70, 60, 70, 70, 80, 70, 90, 60, 80,…\n$ pat.karno 100, 90, 90, 60, 90, 80, 60, 80, 80, 70, 80, 70, 90, 70, 70,…\n$ meal.cal 1175, 1225, NA, 1150, NA, 513, 384, 538, 825, 271, 1025, NA,…\n$ wt.loss NA, 15, 15, 11, 0, 0, 10, 1, 16, 34, 27, 23, 5, 32, 60, 15, …\n$ wt_grp NA, weight gain, weight gain, weight gain, weight loss, weig…\n```\n\n\n:::\n:::\n\n\n### g-computation\n\nWe use the `trial01` dataset provided with {beeca} R package. Initial data preparation involves setting the treatment indicator as a categorical variable and removing any incomplete cases.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndata(\"trial01\")\n\ntrial01$trtp <- factor(trial01$trtp) ## set treatment to a factor\n\ntrial01 <- trial01 |>\n filter(!is.na(aval)) ## remove missing data i.e complete cases analysis\n\n# save the dataset to be imported in SAS\n# write.csv(trial01, file = \"data/trial01.csv\", na = \".\")\n```\n:::\n\n\n# Logistic Regression\n\n## Parameterisation Comparison\n\nThe following set of tables compare how to configure particular parameters / attributes of the methodologies.\n\n| Attribute | SAS
`PROC LOGISTIC` | R
`stats::glm` | Description | Note |\n|:-------------:|:-------------:|:-------------:|:--------------|:--------------|\n| Likelihood optimization algorithm | Default | Default | Fisher's scoring method (i.e., iteratively reweighted least squares (IRLS)) | For logistic regression, parameter estimates and covariance matrices estimated should be the same for both Fisher's and Newton-Raphson algorithm for maximum likelihood. |\n| Convergence criteria | Default | NA | Specifies relative gradient convergence criterion (GCONV=1E--8) | In`PROC LOGISTIC` there are three other convergence criteria which can be specified. However, there is no exact criterion that matches the criteria in `stats::glm`. |\n| Convergence criteria | NA | Default | Specifies relative difference between deviance \\< 1E--8. | |\n| Confidence interval (CI) estimation method | Default | `confint.default()` | Wald CI | In `stats::glm` in R, function confint.default() gives the Wald confidence limits; whereas function confint() gives the profile-likelihood limits. |\n| Hypothesis tests for regression coefficients | Default | Default | Wald tests, which are based on estimates for the regression coefficients and its corresponding standard error. 
| |\n\n: Standard Logistic Regression in SAS vs R {#tbl-1}\n\n## Numerical Comparison {#sec-num-comp}\n\nEvery effort is made to ensure that the R code employs estimation methods/ optimization algorithms/ other components that closely match (as much as possible) those used in the SAS code.\n\n### `glm` in R\n\nNote, the default fitting method in `glm` is consistent with the default fitting method in `PROC LOGISTIC` procedure.\n\n- Default fitting method in `glm` is iteratively reweighted least squares, and the documentation can be found [here](https://www.rdocumentation.org/packages/stats/versions/3.6.2/topics/glm).\n- Default fitting method for `PROC LOGISTIC` procedure is Fisher's scoring method, which is reported as part of the SAS default output, and it is equivalent to \"Iteratively reweighted least squares\" method as reported in this [documentation](https://support.sas.com/documentation/cdl/en/statug/63347/HTML/default/viewer.htm#statug_logistic_sect033.htm).\n\n\n::: {.cell}\n\n```{.r .cell-code}\nm1 <- stats::glm(\n wt_grp ~ age + sex + ph.ecog + meal.cal,\n data = lung2,\n family = binomial(link = \"logit\")\n)\n\n# model coefficients summary\nsummary(m1)$coefficients\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n Estimate Std. Error z value Pr(>|z|)\n(Intercept) 3.2631672833 1.6488206996 1.9790917 0.04780569\nage -0.0101717451 0.0208107243 -0.4887742 0.62500157\nsex -0.8717357187 0.3714041991 -2.3471348 0.01891841\nph.ecog 0.4179665342 0.2588653214 1.6146100 0.10639518\nmeal.cal -0.0008869427 0.0004467405 -1.9853642 0.04710397\n```\n\n\n:::\n:::\n\n\nNote, function `confint.default` gives the Wald confidence limits, which is the default option in SAS `PROC LOGISTIC` procedure; whereas `confint` gives the profile-likelihood limits. 
Conditional odds ratio is calculated by taking the exponential of the model parameters.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncbind(est = coef(m1), confint.default(m1))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n est 2.5 % 97.5 %\n(Intercept) 3.2631672833 0.031538095 6.494796e+00\nage -0.0101717451 -0.050960015 3.061653e-02\nsex -0.8717357187 -1.599674572 -1.437969e-01\nph.ecog 0.4179665342 -0.089400173 9.253332e-01\nmeal.cal -0.0008869427 -0.001762538 -1.134731e-05\n```\n\n\n:::\n:::\n\n\n### `PROC LOGISTIC` in SAS (without firth option)\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nPROC LOGISTIC DATA=LUNG2; # import lung\n\tMODEL WT_GRP(EVENT=\"weight_gain\") = AGE SEX PH_ECOG MEAL_CAL;\n\tods output ESTIMATEs=estimates;\nrun;\n```\n:::\n\n\nBelow is screenshot of output tables summarizing coefficient estimates and confidence intervals\n\n![](../images/logistic_regression/sas_logistic_estimates.png){fig-align=\"left\"}\n\n![](../images/logistic_regression/sas_logistic_ci.png){fig-align=\"left\"}\n\n### Comment on model selection\n\nAs indicated in [Logistic regression in R](https://psiaims.github.io/CAMIS/R/logistic_regr.html) and [Logistic regression in SAS](https://psiaims.github.io/CAMIS/SAS/logistic-regr.html), the chi-Sq test statistics and p-values are different when performing model selections in R vs. SAS. 
The reason for this discrepancy is that the chi-Sq statistics from `anova()` in R is based on deviance test using residual deviance while the chi-Sq statistics from `PROC LOGISTIC` w/ `SELECTION` option in SAS is based on Wald test using z-values squared.\n\n::: {.callout-note appearance=\"minimal\" collapse=\"false\"}\n### Conclusion for logistic regression\n\nExact match (at 0.001 level) can be obtained using `glm` in R vs `PROC LOGISTIC` procedure (without Firth option) in SAS, for coefficient estimates, 95% CI, and for p-value.\n:::\n\n# Firth logistic regression\n\nThe following set of tables compare how to configure particular parameters / attributes of the methodologies.\n\n## Parameterisation Comparison\n\n| Attribute | SAS
`PROC LOGISTIC` w/ Firth option | R
`logistf::logistf` | Description | Note |\n|:-------------:|:--------------|:--------------|:--------------|:--------------|\n| Likelihood optimization algorithm | Default | `control =`
`logistf.control`
`(fit =“IRLS”)` | Fisher's scoring method (i.e., iteratively reweighted least squares (IRLS)) | |\n| Likelihood optimization algorithm | `TECHNIQUE = NEWTON` | Default | Newton-Raphson algorithm | |\n| Convergence criteria | Default | NA | Specifies relative gradient convergence criterion (GCONV=1E--8). | In`PROC LOGISTIC` there are three other convergence criteria which can be specified. If more than one convergence criterion is specified, the optimization is terminated as soon as one of the criteria is satisfied. |\n| Convergence criteria | NA | Default | Specifies three criteria that need to be met: the change in log likelihood is less than lconv (default is 1E-5), the maximum absolute element of the score vector is less than gconv (default is 1E-5), and the maximum absolute change in beta is less than xconv (default is 1E-5). | The gconv criteria in `logistif` is different from `GCONV` in SAS. The lconv criteria is also not exactly the same as the `ABSFCONV` or `FCONV` in `PROC LOGISTIC` in SAS, although the criteria use log likelihood. However, the `xconv` in R and `XCONV` in SAS seems to be consistent. |\n| Convergence criteria | `XCONV = 1E–8` | `control = logistf.control( xconv = 1E–8, lconv = 1, gconv = 1)` | Specifies the maximum absolute change in beta \\< 1E--8. | In `logistf`, three convergence criteria are checked at the same time. So here we use a large convergence criteria value for `lconv` and `gconv` to mimic the scenario where only `xconv` is checked. 
|\n| Confidence interval (CI) estimation method | Default | `pl= FALSE` | Wald CI | For `logistf`: \"Note that from version 1.24.1 on, the variance-covariance matrix is based on the second derivative of the likelihood of the augmented data rather than the original data, which proved to be a better approximation if the user chooses to set a higher value for the penalty strength.\" This could cause differences in standard error estimates in R vs SAS for Firth logistic regression, and consequently results in differences in the corresponding Wald CI estimates and hypothesis tests results (e.g., p-values). |\n| Confidence interval (CI) estimation method | `CLPARM = PL`
`CLODDS = PL` | Default | Profile likelihood-based CI | For Firth's bias-reduced logistic regression, it makes more sense to use penalized likelihood-based CI so it is consistent with the parameter estimation method which uses penalized maximum likelihood. |\n| Hypothesis tests for regression coefficients | Default | pl= FALSE | Wald tests, which are based on estimates for the regression coefficients and its corresponding standard error. | |\n| Hypothesis tests for regression coefficients | NA | Default | \"Likelihood ratio tests\", which are based on profile penalized log likelihood. | In SAS, when the model statement option `CLPARM = PL` is specified, the CI will be calculated based on profile likelihood. However, the hypothesis testing method is still a Wald method. This could cause results mismatch in the p-value. |\n\n: Firth's Bias-Reduced Logistic Regression in SAS vs R {#tbl-2}\n\n## Numerical Comparison\n\nNote that while Firth logistic regression is not required for our example dataset nonetheless we use it for demonstration purposes only.\n\n### `logistf` in R\n\n- By default, the [convergence criteria in `logistf`](https://cran.r-project.org/web/packages/logistf/logistf.pdf) specifies that three criteria need to be met at the same time, i.e., the change in log likelihood is less than lconv (default is 1E-5), the maximum absolute element of the score vector is less than gconv (default is 1E-5), and the maximum absolute change in beta is less than xconv (default is 1E-5). In SAS, the [default convergence criteria in `PROC LOGISTIC`](https://support.sas.com/documentation/cdl/en/statug/63962/HTML/default/viewer.htm#statug_logistic_sect034.htm) specifies relative gradient convergence criterion (GCONV=1E--8); while SAS also support three other convergence criteria but when there are more than one convergence criterion specified, the optimization is terminated as soon as one of the criteria is satisfied. 
By looking at the R pacakge/SAS documentation, the `gconv` criteria in `logistif` function is different from the `GCONV` in SAS. The `lconv` criteria is also not exactly the same as the `ABSFCONV` or `FCONV` in PROC LOGISTIC in SAS, although the criteria use log likelihood. However, similar convergence criteria might be obtained by using the maximum absolute change in parameter estimates (i.e., `xconv` in R and SAS). Therefore, for comparison with the SAS output, in `logistf` function, we use a large convergence criteria value for `lconv` and `gconv` to mimic the scenario where only `xconv` is checked, i.e., specify `logistf.control(xconv = 0.00000001, gconv = 1, lconv = 1)` for the `control` argument.\n\n- By default, `logistf` function in R computes the confidence interval estimates and hypothesis tests (including p-value) for each parameter based on profile likelihood, which is also reported in the output below. However, Wald method (confidence interval and tests) can be specified by specifying the `control` argument with [`pl = FALSE`](https://cran.r-project.org/web/packages/logistf/logistf.pdf).\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfirth_mod <- logistf(\n wt_grp ~ age + sex + ph.ecog + meal.cal,\n data = lung2,\n control = logistf.control(\n fit = \"IRLS\",\n xconv = 0.00000001,\n gconv = 1,\n lconv = 1\n )\n)\nsummary(firth_mod)$coefficients\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nlogistf(formula = wt_grp ~ age + sex + ph.ecog + meal.cal, data = lung2, \n control = logistf.control(fit = \"IRLS\", xconv = 1e-08, gconv = 1, \n lconv = 1))\n\nModel fitted by Penalized ML\nCoefficients:\n coef se(coef) lower 0.95 upper 0.95 Chisq\n(Intercept) 3.1532937589 1.6031659729 0.051844703 6.410119e+00 3.9726447\nage -0.0098111679 0.0202315630 -0.050518148 2.974343e-02 0.2337368\nsex -0.8455619163 0.3632129422 -1.571158740 -1.356810e-01 5.4536777\nph.ecog 0.4018229715 0.2520090355 -0.090278518 9.093255e-01 2.5553004\nmeal.cal -0.0008495327 0.0004288525 
-0.001722033 -7.098976e-06 3.9058205\n p method\n(Intercept) 0.04624509 2\nage 0.62876680 2\nsex 0.01952718 2\nph.ecog 0.10992492 2\nmeal.cal 0.04811912 2\n\nMethod: 1-Wald, 2-Profile penalized log-likelihood, 3-None\n\nLikelihood ratio test=10.54964 on 4 df, p=0.03212009, n=170\nWald test = 33.85701 on 4 df, p = 7.972359e-07\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stdout}\n\n```\n (Intercept) age sex ph.ecog meal.cal \n 3.1532937589 -0.0098111679 -0.8455619163 0.4018229715 -0.0008495327 \n```\n\n\n:::\n\n```{.r .cell-code}\n## Code below would give Wald CI and tests results by adding `pl = FALSE`\n# logistf(..., pl = FALSE)\n```\n:::\n\n\nNote, function `confint` gives the profile-likelihood limits. Given the parameters from Firth's bias-reduced logistic regression is estimated using penalized maximum likelihood, `confint` function is used. Conditional odds ratio is calculated by taking the exponential of the model parameters.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncbind(est = coef(firth_mod), confint(firth_mod))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n est Lower 95% Upper 95%\n(Intercept) 3.1532937589 0.051844703 6.410119e+00\nage -0.0098111679 -0.050518148 2.974343e-02\nsex -0.8455619163 -1.571158740 -1.356810e-01\nph.ecog 0.4018229715 -0.090278518 9.093255e-01\nmeal.cal -0.0008495327 -0.001722033 -7.098976e-06\n```\n\n\n:::\n:::\n\n\n### `PROC LOGISTIC` in SAS (with firth option)\n\n- Note, by default, SAS computes confidence interval based on Wald tests. Given the parameters from Firth's method is estimated using penalized maximum likelihood, below specifies CLODDS = PL CLPARM=PL (based on profile likelihood), which is consistent with the maximization method and the R code above. 
However, the [default hypothesis test for the regression coefficients](https://go.documentation.sas.com/doc/en/pgmsascdc/9.4_3.4/statug/statug_logistic_details50.htm) is still a Wald test, and the Chi-square statistics is calculated based on coefficient estimate and its corresponding standard error.\n\n- `XCONV` specifies relative parameter convergence criterion, which should correspond to the `xconv` in `logistf` function in R. We specify `XCONV = 0.00000001` so it should be consistent with the R code above.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nPROC LOGISTIC DATA=LUNG2;\n\tMODEL WT_GRP(EVENT=\"weight gain\") = AGE SEX PH_ECOG MEAL_CAL / firth \n clodds=PL clparm=PL xconv = 0.00000001;\n\tods output ESTIMATEs=estimates;\nrun;\n```\n:::\n\n\nBelow is screenshot of output tables summarizing coefficient estimates and it's 95% CI\n\n![](../images/logistic_regression/sas_logistic_firth_estimates.png){fig-align=\"left\"}\n\n![](../images/logistic_regression/sas_logistic_firth_ci.png){fig-align=\"left\"}\n\n::: {.callout-note appearance=\"minimal\" collapse=\"false\"}\n### Conclusion for Firth logistic regression\n\nExact match cannot be obtained for all estimates using `logistf` vs `PROC LOGISTIC` procedure with Firth option. More specifically:\\\n- Coefficient estimate and its 95% CI matched at 0.001 level;\\\n- Standard error are not the same (e.g., 0.02023 for age in R vs 0.02065 in SAS);\\\n- p-value is not the same (0.6288 in R for age vs 0.6348 in SAS);\\\n:::\n\n# g-computation with covariate adjustment\n\nWe compare two implementions of g-computation in SAS:\n\n1. The \"Predictive margins and average marginal effects\" [%margins](https://support.sas.com/kb/63/038.html#pur) macro. The %margins macro uses \"the delta method \\[...\\] to determine the standard errors for predictive margins and marginal effects\". 
Note that the %margins macro uses the `PROC GENMOD` procedure to implement the working logistic regression model and require another macro [%NLEST](https://support.sas.com/kb/58/775.html) to calculate contrasts that requires delta methodl such as risk ratio or odds ratio.\n2. The SAS code provided in the appendix of the [Ge et al. (2011)](https://journals.sagepub.com/doi/10.1177/009286151104500409) implements the method outlined in the associated paper and simulations. Note: the Ge et al. (2011) macro uses the `PROC LOGISTIC` procedure to implement the working logistic regression model. `PROC IML` is used to calculate the delta method to determine the standard errors.\n\n## Numerical Comparison\n\n### `get_marginal_effect` in R\n\nWe fit a logistic regression model with covariate adjustment to estimate the marginal treatment effect using the delta method for variance estimation: as outlined in Ge et al (2011).\n\n\n::: {.cell}\n\n```{.r .cell-code}\n## fit the model including model based variance estimation with delta method\nfit1 <- stats::glm(aval ~ trtp + bl_cov, family = \"binomial\", data = trial01) |>\n beeca::get_marginal_effect(\n trt = \"trtp\",\n method = \"Ge\",\n contrast = \"diff\",\n reference = \"0\",\n type = \"model-based\"\n )\n\n\"Marginal treatment effect\"\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] \"Marginal treatment effect\"\n```\n\n\n:::\n\n```{.r .cell-code}\nfit1$marginal_est\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n diff: 1-0 \n-0.06836399 \nattr(,\"reference\")\n[1] \"0\"\nattr(,\"contrast\")\n[1] \"diff: 1-0\"\n```\n\n\n:::\n\n```{.r .cell-code}\n\"Standard error\"\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] \"Standard error\"\n```\n\n\n:::\n\n```{.r .cell-code}\nfit1$marginal_se\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n diff: 1-0 \n0.06071641 \nattr(,\"reference\")\n[1] \"0\"\nattr(,\"contrast\")\n[1] \"diff: 1-0\"\nattr(,\"type\")\n[1] \"Ge - 
model-based\"\n```\n\n\n:::\n:::\n\n\n### `%Margins` macro in SAS\n\nWe now use the SAS [`%Margins`](https://support.sas.com/kb/63/038.html) macro to perform the Ge et al. (2011) method on `trial01` to estimate the marginal risk difference and it's standard error.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\n%Margins(data = myWork.trial01,\n class = trtp,\n classgref = first, /*Set reference to first level*/\n response = avaln,\n roptions = event='1', /*Ensure event is set to 1 = Yes */\n dist = binomial, \n model = trtp bl_cov,\n margins = trtp, \n options = cl diff reverse, /*Specify risk difference contrast and \n direction of treatment effect is correct*/\n link = logit); /*Specify logit link function */\n\t\n** Store output data sets ; \ndata myWork.margins_trt_estimates;\n set work._MARGINS;\nrun;\n\ndata myWork.margins_trt_diffs;\n set work._DIFFSPM;\nrun;\n```\n:::\n\n\n![](../images/logistic_regression/sas_logistic_gcomp_margins.png){fig-align=\"left\"}\n\n### `%LR` macro in SAS (Ge et al, 2011)\n\n\n::: {.cell}\n\n```{.sas .cell-code}\n%LR(data = myWork.trial01, /* input data set */\n\tvar1 = bl_cov, /* continuous covariates in the logistic regression */\n\tvar2 = trtp, /* categorical covariates in the logistic regression */\n\tp1 = 1, /* number of continuous covariates in the logistic regression */\n\tp2 = 1, /* number of categorical covariates in the logistic regression */\n\tresp = avaln, /* binary response variable in the logistic regression */\n\tntrt = 1); /* position of the treatment variable in the categorical covariates */\n\t\ndata myWork.ge_macro_trt_diffs;\n set work.geout;\nrun;\n```\n:::\n\n\n![](../images/logistic_regression/sas_logistic_gcomp_ge.png){fig-align=\"left\"}\n\n::: {.callout-note appearance=\"minimal\" collapse=\"false\"}\n### Conclusion for g-computation with covariate adjustment\n\nExact match at the 0.001 level.\n:::\n\n# Final remarks\n\nIn summary, there are a few things to be aware of when comparing logistic regression 
results in R vs SAS. It is crucial to carefully manage the input parameters for each model to ensure they are configured similarly for logistic regression analyses. As highlighted also in [Logistic Regression in SAS](https://psiaims.github.io/CAMIS/SAS/logistic-regr.html), the variable parameterization is also important for modelling and interpretation, ensuring the types of variable (continuous vs. categorical) and reference values of categorical variable are applied as expected.\n\n1. **Likelihood optimization method**\n\n- The default likelihood optimization method in `glm` and `PROC LOGISTIC` is the same (i.e., Fisher's scoring method or iteratively reweighted least squares (IRLS)).\n\n- However, the default optimization method in `logistf` is Newton-Raphson, which can be modified into IRLS via `control = logistf.control(fit = “IRLS”)`. Alternatively, one could specify `technique = newton` in the model statement in SAS to modify the likelihood optimization method.\n\n2. **Convergence criteria**\n\n- Although both SAS and R allows options to modify the convergence criteria, the criteria does not seem to be exactly the same, which could cause results mismatch in some scenarios.\n\n- The [default convergence criteria in `PROC LOGISTIC`](https://support.sas.com/documentation/cdl/en/statug/63962/HTML/default/viewer.htm#statug_logistic_sect034.htm) specifies the relative gradient convergence criterion; where the [default convergence criteria in `glm`](https://stat.ethz.ch/R-manual/R-devel/library/stats/html/glm.control.html) specifies relative difference between deviance.\n\n- The default setting in logistf have checked more than one convergence criterion in its algorithm (i.e., [change in log likelihood, derivative of the log likelihood and parameter estimates](https://cran.r-project.org/web/packages/logistf/logistf.pdf)). 
One could specify a very large value for two of the criteria in order to mimic the scenario where only one criterion is checked (e.g., `control = logistf.control(xconv = 0.00000001, lconv = 1, gconv = 1)` in `logistf` in R should be consistent with the option of `xconv = 0.00000001` in SAS).\n\n3. **Confidence interval**\n\n- The `confint()` function in R computes profile likelihood based CI for `glm` fitted model. However, in SAS, the default confidence interval is Wald CI. To match the default CI calculation in SAS for `glm` fitted model, use `confint.default()` function in R.\n\n- Nevertheless, Firth's bias-reduced logistic regression estimates parameters using penalized maximum likelihood, so it makes more sense to use the `confint()` function for a `logistf` fitted model. In the meantime, in SAS, when fitting a Firth's logistic regression, it is also better to specify the model statement option `clparm = pl` which will also generate profile penalized likelihood CI.\n\n- We shall note that in the Firth logistic regression numerical example, the estimated standard errors do not match, but the CIs match at the 0.001 level. This is because the CI was estimated based on profile penalized likelihood in R and SAS, and please see the next discussion point for potential reasons about differences between the estimated standard errors. (We have compared Wald CIs estimated in R vs SAS, which did not match. This makes sense as Wald CIs are calculated based on the estimated standard errors.)\n\n4. **Hypothesis test and p-value**\n\n- The default hypothesis tests for the regression coefficients are the same in `glm` and `PROC LOGISTIC`, which are both Wald tests and calculated based on estimates for the regression coefficients and their corresponding standard errors.\n\n- As for the `logistf` function, the default hypothesis testing method is based on profile penalized log likelihood (source code [here](https://github.com/georgheinze/logistf/blob/master/R/logistf.R)). 
And it was noted in the [R documentation](https://cran.r-project.org/web/packages/logistf/logistf.pdf) that, *\"from version 1.24.1 on, the variance-covariance matrix is based on the second derivative of the likelihood of the augmented data rather than the original data, which proved to be a better approximation if the user chooses to set a higher value for the penalty strength.\"* This could cause difference in the estimate of standard error in R vs SAS for Firth logistic regression, and consequently results in differences in the corresponding Wald CI estimates and hypothesis tests results (e.g., p-values).\n\n- Wald method can be used in a `logistf` function in R by specifying `pl = FALSE` in the `control` argument, which should correspond to the method used in SAS to calculate p-value. However, when specifying `pl = FALSE`, the CI is also calculated using Wald method.\n\n# Reference\n\n- A relevant blog [here](https://sas-and-r.blogspot.com/2010/11/example-815-firth-logistic-regression.html) (check comments in the blog).\n- [PROC LOGISTIC statement documentation in SAS](https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_logistic_sect004.htm).\n- [Reference manual for `logistf` package in R](https://cran.r-project.org/web/packages/logistf/logistf.pdf).\n- [GitHub repository for `logistf` package in R](https://github.com/georgheinze/logistf).\n- [GitHub repository for a SAS procedure about Firth logistic regression authored by the author of `logistf` R package](https://github.com/georgheinze/flicflac/tree/master/LogisticRegression), which was based on PROC IML instead of PROC LOGISTIC and was probably authored before the availability of Firth option in PROC LOGISTIC statement in SAS.\n- Ge, Miaomiao, et al. 
\"Covariate-adjusted difference in proportions from clinical trials using logistic regression and weighted risk differences.\" Drug information journal: DIJ/Drug Information Association 45 (2011): 481-493.\n- SAS Institute Inc. [\"Predictive margins and average marginal effects.\"](https://support.sas.com/kb/63/038.html) (Last Published: 13 Dec 2023)", + "markdown": "---\ntitle: \"R vs SAS: Logistic Regression\"\ntoc: true\necho: true\neval: false\nkeep-hidden: true\n---\n\n# Summary\n\n## Goal\n\nComparison of results between SAS vs R for different applications of logistic regression; where possible we try to ensure the same statistical method or algorithm is specified. However, there are some underlying differences between the algorithms in SAS vs R that cannot be (at least not easily) \"tweaked\". The document also provides some remarks on what parameters to look out for and what could have caused the numerical differences.\n\n## Scope\n\n::::::: columns\n:::: {.column width=\"45%\"}\n::: {.callout-note appearance=\"minimal\" collapse=\"false\"}\n## Methodologies\n\n- Logistic regression\n- Firth's bias-reduced logistic regression\n- g-computation / standardization with covariate adjustment\n:::\n::::\n\n:::: {.column width=\"55%\"}\n::: {.callout-note appearance=\"minimal\" collapse=\"false\"}\n## Technical implementations\n\n- SAS: `PROC LOGISTIC` (with and without firth option) and `%margins` macro\\\n- R: `stats::glm`, `logistf::logistf` and `beeca::get_marginal_effect`\n:::\n::::\n:::::::\n\n## Findings\n\nBelow are summary of findings from a numerical comparison using example data, where possible we specify the same algorithm in R and SAS.\n\n::: {.callout-note appearance=\"minimal\" collapse=\"false\"}\n### Logistic regression\n\nMaximum Likelihood Estimates and p-values for the Model Parameters have an exact match (at 0.001 level) using `glm` in R vs `PROC LOGISTIC` procedure (without Firth option) in SAS.\n\nWhen using GLM parameterization (see [SAS 
page](https://psiaims.github.io/CAMIS/SAS/logistic-regr.html) for explanation of SAS parameterization types), the parameters estimates (and 95% CIs) can be exponentiated to provide odds ratios and 95% CIs for odds ratios.\n\nAs default for categorical variables, R uses the first category as reference see [R page](https://psiaims.github.io/CAMIS/R/logistic-regr.html), and SAS uses the last category as reference group. Check your design matrix in SAS, and `contr.` options in R to ensure interpretation of estimates from the model is correct, then results align.\n\nAn exact match (at 0.001 level) is obtained for the Odds ratios and CIs when the same method and same parameterization is used, however SAS Proc Logistic can only calculate Wald CI's. Profile likelihood CIs are not available.\n\nR using glm() function, can use the confint() function to calculate CI's using the profile likelihood method or the confint.default() function to calculate CIs using the Wald method.\n:::\n\n::: {.callout-note appearance=\"minimal\" collapse=\"false\"}\n### Firth logistic regression\n\nExact match cannot be obtained for all estimates using `logistf` vs `PROC LOGISTIC` procedure (with Firth option). 
More specifically:\\\n- Coefficient estimate and 95% CI matched at 0.001 level;\\\n- Standard error are not the same (e.g., 0.02023 for age in R vs 0.02065 in SAS);\\\n- p-value is not the same (0.6288 in R for age vs 0.6348 in SAS);\\\n:::\n\n::: {.callout-note appearance=\"minimal\" collapse=\"false\"}\n### g-computation with covariate adjustment\n\nExact match (at 0.001 level) can be obtained using `get_marginal_effect` in R vs `%margins` macro in SAS.\n:::\n\nIn the following sections, the parameterisation of logistic regression implementation (with an without Firth option) will be compared followed by numerical comparison using example data.\n\n# Prerequisites\n\n## R packages\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(tidyverse)\nlibrary(survival) # for example data\nlibrary(logistf) # for firth regression\nlibrary(beeca) # for covariate adjustment\n```\n:::\n\n\n## Data\n\n### Logistic regressions\n\nWe use the `lung` dataset provided with {survival} R package. Initial data preparation involves generating a new binary outcome based on the weight change.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# the lung dataset is available in ./data/lung_cancer.csv\nlung2 <- survival::lung |>\n mutate(\n wt_grp = factor(wt.loss > 0, labels = c(\"weight loss\", \"weight gain\"))\n )\nglimpse(lung2)\n```\n:::\n\n\n### g-computation\n\nWe use the `trial01` dataset provided with {beeca} R package. 
Initial data preparation involves setting the treatment indicator as a categorical variable and removing any incomplete cases.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndata(\"trial01\")\n\ntrial01$trtp <- factor(trial01$trtp) ## set treatment to a factor\n\ntrial01 <- trial01 |>\n filter(!is.na(aval)) ## remove missing data i.e complete cases analysis\n\n# save the dataset to be imported in SAS\n# write.csv(trial01, file = \"data/trial01.csv\", na = \".\")\n```\n:::\n\n\n# Logistic Regression\n\n## Parameterisation Comparison\n\nThe following set of tables compare how to configure particular parameters / attributes of the methodologies.\n\n| Attribute | SAS
`PROC LOGISTIC` | R
`stats::glm` | Description | Note |\n|:-------------:|:-------------:|:-------------:|:--------------|:--------------|\n| Likelihood optimization algorithm | Default | Default | Fisher's scoring method (i.e., iteratively reweighted least squares (IRLS)) | For logistic regression, parameter estimates and covariance matrices estimated should be the same for both Fisher's and Newton-Raphson algorithm for maximum likelihood. |\n| Convergence criteria | Default | NA | Specifies relative gradient convergence criterion (GCONV=1E--8) | In`PROC LOGISTIC` there are three other convergence criteria which can be specified. However, there is no exact criterion that matches the criteria in `stats::glm`. |\n| Convergence criteria | NA | Default | Specifies relative difference between deviance \\< 1E--8. | |\n| Confidence interval (CI) estimation method | Default | `confint.default()` | Wald CI | In `stats::glm` in R, function confint.default() gives the Wald confidence limits; whereas function confint() gives the profile-likelihood limits. |\n| Hypothesis tests for regression coefficients | Default | Default | Wald tests, which are based on estimates for the regression coefficients and its corresponding standard error. 
| |\n\n: Standard Logistic Regression in SAS vs R {#tbl-1}\n\n## Numerical Comparison {#sec-num-comp}\n\nEvery effort is made to ensure that the R code employs estimation methods/ optimization algorithms/ other components that closely match (as much as possible) those used in the SAS code.\n\n### `glm` in R\n\nNote, the default fitting method in `glm` is consistent with the default fitting method in `PROC LOGISTIC` procedure.\n\n- Default fitting method in `glm` is iteratively reweighted least squares, and the documentation can be found [here](https://www.rdocumentation.org/packages/stats/versions/3.6.2/topics/glm).\n- Default fitting method for `PROC LOGISTIC` procedure is Fisher's scoring method, which is reported as part of the SAS default output, and it is equivalent to \"Iteratively reweighted least squares\" method as reported in this [documentation](https://support.sas.com/documentation/cdl/en/statug/63347/HTML/default/viewer.htm#statug_logistic_sect033.htm).\n\n\n::: {.cell}\n\n```{.r .cell-code}\nm1 <- stats::glm(\n wt_grp ~ age + sex + ph.ecog + meal.cal,\n data = lung2,\n family = binomial(link = \"logit\")\n)\n\n# model coefficients summary\nsummary(m1)$coefficients\n```\n:::\n\n\nNote, function `confint.default` gives the Wald confidence limits, which is the default option in SAS `PROC LOGISTIC` procedure; whereas `confint` gives the profile-likelihood limits. 
Conditional odds ratio is calculated by taking the exponential of the model parameters.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncbind(est = coef(m1), confint.default(m1))\n```\n:::\n\n\n### `PROC LOGISTIC` in SAS (without firth option)\n\n```sas\nPROC LOGISTIC DATA=LUNG2; # import lung\n\tMODEL WT_GRP(EVENT=\"weight_gain\") = AGE SEX PH_ECOG MEAL_CAL;\n\tods output ESTIMATEs=estimates;\nrun;\n```\n\nBelow is screenshot of output tables summarizing coefficient estimates and confidence intervals\n\n![](../images/logistic_regression/sas_logistic_estimates.png){fig-align=\"left\"}\n\n![](../images/logistic_regression/sas_logistic_ci.png){fig-align=\"left\"}\n\n### Comment on model selection\n\nAs indicated in [Logistic regression in R](https://psiaims.github.io/CAMIS/R/logistic_regr.html) and [Logistic regression in SAS](https://psiaims.github.io/CAMIS/SAS/logistic-regr.html), the chi-Sq test statistics and p-values are different when performing model selections in R vs. SAS. The reason for this discrepancy is that the chi-Sq statistics from `anova()` in R is based on deviance test using residual deviance while the chi-Sq statistics from `PROC LOGISTIC` w/ `SELECTION` option in SAS is based on Wald test using z-values squared.\n\n::: {.callout-note appearance=\"minimal\" collapse=\"false\"}\n### Conclusion for logistic regression\n\nExact match (at 0.001 level) can be obtained using `glm` in R vs `PROC LOGISTIC` procedure (without Firth option) in SAS, for coefficient estimates, 95% CI, and for p-value.\n:::\n\n# Firth logistic regression\n\nThe following set of tables compare how to configure particular parameters / attributes of the methodologies.\n\n## Parameterisation Comparison\n\n| Attribute | SAS
`PROC LOGISTIC` w/ Firth option | R
`logistf::logistf` | Description | Note |\n|:-------------:|:--------------|:--------------|:--------------|:--------------|\n| Likelihood optimization algorithm | Default | `control =`
`logistf.control`
`(fit =“IRLS”)` | Fisher's scoring method (i.e., iteratively reweighted least squares (IRLS)) | |\n| Likelihood optimization algorithm | `TECHNIQUE = NEWTON` | Default | Newton-Raphson algorithm | |\n| Convergence criteria | Default | NA | Specifies relative gradient convergence criterion (GCONV=1E--8). | In`PROC LOGISTIC` there are three other convergence criteria which can be specified. If more than one convergence criterion is specified, the optimization is terminated as soon as one of the criteria is satisfied. |\n| Convergence criteria | NA | Default | Specifies three criteria that need to be met: the change in log likelihood is less than lconv (default is 1E-5), the maximum absolute element of the score vector is less than gconv (default is 1E-5), and the maximum absolute change in beta is less than xconv (default is 1E-5). | The gconv criteria in `logistif` is different from `GCONV` in SAS. The lconv criteria is also not exactly the same as the `ABSFCONV` or `FCONV` in `PROC LOGISTIC` in SAS, although the criteria use log likelihood. However, the `xconv` in R and `XCONV` in SAS seems to be consistent. |\n| Convergence criteria | `XCONV = 1E–8` | `control = logistf.control( xconv = 1E–8, lconv = 1, gconv = 1)` | Specifies the maximum absolute change in beta \\< 1E--8. | In `logistf`, three convergence criteria are checked at the same time. So here we use a large convergence criteria value for `lconv` and `gconv` to mimic the scenario where only `xconv` is checked. 
|\n| Confidence interval (CI) estimation method | Default | `pl= FALSE` | Wald CI | For `logistf`: \"Note that from version 1.24.1 on, the variance-covariance matrix is based on the second derivative of the likelihood of the augmented data rather than the original data, which proved to be a better approximation if the user chooses to set a higher value for the penalty strength.\" This could cause differences in standard error estimates in R vs SAS for Firth logistic regression, and consequently results in differences in the corresponding Wald CI estimates and hypothesis tests results (e.g., p-values). |\n| Confidence interval (CI) estimation method | `CLPARM = PL`
`CLODDS = PL` | Default | Profile likelihood-based CI | For Firth's bias-reduced logistic regression, it makes more sense to use penalized likelihood-based CI so it is consistent with the parameter estimation method which uses penalized maximum likelihood. |\n| Hypothesis tests for regression coefficients | Default | pl= FALSE | Wald tests, which are based on estimates for the regression coefficients and its corresponding standard error. | |\n| Hypothesis tests for regression coefficients | NA | Default | \"Likelihood ratio tests\", which are based on profile penalized log likelihood. | In SAS, when the model statement option `CLPARM = PL` is specified, the CI will be calculated based on profile likelihood. However, the hypothesis testing method is still a Wald method. This could cause results mismatch in the p-value. |\n\n: Firth's Bias-Reduced Logistic Regression in SAS vs R {#tbl-2}\n\n## Numerical Comparison\n\nNote that while Firth logistic regression is not required for our example dataset nonetheless we use it for demonstration purposes only.\n\n### `logistf` in R\n\n- By default, the [convergence criteria in `logistf`](https://cran.r-project.org/web/packages/logistf/logistf.pdf) specifies that three criteria need to be met at the same time, i.e., the change in log likelihood is less than lconv (default is 1E-5), the maximum absolute element of the score vector is less than gconv (default is 1E-5), and the maximum absolute change in beta is less than xconv (default is 1E-5). In SAS, the [default convergence criteria in `PROC LOGISTIC`](https://support.sas.com/documentation/cdl/en/statug/63962/HTML/default/viewer.htm#statug_logistic_sect034.htm) specifies relative gradient convergence criterion (GCONV=1E--8); while SAS also support three other convergence criteria but when there are more than one convergence criterion specified, the optimization is terminated as soon as one of the criteria is satisfied. 
By looking at the R pacakge/SAS documentation, the `gconv` criteria in `logistif` function is different from the `GCONV` in SAS. The `lconv` criteria is also not exactly the same as the `ABSFCONV` or `FCONV` in PROC LOGISTIC in SAS, although the criteria use log likelihood. However, similar convergence criteria might be obtained by using the maximum absolute change in parameter estimates (i.e., `xconv` in R and SAS). Therefore, for comparison with the SAS output, in `logistf` function, we use a large convergence criteria value for `lconv` and `gconv` to mimic the scenario where only `xconv` is checked, i.e., specify `logistf.control(xconv = 0.00000001, gconv = 1, lconv = 1)` for the `control` argument.\n\n- By default, `logistf` function in R computes the confidence interval estimates and hypothesis tests (including p-value) for each parameter based on profile likelihood, which is also reported in the output below. However, Wald method (confidence interval and tests) can be specified by specifying the `control` argument with [`pl = FALSE`](https://cran.r-project.org/web/packages/logistf/logistf.pdf).\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfirth_mod <- logistf(\n wt_grp ~ age + sex + ph.ecog + meal.cal,\n data = lung2,\n control = logistf.control(\n fit = \"IRLS\",\n xconv = 0.00000001,\n gconv = 1,\n lconv = 1\n )\n)\nsummary(firth_mod)$coefficients\n\n## Code below would give Wald CI and tests results by adding `pl = FALSE`\n# logistf(..., pl = FALSE)\n```\n:::\n\n\nNote, function `confint` gives the profile-likelihood limits. Given the parameters from Firth's bias-reduced logistic regression is estimated using penalized maximum likelihood, `confint` function is used. 
Conditional odds ratio is calculated by taking the exponential of the model parameters.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncbind(est = coef(firth_mod), confint(firth_mod))\n```\n:::\n\n\n### `PROC LOGISTIC` in SAS (with firth option)\n\n- Note, by default, SAS computes confidence interval based on Wald tests. Given the parameters from Firth's method is estimated using penalized maximum likelihood, below specifies CLODDS = PL CLPARM=PL (based on profile likelihood), which is consistent with the maximization method and the R code above. However, the [default hypothesis test for the regression coefficients](https://go.documentation.sas.com/doc/en/pgmsascdc/9.4_3.4/statug/statug_logistic_details50.htm) is still a Wald test, and the Chi-square statistics is calculated based on coefficient estimate and its corresponding standard error.\n\n- `XCONV` specifies relative parameter convergence criterion, which should correspond to the `xconv` in `logistf` function in R. We specify `XCONV = 0.00000001` so it should be consistent with the R code above.\n\n```sas\nPROC LOGISTIC DATA=LUNG2;\n\tMODEL WT_GRP(EVENT=\"weight gain\") = AGE SEX PH_ECOG MEAL_CAL / firth \n clodds=PL clparm=PL xconv = 0.00000001;\n\tods output ESTIMATEs=estimates;\nrun;\n```\n\nBelow is screenshot of output tables summarizing coefficient estimates and it's 95% CI\n\n![](../images/logistic_regression/sas_logistic_firth_estimates.png){fig-align=\"left\"}\n\n![](../images/logistic_regression/sas_logistic_firth_ci.png){fig-align=\"left\"}\n\n::: {.callout-note appearance=\"minimal\" collapse=\"false\"}\n### Conclusion for Firth logistic regression\n\nExact match cannot be obtained for all estimates using `logistf` vs `PROC LOGISTIC` procedure with Firth option. 
More specifically:\\\n- Coefficient estimate and its 95% CI matched at 0.001 level;\\\n- Standard errors are not the same (e.g., 0.02023 for age in R vs 0.02065 in SAS);\\\n- p-value is not the same (0.6288 in R for age vs 0.6348 in SAS);\\\n:::\n\n# g-computation with covariate adjustment\n\nWe compare two implementations of g-computation in SAS:\n\n1. The \"Predictive margins and average marginal effects\" [%margins](https://support.sas.com/kb/63/038.html#pur) macro. The %margins macro uses \"the delta method \\[...\\] to determine the standard errors for predictive margins and marginal effects\". Note that the %margins macro uses the `PROC GENMOD` procedure to implement the working logistic regression model and requires another macro [%NLEST](https://support.sas.com/kb/58/775.html) to calculate contrasts that require the delta method, such as risk ratio or odds ratio.\n2. The SAS code provided in the appendix of the [Ge et al. (2011)](https://journals.sagepub.com/doi/10.1177/009286151104500409) implements the method outlined in the associated paper and simulations. Note: the Ge et al. (2011) macro uses the `PROC LOGISTIC` procedure to implement the working logistic regression model. 
`PROC IML` is used to calculate the delta method to determine the standard errors.\n\n## Numerical Comparison\n\n### `get_marginal_effect` in R\n\nWe fit a logistic regression model with covariate adjustment to estimate the marginal treatment effect using the delta method for variance estimation: as outlined in Ge et al (2011).\n\n\n::: {.cell}\n\n```{.r .cell-code}\n## fit the model including model based variance estimation with delta method\nfit1 <- stats::glm(aval ~ trtp + bl_cov, family = \"binomial\", data = trial01) |>\n beeca::get_marginal_effect(\n trt = \"trtp\",\n method = \"Ge\",\n contrast = \"diff\",\n reference = \"0\",\n type = \"model-based\"\n )\n\n\"Marginal treatment effect\"\nfit1$marginal_est\n\n\"Standard error\"\nfit1$marginal_se\n```\n:::\n\n\n### `%Margins` macro in SAS\n\nWe now use the SAS [`%Margins`](https://support.sas.com/kb/63/038.html) macro to perform the Ge et al. (2011) method on `trial01` to estimate the marginal risk difference and it's standard error.\n\n```sas\n%Margins(data = myWork.trial01,\n class = trtp,\n classgref = first, /*Set reference to first level*/\n response = avaln,\n roptions = event='1', /*Ensure event is set to 1 = Yes */\n dist = binomial, \n model = trtp bl_cov,\n margins = trtp, \n options = cl diff reverse, /*Specify risk difference contrast and \n direction of treatment effect is correct*/\n link = logit); /*Specify logit link function */\n\t\n** Store output data sets ; \ndata myWork.margins_trt_estimates;\n set work._MARGINS;\nrun;\n\ndata myWork.margins_trt_diffs;\n set work._DIFFSPM;\nrun;\n```\n\n![](../images/logistic_regression/sas_logistic_gcomp_margins.png){fig-align=\"left\"}\n\n### `%LR` macro in SAS (Ge et al, 2011)\n\n```sas\n%LR(data = myWork.trial01, /* input data set */\n\tvar1 = bl_cov, /* continuous covariates in the logistic regression */\n\tvar2 = trtp, /* categorical covariates in the logistic regression */\n\tp1 = 1, /* number of continuous covariates in the logistic regression 
*/\n\tp2 = 1, /* number of categorical covariates in the logistic regression */\n\tresp = avaln, /* binary response variable in the logistic regression */\n\tntrt = 1); /* position of the treatment variable in the categorical covariates */\n\t\ndata myWork.ge_macro_trt_diffs;\n set work.geout;\nrun;\n```\n\n![](../images/logistic_regression/sas_logistic_gcomp_ge.png){fig-align=\"left\"}\n\n::: {.callout-note appearance=\"minimal\" collapse=\"false\"}\n### Conclusion for g-computation with covariate adjustment\n\nExact match at the 0.001 level.\n:::\n\n# Final remarks\n\nIn summary, there are a few things to be aware of when comparing logistic regression results in R vs SAS. It is crucial to carefully manage the input parameters for each model to ensure they are configured similarly for logistic regression analyses. As highlighted also in [Logistic Regression in SAS](https://psiaims.github.io/CAMIS/SAS/logistic-regr.html), the variable parameterization is also important for modelling and interpretation, ensuring the types of variable (continuous vs. categorical) and reference values of categorical variable are applied as expected.\n\n1. **Likelihood optimization method**\n\n- The default likelihood optimization method in `glm` and `PROC LOGISTIC` is the same (i.e., Fisher's scoring method or iteratively reweighted least squares (IRLS)).\n\n- However, the default optimization method in `logistf` is Newton-Raphson, which can be modified into IRLS via `control = logistf.control(fit = “IRLS”)`. Alternatively, one could specify `technique = newton` in the model statement in SAS to modify the likelihood optimization method.\n\n2. 
**Convergence criteria**\n\n- Although both SAS and R allows options to modify the convergence criteria, the criteria does not seem to be exactly the same, which could cause results mismatch in some scenarios.\n\n- The [default convergence criteria in `PROC LOGISTIC`](https://support.sas.com/documentation/cdl/en/statug/63962/HTML/default/viewer.htm#statug_logistic_sect034.htm) specifies the relative gradient convergence criterion; where the [default convergence criteria in `glm`](https://stat.ethz.ch/R-manual/R-devel/library/stats/html/glm.control.html) specifies relative difference between deviance.\n\n- The default setting in logistf have checked more than one convergence criterion in its algorithm (i.e., [change in log likelihood, derivative of the log likelihood and parameter estimates](https://cran.r-project.org/web/packages/logistf/logistf.pdf)). One could specify a very large value for two of the criteria in order to mimic the scenario where only one criterion is checked (e.g., `control = logistf.control (xconv = 0.00000001, lconv = 1, gconv = 1`) in `logistf` in R should be consistent to the option of `xconv = 0.00000001` in SAS).\n\n3. **Confidence interval**\n\n- The `confint()` function in R will computes profile likelihood based CI for `glm` fitted model. However, in SAS, the default confidence interval is Wald CI. To match the default CI calculation in SAS for `glm` fitted model, use `confint.default()` function in R.\n\n- Nevertheless, Firth's biased-reduced logistic regression estimates parameter using penalized maximum likelihood, it makes more sense to use `confint()` function for `logistf` fitted model. 
In the meantime, in SAS, when fitting a Firth's logistic regression, it is also better to specify the model statement option `clparm = pl` which will also generate profile penalized likelihood CI.\n\n- We shall note that in the Firth logistic regression numerical example, the estimated standard errors does not match, but the CIs match at 0.001 level. This is because the CI was estimated based on profile penalized likelihood in R and SAS, and please see the next discussion point for potential reasons about differences between the estimated standard error. (I have compared Wald CIs estimated in R vs SAS, which could not match. This make sense as Wald CIs are calculated based on the estimated standard errors.)\n\n4. **Hypothesis test and p-value**\n\n- The default hypothesis tests for the regression coefficients are the same in `glm` and `PROC LOGISTIC`, which are both Wald tests and calculated based on estimates for the regression coefficients and its corresponding standard error.\n\n- As for `logistf` function, the default hypothesis testing method is based on profile penalized log likelihood (source code [here](https://github.com/georgheinze/logistf/blob/master/R/logistf.R)). And it was noted in the [R documentation](https://cran.r-project.org/web/packages/logistf/logistf.pdf) that, *\"from version 1.24.1 on, the variance-covariance matrix is based on the second derivative of the likelihood of the augmented data rather than the original data, which proved to be a better approximation if the user chooses to set a higher value for the penalty strength.\"* This could cause difference in the estimate of standard error in R vs SAS for Firth logistic regression, and consequently results in differences in the corresponding Wald CI estimates and hypothesis tests results (e.g., p-values).\n\n- Wald method can be used in a `logistf` function in R by specifying `pl = FALSE` in the `control` argument, which should correspond to the method used in SAS to calculate p-value. 
However, when specifying `pl = FALSE`, the CI is also calculated using Wald method.\n\n# Reference\n\n- A relevant blog [here](https://sas-and-r.blogspot.com/2010/11/example-815-firth-logistic-regression.html) (check comments in the blog).\n- [PROC LOGISTIC statement documentation in SAS](https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_logistic_sect004.htm).\n- [Reference manual for `logistf` package in R](https://cran.r-project.org/web/packages/logistf/logistf.pdf).\n- [GitHub repository for `logistf` package in R](https://github.com/georgheinze/logistf).\n- [GitHub repository for a SAS procedure about Firth logistic regression authored by the author of `logistf` R package](https://github.com/georgheinze/flicflac/tree/master/LogisticRegression), which was based on PROC IML instead of PROC LOGISTIC and was probably authored before the availability of Firth option in PROC LOGISTIC statement in SAS.\n- Ge, Miaomiao, et al. \"Covariate-adjusted difference in proportions from clinical trials using logistic regression and weighted risk differences.\" Drug information journal: DIJ/Drug Information Association 45 (2011): 481-493.\n- SAS Institute Inc. [\"Predictive margins and average marginal effects.\"](https://support.sas.com/kb/63/038.html) (Last Published: 13 Dec 2023)", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/Comp/r-sas_mcnemar/execute-results/html.json b/_freeze/Comp/r-sas_mcnemar/execute-results/html.json index 43f5e6b5c..9a94270f3 100644 --- a/_freeze/Comp/r-sas_mcnemar/execute-results/html.json +++ b/_freeze/Comp/r-sas_mcnemar/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "8f8d04defa0a8b2c677cddade94f26f1", + "hash": "4461d11ad8af1cabbc46387fee603562", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"R v SAS McNemar's test\"\nexecute: \n eval: false\n---\n\n## Introduction\n\nMcNemar's test is a test of marginal homogeneity. 
That is used with 2x2 contingency tables, when both x and y are binary factors.\n\n## General Comparison Table\n\nThe following table provides an overview of the support and results comparability between R and SAS for the new analysis point.\n\n| Analysis | Supported in R | Supported in SAS | Results Match | Notes |\n|---------------|---------------|---------------|---------------|---------------|\n| McNemar's Chi-Squared test | [Yes](../R/mcnemar.qmd) | [Yes](../SAS/mcnemar.qmd) | ✅ | By default SAS doesn't include the continuity correction. In R use {stats} or {coin} |\n| Cohen's Kappa CI | [Yes](../R/mcnemar.qmd) | [Yes](../SAS/mcnemar.qmd) | ✅ | In R use {vcd} |\n\nIn R,the {stats} or the {coin} package can be used to calculate McNemar. The {coin} package has the same defaults as SAS. But, using either of these packages, the first step is to calculate a frequency table, using the table function.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(coin)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nLoading required package: survival\n```\n\n\n:::\n\n```{.r .cell-code}\ncolds <- read.csv(\n file = \"../data/colds.csv\"\n)\nfreq_tbl <- table(\"age12\" = colds$age12, \"age14\" = colds$age14)\nfreq_tbl\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n age14\nage12 No Yes\n No 707 256\n Yes 144 212\n```\n\n\n:::\n\n```{.r .cell-code}\ncoin::mh_test(freq_tbl)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tAsymptotic Marginal Homogeneity Test\n\ndata: response by\n\t conditions (age12, age14) \n\t stratified by block\nchi-squared = 31.36, df = 1, p-value = 2.144e-08\n```\n\n\n:::\n:::\n\n\nIn order to get Cohen's Kappa an additional package is needed.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(vcd)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nLoading required package: grid\n```\n\n\n:::\n\n```{.r .cell-code}\ncohen_kappa <- vcd::Kappa(freq_tbl)\ncohen_kappa\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n value ASE z 
Pr(>|z|)\nUnweighted 0.2999 0.02733 10.97 5.07e-28\nWeighted 0.2999 0.02733 10.97 5.07e-28\n```\n\n\n:::\n\n```{.r .cell-code}\nconfint(cohen_kappa, level = 0.95)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n \nKappa lwr upr\n Unweighted 0.2463654 0.3534966\n Weighted 0.2463654 0.3534966\n```\n\n\n:::\n:::\n\n\nThe FREQ procedure can be used in SAS with the AGREE option to run the McNemar test, with OR, and RISKDIFF options stated for production of odds ratios and risk difference. These options were added as `epibasix::mcNemar` outputs the odds ratio and risk difference with confidence limits as default. In contrast to R, SAS outputs the Kappa coefficients with confident limits as default.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc freq data=colds;\n tables age12*age14 / agree or riskdiff;\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/mcnemar/sas-mcnemar.png){fig-align='center' width=40%}\n:::\n:::\n\n\n## Summary and Recommendation\n\nWhen calculating the odds ratio and risk difference confidence limits, SAS is not treating the data as matched-pairs. There is advice on the SAS blog and SAS support page to amend this, which requires a lot of additional coding.\n\n{stats} is using Edward's continuity correction by default, but this can be removed. In contrast, there is no option to include Edward's continuity correction in SAS, but this can be manually coded to agree with R. However, its use is controversial due to being seen as overly conservative.\n\nThere is another R package that is sometimes used to calculate McNemar's, called `epibasix`. This package is no longer being maintained, and there was no documentation available for certain methods used. 
Therefore, the use of the `epibasix` package is advised against and other packages may be more suitable.\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-24\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P coin * 1.4-3 2023-09-27 [?] RSPM\n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n\n * ── Packages attached to the search path.\n P ── Loaded and on-disk path mismatch.\n\n─ External software ──────────────────────────────────────────────────────────\n setting value\n SAS 9.04.01M7P080520\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n:::", + "markdown": "---\ntitle: \"R v SAS McNemar's test\"\n---\n\n## Introduction\n\nMcNemar's test is a test of marginal homogeneity. 
That is used with 2x2 contingency tables, when both x and y are binary factors.\n\n## General Comparison Table\n\nThe following table provides an overview of the support and results comparability between R and SAS for the new analysis point.\n\n| Analysis | Supported in R | Supported in SAS | Results Match | Notes |\n|---------------|---------------|---------------|---------------|---------------|\n| McNemar's Chi-Squared test | [Yes](../R/mcnemar.qmd) | [Yes](../SAS/mcnemar.qmd) | ✅ | By default SAS doesn't include the continuity correction. In R use {stats} or {coin} |\n| Cohen's Kappa CI | [Yes](../R/mcnemar.qmd) | [Yes](../SAS/mcnemar.qmd) | ✅ | In R use {vcd} |\n\nIn R, the {stats} or the {coin} package can be used to calculate McNemar. The {coin} package has the same defaults as SAS. But, using either of these packages, the first step is to calculate a frequency table, using the table function.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(coin)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nLoading required package: survival\n```\n\n\n:::\n\n```{.r .cell-code}\ncolds <- read.csv(\n file = \"../data/colds.csv\"\n)\nfreq_tbl <- table(\"age12\" = colds$age12, \"age14\" = colds$age14)\nfreq_tbl\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n age14\nage12 No Yes\n No 707 256\n Yes 144 212\n```\n\n\n:::\n\n```{.r .cell-code}\ncoin::mh_test(freq_tbl)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tAsymptotic Marginal Homogeneity Test\n\ndata: response by\n\t conditions (age12, age14) \n\t stratified by block\nchi-squared = 31.36, df = 1, p-value = 2.144e-08\n```\n\n\n:::\n:::\n\n\nIn order to get Cohen's Kappa an additional package is needed.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(vcd)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nLoading required package: grid\n```\n\n\n:::\n\n```{.r .cell-code}\ncohen_kappa <- vcd::Kappa(freq_tbl)\ncohen_kappa\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n value ASE z 
Pr(>|z|)\nUnweighted 0.2999 0.02733 10.97 5.07e-28\nWeighted 0.2999 0.02733 10.97 5.07e-28\n```\n\n\n:::\n\n```{.r .cell-code}\nconfint(cohen_kappa, level = 0.95)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n \nKappa lwr upr\n Unweighted 0.2463654 0.3534966\n Weighted 0.2463654 0.3534966\n```\n\n\n:::\n:::\n\n\nThe FREQ procedure can be used in SAS with the AGREE option to run the McNemar test, with OR, and RISKDIFF options stated for production of odds ratios and risk difference. These options were added as `epibasix::mcNemar` outputs the odds ratio and risk difference with confidence limits as default. In contrast to R, SAS outputs the Kappa coefficients with confidence limits as default.\n\n```sas\nproc freq data=colds;\n tables age12*age14 / agree or riskdiff;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/mcnemar/sas-mcnemar.png){fig-align='center' width=40%}\n:::\n:::\n\n\n## Summary and Recommendation\n\nWhen calculating the odds ratio and risk difference confidence limits, SAS is not treating the data as matched-pairs. There is advice on the SAS blog and SAS support page to amend this, which requires a lot of additional coding.\n\n{stats} is using Edward's continuity correction by default, but this can be removed. In contrast, there is no option to include Edward's continuity correction in SAS, but this can be manually coded to agree with R. However, its use is controversial due to being seen as overly conservative.\n\nThere is another R package that is sometimes used to calculate McNemar's, called `epibasix`. This package is no longer being maintained, and there was no documentation available for certain methods used. 
Therefore, the use of the `epibasix` package is advised against and other packages may be more suitable.\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os Ubuntu 24.04.3 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-17\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P coin * 1.4-3 2023-09-27 [?] RSPM (R 4.5.0)\n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.2/lib/R/library\n\n P ── Loaded and on-disk path mismatch.\n\n─ External software ──────────────────────────────────────────────────────────\n setting value\n SAS 9.04.01M7P080520\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n:::", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/Comp/r-sas_mmrm/execute-results/html.json b/_freeze/Comp/r-sas_mmrm/execute-results/html.json index 865ee598a..e85a82de0 100644 --- a/_freeze/Comp/r-sas_mmrm/execute-results/html.json +++ b/_freeze/Comp/r-sas_mmrm/execute-results/html.json @@ -1,9 +1,11 @@ { - "hash": "402c5e7d123c00390fd87e00943d50a3", + "hash": "5737252e803780338f43a1dc0b2723d4", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"R vs SAS MMRM\"\nmessage: false\nwarning: false\necho: true\neval: false\n---\n\n\n\n\n\n# Introduction\n\nIn this vignette we briefly compare the `mmrm::mmrm`, SAS's `PROC GLIMMIX`, `nlme::gls`, `lme4::lmer`, and `glmmTMB::glmmTMB` functions for fitting mixed models for 
repeated measures (MMRMs). A primary difference in these implementations lies in the covariance structures that are supported \"out of the box\". In particular, `PROC GLIMMIX` and `mmrm` are the only procedures which provide support for many of the most common MMRM covariance structures. Most covariance structures can be implemented in `gls`, though users are required to define them manually. `lmer` and `glmmTMB` are more limited. We find that `mmmrm` converges more quickly than other R implementations while also producing estimates that are virtually identical to `PROC GLIMMIX`'s.\n\nNOTE: that factor parameterization in the model, and the default order that SAS and R choose reference levels for factors are different. Hence, the first thing to ensure when trying to replicate MMRMs in SAS and R is that you have these options aligned. See [parameterization in SAS](https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_mixed_sect023.htm) for more detail on SAS parameterization. This can be matched in R using the default `contr.treatment` option. It is reccommended to specify the level of any factors you want to use as the reference in both SAS and R.\n\nIn SAS this is done on the class row: 'class armcd(ref=\"ARM A\")' \nIn R, this is done using `relevel(ARMCD,ref=\"ARM A\")` in addition to adding the base option to the contrast() statement when selecting the contr.treatment parameterization. \n`contrasts(ARMCD) <- contr.treatment(levels(ARMCD), base=which(levels(ARMCD)==\"ARM A\"))`\n\n# Datasets\n\nTwo datasets are used to illustrate model fitting with the `mmrm`, `lme4`, `nlme`, `glmmTMB` R packages as well as `PROC GLIMMIX`. These data are also used to compare these implementations' operating characteristics.\n\n## FEV Data\n\nThe FEV dataset contains measurements of FEV1 (forced expired volume in one second), a measure of how quickly the lungs can be emptied. 
Low levels of FEV1 may indicate chronic obstructive pulmonary disease (COPD). It is summarized below.\n\n```default\n Stratified by ARMCD\n Overall PBO TRT\n n 800 420 380\n USUBJID (%)\n PT[1-200] 200 105 (52.5) 95 (47.5)\n AVISIT\n VIS1 200 105 95\n VIS2 200 105 95\n VIS3 200 105 95\n VIS4 200 105 95\n RACE (%)\n Asian 280 (35.0) 152 (36.2) 128 (33.7)\n Black or African American 300 (37.5) 184 (43.8) 116 (30.5)\n White 220 (27.5) 84 (20.0) 136 (35.8)\n SEX = Female (%) 424 (53.0) 220 (52.4) 204 (53.7)\n FEV1_BL (mean (SD)) 40.19 (9.12) 40.46 (8.84) 39.90 (9.42)\n FEV1 (mean (SD)) 42.30 (9.32) 40.24 (8.67) 44.45 (9.51)\n WEIGHT (mean (SD)) 0.52 (0.23) 0.52 (0.23) 0.51 (0.23)\n VISITN (mean (SD)) 2.50 (1.12) 2.50 (1.12) 2.50 (1.12)\n VISITN2 (mean (SD)) -0.02 (1.03) 0.01 (1.07) -0.04 (0.98)\n```\n\n## BCVA Data\n\nThe BCVA dataset contains data from a randomized longitudinal ophthalmology trial evaluating the change in baseline corrected visual acuity (BCVA) over the course of 10 visits. BCVA corresponds to the number of letters read from a visual acuity chart. A summary of the data is given below:\n\n```default\n Stratified by ARMCD\n Overall CTL TRT\n n 8605 4123 4482\n USUBJID (%)\n PT[1-1000] 1000 494 (49.4) 506 (50.6)\n AVISIT\n VIS1 983 482 501\n VIS2 980 481 499\n VIS3 960 471 489\n VIS4 946 458 488\n VIS5 925 454 471\n VIS6 868 410 458\n VIS7 816 388 428\n VIS8 791 371 420\n VIS9 719 327 392\n VIS10 617 281 336\n RACE (%)\n Asian 297 (29.7) 151 (30.6) 146 (28.9)\n Black or African American 317 (31.7) 149 (30.1) 168 (33.2)\n White 386 (38.6) 194 (39.3) 192 (37.9)\n BCVA_BL (mean (SD)) 75.12 (9.93) 74.90 (9.76) 75.40 (10.1)\n BCVA_CHG (mean (SD))\n VIS1 5.59 (1.31) 5.32 (1.23) 5.86 (1.33)\n VIS10 9.18 (2.91) 7.49 (2.58) 10.60 (2.36)\n```\n\n# Model Implementations {.tabset}\n\nListed below are some of the most commonly used covariance structures used when fitting MMRMs. 
We indicate which matrices are available \"out of the box\" for each implementation considered in this vignette. Note that this table is not exhaustive; `PROC GLIMMIX` and `glmmTMB` support additional spatial covariance structures.\n\n| Covariance structures | `mmrm` | `PROC GLIMMIX` | `gls` | `lmer` | `glmmTMB` |\n|:-------------------:|:---------:|:---------:|:---------:|:---------:|:---------:|\n| Ante-dependence (heterogeneous) | X | X | | | |\n| Ante-dependence (homogeneous) | X | | | | |\n| Auto-regressive (heterogeneous) | X | X | X | | |\n| Auto-regressive (homogeneous) | X | X | X | | X |\n| Compound symmetry (heterogeneous) | X | X | X | | X |\n| Compound symmetry (homogeneous) | X | X | X | | |\n| Spatial exponential | X | X | X | | X |\n| Toeplitz (heterogeneous) | X | X | | | X |\n| Toeplitz (homogeneous) | X | X | | | |\n| Unstructured | X | X | X | X | X |\n\nCode for fitting MMRMs to the FEV data using each of the considered functions and covariance structures are provided below. 
Fixed effects for the visit number, treatment assignment and the interaction between the two are modeled.\n\n## Ante-dependence (heterogeneous)\n\n### `PROC GLIMMIX`\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nPROC GLIMMIX DATA = fev_data;\n CLASS AVISIT(ref = 'VIS1') ARMCD(ref = 'PBO') USUBJID;\n MODEL FEV1 = AVISIT|ARMCD / ddfm=satterthwaite solution chisq;\n RANDOM AVISIT / subject=USUBJID type=ANTE(1);\nRUN;\n```\n:::\n\n\n### `mmrm`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmmrm::mmrm(\n formula = FEV1 ~ ARMCD * AVISIT + adh(VISITN | USUBJID),\n data = fev_data\n)\n```\n:::\n\n\n## Ante-dependence (homogeneous)\n\n### `mmrm`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmmrm::mmrm(\n formula = FEV1 ~ ARMCD * AVISIT + ad(VISITN | USUBJID),\n data = fev_data\n)\n```\n:::\n\n\n## Auto-regressive (heterogeneous)\n\n### `PROC GLIMMIX`\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nPROC GLIMMIX DATA = fev_data;\n CLASS AVISIT(ref = 'VIS1') ARMCD(ref = 'PBO') USUBJID;\n MODEL FEV1 = AVISIT|ARMCD / ddfm=satterthwaite solution chisq;\n RANDOM AVISIT / subject=USUBJID type=ARH(1);\nRUN;\n```\n:::\n\n\n### `mmrm`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmmrm::mmrm(\n formula = FEV1 ~ ARMCD * AVISIT + ar1h(VISITN | USUBJID),\n data = fev_data\n)\n```\n:::\n\n\n### `gls`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nnlme::gls(\n formula = FEV1 ~ ARMCD * AVISIT,\n data = fev_data,\n correlation = nlme::corCAR1(form = ~ AVISIT | USUBJID),\n weights = nlme::varIdent(form = ~ 1 | AVISIT),\n na.action = na.omit\n)\n```\n:::\n\n\n## Auto-regressive (homogeneous)\n\n### `PROC GLIMMIX`\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nPROC GLIMMIX DATA = fev_data;\n CLASS AVISIT(ref = 'VIS1') ARMCD(ref = 'PBO') USUBJID;\n MODEL FEV1 = ARMCD|AVISIT / ddfm=satterthwaite solution chisq;\n RANDOM AVISIT / subject=USUBJID type=AR(1);\nRUN;\n```\n:::\n\n\n### `mmrm`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmmrm::mmrm(\n formula = FEV1 ~ ARMCD * AVISIT + ar1(VISITN | USUBJID),\n data = fev_data\n)\n```\n:::\n\n\n### 
`gls`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nnlme::gls(\n formula = FEV1 ~ ARMCD * AVISIT,\n data = fev_data,\n correlation = nlme::corCAR1(form = ~ AVISIT | USUBJID),\n na.action = na.omit\n)\n```\n:::\n\n\n### `glmmTMB`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nglmmTMB::glmmTMB(\n FEV1 ~ ARMCD * AVISIT + ar1(0 + AVISIT | USUBJID),\n dispformula = ~0,\n data = fev_data\n)\n```\n:::\n\n\n## Compound symmetry (heterogeneous)\n\n### `PROC GLIMMIX`\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nPROC GLIMMIX DATA = fev_data;\n CLASS AVISIT(ref = 'VIS1') ARMCD(ref = 'PBO') USUBJID;\n MODEL FEV1 = AVISIT|ARMCD / ddfm=satterthwaite solution chisq;\n RANDOM AVISIT / subject=USUBJID type=CSH;\nRUN;\n```\n:::\n\n\n### `mmrm`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmmrm::mmrm(\n formula = FEV1 ~ ARMCD * AVISIT + csh(VISITN | USUBJID),\n data = fev_data\n)\n```\n:::\n\n\n### `gls`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nnlme::gls(\n formula = FEV1 ~ ARMCD * AVISIT,\n data = fev_data,\n correlation = nlme::corCompSymm(form = ~ AVISIT | USUBJID),\n weights = nlme::varIdent(form = ~ 1 | AVISIT),\n na.action = na.omit\n)\n```\n:::\n\n\n### `glmmTMB`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nglmmTMB::glmmTMB(\n FEV1 ~ ARMCD * AVISIT + cs(0 + AVISIT | USUBJID),\n dispformula = ~0,\n data = fev_data\n)\n```\n:::\n\n\n## Compound symmetry (homogeneous)\n\n### `PROC GLIMMIX`\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nPROC GLIMMIX DATA = fev_data;\n CLASS AVISIT(ref = 'VIS1') ARMCD(ref = 'PBO') USUBJID;\n MODEL FEV1 = AVISIT|ARMCD / ddfm=satterthwaite solution chisq;\n RANDOM AVISIT / subject=USUBJID type=CS;\nRUN;\n```\n:::\n\n\n### `mmrm`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmmrm::mmrm(\n formula = FEV1 ~ ARMCD * AVISIT + cs(VISITN | USUBJID),\n data = fev_data\n)\n```\n:::\n\n\n### `gls`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nnlme::gls(\n formula = FEV1 ~ ARMCD * AVISIT,\n data = fev_data,\n correlation = nlme::corCompSymm(form = ~ AVISIT | USUBJID),\n na.action = 
na.omit\n)\n```\n:::\n\n\n## Spatial exponential\n\n### `PROC GLIMMIX`\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nPROC GLIMMIX DATA = fev_data;\n CLASS AVISIT(ref = 'VIS1') ARMCD(ref = 'PBO') USUBJID;\n MODEL FEV1 = AVISIT|ARMCD / ddfm=satterthwaite solution chisq;\n RANDOM / subject=USUBJID type=sp(exp)(visitn) rcorr;\nRUN;\n```\n:::\n\n\n### `mmrm`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmmrm::mmrm(\n formula = FEV1 ~ ARMCD * AVISIT + sp_exp(VISITN | USUBJID),\n data = fev_data\n)\n```\n:::\n\n\n### `gls`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nnlme::gls(\n formula = FEV1 ~ ARMCD * AVISIT,\n data = fev_data,\n correlation = corExp(form = ~ AVISIT | USUBJID),\n weights = varIdent(form = ~ 1 | AVISIT),\n na.action = na.omit\n)\n```\n:::\n\n\n### `glmmTMB`\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# NOTE: requires use of coordinates\nglmmTMB::glmmTMB(\n FEV1 ~ ARMCD * AVISIT + exp(0 + AVISIT | USUBJID),\n dispformula = ~0,\n data = fev_data\n)\n```\n:::\n\n\n## Toeplitz (heterogeneous)\n\n### `PROC GLIMMIX`\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nPROC GLIMMIX DATA = fev_data;\n CLASS AVISIT(ref = 'VIS1') ARMCD(ref = 'PBO') USUBJID;\n MODEL FEV1 = AVISIT|ARMCD / ddfm=satterthwaite solution chisq;\n RANDOM AVISIT / subject=USUBJID type=TOEPH;\nRUN;\n```\n:::\n\n\n### `mmrm`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmmrm::mmrm(\n formula = FEV1 ~ ARMCD * AVISIT + toeph(AVISIT | USUBJID),\n data = fev_data\n)\n```\n:::\n\n\n### `glmmTMB`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nglmmTMB::glmmTMB(\n FEV1 ~ ARMCD * AVISIT + toep(0 + AVISIT | USUBJID),\n dispformula = ~0,\n data = fev_data\n)\n```\n:::\n\n\n## Toeplitz (homogeneous)\n\n### `PROC GLIMMIX`\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nPROC GLIMMIX DATA = fev_data;\n CLASS AVISIT(ref = 'VIS1') ARMCD(ref = 'PBO') USUBJID;\n MODEL FEV1 = AVISIT|ARMCD / ddfm=satterthwaite solution chisq;\n RANDOM AVISIT / subject=USUBJID type=TOEP;\nRUN;\n```\n:::\n\n\n### `mmrm`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmmrm::mmrm(\n 
formula = FEV1 ~ ARMCD * AVISIT + toep(AVISIT | USUBJID),\n data = fev_data\n)\n```\n:::\n\n\n## Unstructured\n\n### `PROC GLIMMIX`\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nPROC GLIMMIX DATA = fev_data;\n CLASS AVISIT(ref = 'VIS1') ARMCD(ref = 'PBO') USUBJID;\n MODEL FEV1 = ARMCD|AVISIT / ddfm=satterthwaite solution chisq;\n RANDOM AVISIT / subject=USUBJID type=un;\nRUN;\n```\n:::\n\n\n### `mmrm`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmmrm::mmrm(\n formula = FEV1 ~ ARMCD * AVISIT + us(AVISIT | USUBJID),\n data = fev_data\n)\n```\n:::\n\n\n### `gls`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nnlme::gls(\n formula = FEV1 ~ ARMCD * AVISIT,\n data = fev_data,\n correlation = nlme::corSymm(form = ~ AVISIT | USUBJID),\n weights = nlme::varIdent(form = ~ 1 | AVISIT),\n na.action = na.omit\n)\n```\n:::\n\n\n### `lmer`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlme4::lmer(\n FEV1 ~ ARMCD * AVISIT + (0 + AVISIT | USUBJID),\n data = fev_data,\n control = lme4::lmerControl(check.nobs.vs.nRE = \"ignore\"),\n na.action = na.omit\n)\n```\n:::\n\n\n### `glmmTMB`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nglmmTMB::glmmTMB(\n FEV1 ~ ARMCD * AVISIT + us(0 + AVISIT | USUBJID),\n dispformula = ~0,\n data = fev_data\n)\n```\n:::\n\n\n# Benchmarking\n\nNext, the MMRM fitting procedures are compared using the FEV and BCVA datasets. FEV1 measurements are modeled as a function of race, treatment arm, visit number, and the interaction between the treatment arm and the visit number. Change in BCVA is assumed to be a function of race, baseline BCVA, treatment arm, visit number, and the treatment--visit interaction. In both datasets, repeated measures are modeled using an unstructured covariance matrix. The implementations' convergence times are evaluated first, followed by a comparison of their estimates. 
Finally, we fit these procedures on simulated BCVA-like data to assess the impact of missingness on convergence rates.\n\n## Convergence Times\n\n### FEV Data\n\nThe `mmrm`, `PROC GLIMMIX`, `gls`, `lmer`, and `glmmTMB` functions are applied to the FEV dataset 10 times. The convergence times are recorded for each replicate and are reported in the table below.\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n\nTable: Comparison of convergence times: milliseconds\n\n|Implementation | Median| First Quartile| Third Quartile|\n|:--------------|------:|--------------:|--------------:|\n|mmrm | 56.15| 55.76| 56.30|\n|PROC GLIMMIX | 100.00| 100.00| 100.00|\n|lmer | 247.02| 245.25| 257.46|\n|gls | 687.63| 683.50| 692.45|\n|glmmTMB | 715.90| 708.70| 721.57|\n\n\n:::\n:::\n\n\nIt is clear from these results that `mmrm` converges significantly faster than other R functions. Though not demonstrated here, this is generally true regardless of the sample size and covariance structure used. `mmrm` is faster than `PROC GLIMMIX`.\n\n### BCVA Data\n\nThe MMRM implementations are now applied to the BCVA dataset 10 times. The convergence times are presented below.\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n\nTable: Comparison of convergence times: seconds\n\n|Implementation | Median| First Quartile| Third Quartile|\n|:--------------|------:|--------------:|--------------:|\n|mmrm | 3.36| 3.32| 3.46|\n|glmmTMB | 18.65| 18.14| 18.87|\n|PROC GLIMMIX | 36.25| 36.17| 36.29|\n|gls | 164.36| 158.61| 165.93|\n|lmer | 165.26| 157.46| 166.42|\n\n\n:::\n:::\n\n\nWe again find that `mmrm` produces the fastest convergence times on average.\n\n## Marginal Treatment Effect Estimates Comparison\n\nWe next estimate the marginal mean treatment effects for each visit in the FEV and BCVA datasets using the MMRM fitting procedures. All R implementations' estimates are reported relative to `PROC GLIMMIX`'s estimates. 
Convergence status is also reported.\n\n### FEV Data\n\n\n::: {.cell}\n::: {.cell-output-display}\n![](../images/mmrm/review-treatment-fev-1.png){width=100%}\n:::\n:::\n\n\nThe R procedures' estimates are very similar to those output by `PROC GLIMMIX`, though `mmrm` and `gls` generate the estimates that are closest to those produced when using SAS. All methods converge using their default optimization arguments.\n\n### BCVA Data\n\n\n::: {.cell}\n::: {.cell-output-display}\n![](../images/mmrm/review-treatment-bcva-1.png){width=100%}\n:::\n\n::: {.cell-output-display}\n![](../images/mmrm/review-treatment-bcva-2.png){width=100%}\n:::\n:::\n\n\n`mmrm`, `gls` and `lmer` produce estimates that are virtually identical to `PROC GLIMMIX`'s, while `glmmTMB` does not. This is likely explained by `glmmTMB`'s failure to converge. Note too that `lmer` fails to converge.\n\n## Impact of Missing Data on Convergence Rates\n\nThe results of the previous benchmark suggest that the amount of patients missing from later time points affect certain implementations' capacity to converge. We investigate this further by simulating data using a data-generating process similar to that of the BCVA datasets, though with various rates of patient dropout.\n\nTen datasets of 200 patients are generated each of the following levels of missingness: none, mild, moderate, and high. In all scenarios, observations are missing at random. 
The number patients observed at each visit is obtained for one replicated dataset at each level of missingness is presented in the table below.\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n\nTable: Number of patients per visit\n\n| | none| mild| moderate| high|\n|:-----|----:|-----:|--------:|-----:|\n|VIS01 | 200| 196.7| 197.6| 188.1|\n|VIS02 | 200| 195.4| 194.4| 182.4|\n|VIS03 | 200| 195.1| 190.7| 175.2|\n|VIS04 | 200| 194.1| 188.4| 162.8|\n|VIS05 | 200| 191.6| 182.5| 142.7|\n|VIS06 | 200| 188.2| 177.3| 125.4|\n|VIS07 | 200| 184.6| 168.0| 105.9|\n|VIS08 | 200| 178.5| 155.4| 82.6|\n|VIS09 | 200| 175.3| 139.9| 58.1|\n|VIS10 | 200| 164.1| 124.0| 39.5|\n\n\n:::\n:::\n\n\nThe convergence rates of all implementations for stratified by missingness level is presented in the plot below.\n\n\n::: {.cell}\n::: {.cell-output-display}\n![](../images/mmrm/review-convergence-rate-missingness-1.png){width=100%}\n:::\n:::\n\n\n`mmrm`, `gls`, and `PROC GLIMMIX` are resilient to missingness, only exhibiting some convergence problems in the scenarios with the most missingness. These implementations converged in all the other scenarios' replicates. `glmmTMB`, on the other hand, has convergence issues in the no-, mild-, and high-missingness datasets, with the worst convergence rate occurring in the datasets with the most dropout. Finally, `lmer` is unreliable in all scenarios, suggesting that it's convergence issues stem from something other than the missing observations.\n\nNote that the default optimization schemes are used for each method; these schemes can be modified to potentially improve convergence rates.\n\nA more comprehensive simulation study using data-generating processes similar to the one used here is outlined in the [`simulations/missing-data-benchmarks`](https://github.com/openpharma/mmrm/tree/main/simulations/missing-data-benchmarks) subdirectory. 
In addition to assessing the effect of missing data on software convergence rates, we also evaluate these methods' fit times and empirical bias, variance, 95% coverage rates, type I error rates and type II error rates. `mmrm` is found to be the most most robust software for fitting MMRMs in scenarios where a large proportion of patients are missing from the last time points. Additionally, `mmrm` has the fastest average fit times regardless of the amount of missingness. All implementations considered produce similar empirical biases, variances, 95% coverage rates, type I error rates and type II error rates.\n", - "supporting": [], + "markdown": "---\ntitle: \"R vs SAS MMRM\"\nexecute:\n message: false\n warning: false\n echo: true\n eval: false\n---\n\n\n\n\n\n# Introduction\n\nIn this vignette we briefly compare the `mmrm::mmrm`, SAS's `PROC GLIMMIX`, `nlme::gls`, `lme4::lmer`, and `glmmTMB::glmmTMB` functions for fitting mixed models for repeated measures (MMRMs). A primary difference in these implementations lies in the covariance structures that are supported \"out of the box\". In particular, `PROC GLIMMIX` and `mmrm` are the only procedures which provide support for many of the most common MMRM covariance structures. Most covariance structures can be implemented in `gls`, though users are required to define them manually. `lmer` and `glmmTMB` are more limited. We find that `mmrm` converges more quickly than other R implementations while also producing estimates that are virtually identical to `PROC GLIMMIX`'s.\n\n::: {.callout-note}\nFactor parameterization in the model, and the default order that SAS and R choose reference levels for factors are different. Hence, the first thing to ensure when trying to replicate MMRMs in SAS and R is that you have these options aligned. See [parameterization in SAS](https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_mixed_sect023.htm) for more detail on SAS parameterization. 
This can be matched in R using the default `contr.treatment` option. It is recommended to specify the level of any factors you want to use as the reference in both SAS and R.\n\nIn SAS, this is done on the class row:\n\n```default\nclass armcd(ref=\"ARM A\")\n```\n\nIn R, this is done using `relevel(ARMCD, ref = \"ARM A\")` in addition to adding the base option to the `contrast()` statement when selecting the `contr.treatment` parameterization:\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncontrasts(ARMCD) <- contr.treatment(levels(ARMCD), base = which(levels(ARMCD) == \"ARM A\"))\n```\n:::\n\n:::\n\n# Datasets\n\nTwo datasets are used to illustrate model fitting with the `mmrm`, `lme4`, `nlme`, `glmmTMB` R packages as well as `PROC GLIMMIX`. These data are also used to compare these implementations' operating characteristics.\n\n## FEV Data\n\nThe FEV dataset contains measurements of FEV1 (forced expired volume in one second), a measure of how quickly the lungs can be emptied. Low levels of FEV1 may indicate chronic obstructive pulmonary disease (COPD). It is summarized below.\n\n```default\n Stratified by ARMCD\n Overall PBO TRT\n n 800 420 380\n USUBJID (%)\n PT[1-200] 200 105 (52.5) 95 (47.5)\n AVISIT\n VIS1 200 105 95\n VIS2 200 105 95\n VIS3 200 105 95\n VIS4 200 105 95\n RACE (%)\n Asian 280 (35.0) 152 (36.2) 128 (33.7)\n Black or African American 300 (37.5) 184 (43.8) 116 (30.5)\n White 220 (27.5) 84 (20.0) 136 (35.8)\n SEX = Female (%) 424 (53.0) 220 (52.4) 204 (53.7)\n FEV1_BL (mean (SD)) 40.19 (9.12) 40.46 (8.84) 39.90 (9.42)\n FEV1 (mean (SD)) 42.30 (9.32) 40.24 (8.67) 44.45 (9.51)\n WEIGHT (mean (SD)) 0.52 (0.23) 0.52 (0.23) 0.51 (0.23)\n VISITN (mean (SD)) 2.50 (1.12) 2.50 (1.12) 2.50 (1.12)\n VISITN2 (mean (SD)) -0.02 (1.03) 0.01 (1.07) -0.04 (0.98)\n```\n\n## BCVA Data\n\nThe BCVA dataset contains data from a randomized longitudinal ophthalmology trial evaluating the change in baseline corrected visual acuity (BCVA) over the course of 10 visits. 
BCVA corresponds to the number of letters read from a visual acuity chart. A summary of the data is given below:\n\n```default\n Stratified by ARMCD\n Overall CTL TRT\n n 8605 4123 4482\n USUBJID (%)\n PT[1-1000] 1000 494 (49.4) 506 (50.6)\n AVISIT\n VIS1 983 482 501\n VIS2 980 481 499\n VIS3 960 471 489\n VIS4 946 458 488\n VIS5 925 454 471\n VIS6 868 410 458\n VIS7 816 388 428\n VIS8 791 371 420\n VIS9 719 327 392\n VIS10 617 281 336\n RACE (%)\n Asian 297 (29.7) 151 (30.6) 146 (28.9)\n Black or African American 317 (31.7) 149 (30.1) 168 (33.2)\n White 386 (38.6) 194 (39.3) 192 (37.9)\n BCVA_BL (mean (SD)) 75.12 (9.93) 74.90 (9.76) 75.40 (10.1)\n BCVA_CHG (mean (SD))\n VIS1 5.59 (1.31) 5.32 (1.23) 5.86 (1.33)\n VIS10 9.18 (2.91) 7.49 (2.58) 10.60 (2.36)\n```\n\n# Model Implementations {.tabset}\n\nListed below are some of the most commonly used covariance structures used when fitting MMRMs. We indicate which matrices are available \"out of the box\" for each implementation considered in this vignette. Note that this table is not exhaustive; `PROC GLIMMIX` and `glmmTMB` support additional spatial covariance structures.\n\n| Covariance structures | `mmrm` | `PROC GLIMMIX` | `gls` | `lmer` | `glmmTMB` |\n|:-------------------:|:---------:|:---------:|:---------:|:---------:|:---------:|\n| Ante-dependence (heterogeneous) | X | X | | | |\n| Ante-dependence (homogeneous) | X | | | | |\n| Auto-regressive (heterogeneous) | X | X | X | | |\n| Auto-regressive (homogeneous) | X | X | X | | X |\n| Compound symmetry (heterogeneous) | X | X | X | | X |\n| Compound symmetry (homogeneous) | X | X | X | | |\n| Spatial exponential | X | X | X | | X |\n| Toeplitz (heterogeneous) | X | X | | | X |\n| Toeplitz (homogeneous) | X | X | | | |\n| Unstructured | X | X | X | X | X |\n\nCode for fitting MMRMs to the FEV data using each of the considered functions and covariance structures are provided below. 
Fixed effects for the visit number, treatment assignment and the interaction between the two are modeled.\n\n## Ante-dependence (heterogeneous)\n\n### `PROC GLIMMIX`\n\n```sas\nPROC GLIMMIX DATA = fev_data;\n CLASS AVISIT(ref = 'VIS1') ARMCD(ref = 'PBO') USUBJID;\n MODEL FEV1 = AVISIT|ARMCD / ddfm=satterthwaite solution chisq;\n RANDOM AVISIT / subject=USUBJID type=ANTE(1);\nRUN;\n```\n\n### `mmrm`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmmrm::mmrm(\n formula = FEV1 ~ ARMCD * AVISIT + adh(VISITN | USUBJID),\n data = fev_data\n)\n```\n:::\n\n\n## Ante-dependence (homogeneous)\n\n### `mmrm`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmmrm::mmrm(\n formula = FEV1 ~ ARMCD * AVISIT + ad(VISITN | USUBJID),\n data = fev_data\n)\n```\n:::\n\n\n## Auto-regressive (heterogeneous)\n\n### `PROC GLIMMIX`\n\n```sas\nPROC GLIMMIX DATA = fev_data;\n CLASS AVISIT(ref = 'VIS1') ARMCD(ref = 'PBO') USUBJID;\n MODEL FEV1 = AVISIT|ARMCD / ddfm=satterthwaite solution chisq;\n RANDOM AVISIT / subject=USUBJID type=ARH(1);\nRUN;\n```\n\n### `mmrm`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmmrm::mmrm(\n formula = FEV1 ~ ARMCD * AVISIT + ar1h(VISITN | USUBJID),\n data = fev_data\n)\n```\n:::\n\n\n### `gls`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nnlme::gls(\n formula = FEV1 ~ ARMCD * AVISIT,\n data = fev_data,\n correlation = nlme::corCAR1(form = ~ AVISIT | USUBJID),\n weights = nlme::varIdent(form = ~ 1 | AVISIT),\n na.action = na.omit\n)\n```\n:::\n\n\n## Auto-regressive (homogeneous)\n\n### `PROC GLIMMIX`\n\n```sas\nPROC GLIMMIX DATA = fev_data;\n CLASS AVISIT(ref = 'VIS1') ARMCD(ref = 'PBO') USUBJID;\n MODEL FEV1 = ARMCD|AVISIT / ddfm=satterthwaite solution chisq;\n RANDOM AVISIT / subject=USUBJID type=AR(1);\nRUN;\n```\n\n### `mmrm`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmmrm::mmrm(\n formula = FEV1 ~ ARMCD * AVISIT + ar1(VISITN | USUBJID),\n data = fev_data\n)\n```\n:::\n\n\n### `gls`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nnlme::gls(\n formula = FEV1 ~ ARMCD * AVISIT,\n data = fev_data,\n 
correlation = nlme::corCAR1(form = ~ AVISIT | USUBJID),\n na.action = na.omit\n)\n```\n:::\n\n\n### `glmmTMB`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nglmmTMB::glmmTMB(\n FEV1 ~ ARMCD * AVISIT + ar1(0 + AVISIT | USUBJID),\n dispformula = ~0,\n data = fev_data\n)\n```\n:::\n\n\n## Compound symmetry (heterogeneous)\n\n### `PROC GLIMMIX`\n\n```sas\nPROC GLIMMIX DATA = fev_data;\n CLASS AVISIT(ref = 'VIS1') ARMCD(ref = 'PBO') USUBJID;\n MODEL FEV1 = AVISIT|ARMCD / ddfm=satterthwaite solution chisq;\n RANDOM AVISIT / subject=USUBJID type=CSH;\nRUN;\n```\n\n### `mmrm`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmmrm::mmrm(\n formula = FEV1 ~ ARMCD * AVISIT + csh(VISITN | USUBJID),\n data = fev_data\n)\n```\n:::\n\n\n### `gls`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nnlme::gls(\n formula = FEV1 ~ ARMCD * AVISIT,\n data = fev_data,\n correlation = nlme::corCompSymm(form = ~ AVISIT | USUBJID),\n weights = nlme::varIdent(form = ~ 1 | AVISIT),\n na.action = na.omit\n)\n```\n:::\n\n\n### `glmmTMB`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nglmmTMB::glmmTMB(\n FEV1 ~ ARMCD * AVISIT + cs(0 + AVISIT | USUBJID),\n dispformula = ~0,\n data = fev_data\n)\n```\n:::\n\n\n## Compound symmetry (homogeneous)\n\n### `PROC GLIMMIX`\n\n```sas\nPROC GLIMMIX DATA = fev_data;\n CLASS AVISIT(ref = 'VIS1') ARMCD(ref = 'PBO') USUBJID;\n MODEL FEV1 = AVISIT|ARMCD / ddfm=satterthwaite solution chisq;\n RANDOM AVISIT / subject=USUBJID type=CS;\nRUN;\n```\n\n### `mmrm`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmmrm::mmrm(\n formula = FEV1 ~ ARMCD * AVISIT + cs(VISITN | USUBJID),\n data = fev_data\n)\n```\n:::\n\n\n### `gls`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nnlme::gls(\n formula = FEV1 ~ ARMCD * AVISIT,\n data = fev_data,\n correlation = nlme::corCompSymm(form = ~ AVISIT | USUBJID),\n na.action = na.omit\n)\n```\n:::\n\n\n## Spatial exponential\n\n### `PROC GLIMMIX`\n\n```sas\nPROC GLIMMIX DATA = fev_data;\n CLASS AVISIT(ref = 'VIS1') ARMCD(ref = 'PBO') USUBJID;\n MODEL FEV1 = AVISIT|ARMCD / 
ddfm=satterthwaite solution chisq;\n RANDOM / subject=USUBJID type=sp(exp)(visitn) rcorr;\nRUN;\n```\n\n### `mmrm`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmmrm::mmrm(\n formula = FEV1 ~ ARMCD * AVISIT + sp_exp(VISITN | USUBJID),\n data = fev_data\n)\n```\n:::\n\n\n### `gls`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nnlme::gls(\n formula = FEV1 ~ ARMCD * AVISIT,\n data = fev_data,\n correlation = corExp(form = ~ AVISIT | USUBJID),\n weights = varIdent(form = ~ 1 | AVISIT),\n na.action = na.omit\n)\n```\n:::\n\n\n### `glmmTMB`\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# NOTE: requires use of coordinates\nglmmTMB::glmmTMB(\n FEV1 ~ ARMCD * AVISIT + exp(0 + AVISIT | USUBJID),\n dispformula = ~0,\n data = fev_data\n)\n```\n:::\n\n\n## Toeplitz (heterogeneous)\n\n### `PROC GLIMMIX`\n\n```sas\nPROC GLIMMIX DATA = fev_data;\n CLASS AVISIT(ref = 'VIS1') ARMCD(ref = 'PBO') USUBJID;\n MODEL FEV1 = AVISIT|ARMCD / ddfm=satterthwaite solution chisq;\n RANDOM AVISIT / subject=USUBJID type=TOEPH;\nRUN;\n```\n\n### `mmrm`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmmrm::mmrm(\n formula = FEV1 ~ ARMCD * AVISIT + toeph(AVISIT | USUBJID),\n data = fev_data\n)\n```\n:::\n\n\n### `glmmTMB`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nglmmTMB::glmmTMB(\n FEV1 ~ ARMCD * AVISIT + toep(0 + AVISIT | USUBJID),\n dispformula = ~0,\n data = fev_data\n)\n```\n:::\n\n\n## Toeplitz (homogeneous)\n\n### `PROC GLIMMIX`\n\n```sas\nPROC GLIMMIX DATA = fev_data;\n CLASS AVISIT(ref = 'VIS1') ARMCD(ref = 'PBO') USUBJID;\n MODEL FEV1 = AVISIT|ARMCD / ddfm=satterthwaite solution chisq;\n RANDOM AVISIT / subject=USUBJID type=TOEP;\nRUN;\n```\n\n### `mmrm`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmmrm::mmrm(\n formula = FEV1 ~ ARMCD * AVISIT + toep(AVISIT | USUBJID),\n data = fev_data\n)\n```\n:::\n\n\n## Unstructured\n\n### `PROC GLIMMIX`\n\n```sas\nPROC GLIMMIX DATA = fev_data;\n CLASS AVISIT(ref = 'VIS1') ARMCD(ref = 'PBO') USUBJID;\n MODEL FEV1 = ARMCD|AVISIT / ddfm=satterthwaite solution chisq;\n RANDOM AVISIT / 
subject=USUBJID type=un;\nRUN;\n```\n\n### `mmrm`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmmrm::mmrm(\n formula = FEV1 ~ ARMCD * AVISIT + us(AVISIT | USUBJID),\n data = fev_data\n)\n```\n:::\n\n\n### `gls`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nnlme::gls(\n formula = FEV1 ~ ARMCD * AVISIT,\n data = fev_data,\n correlation = nlme::corSymm(form = ~ AVISIT | USUBJID),\n weights = nlme::varIdent(form = ~ 1 | AVISIT),\n na.action = na.omit\n)\n```\n:::\n\n\n### `lmer`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlme4::lmer(\n FEV1 ~ ARMCD * AVISIT + (0 + AVISIT | USUBJID),\n data = fev_data,\n control = lme4::lmerControl(check.nobs.vs.nRE = \"ignore\"),\n na.action = na.omit\n)\n```\n:::\n\n\n### `glmmTMB`\n\n\n::: {.cell}\n\n```{.r .cell-code}\nglmmTMB::glmmTMB(\n FEV1 ~ ARMCD * AVISIT + us(0 + AVISIT | USUBJID),\n dispformula = ~0,\n data = fev_data\n)\n```\n:::\n\n\n# Benchmarking\n\nNext, the MMRM fitting procedures are compared using the FEV and BCVA datasets. FEV1 measurements are modeled as a function of race, treatment arm, visit number, and the interaction between the treatment arm and the visit number. Change in BCVA is assumed to be a function of race, baseline BCVA, treatment arm, visit number, and the treatment--visit interaction. In both datasets, repeated measures are modeled using an unstructured covariance matrix. The implementations' convergence times are evaluated first, followed by a comparison of their estimates. Finally, we fit these procedures on simulated BCVA-like data to assess the impact of missingness on convergence rates.\n\n## Convergence Times\n\n### FEV Data\n\nThe `mmrm`, `PROC GLIMMIX`, `gls`, `lmer`, and `glmmTMB` functions are applied to the FEV dataset 10 times. 
The convergence times are recorded for each replicate and are reported in the table below.\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n\nTable: Comparison of convergence times: milliseconds\n\n|Implementation | Median| First Quartile| Third Quartile|\n|:--------------|------:|--------------:|--------------:|\n|mmrm | 56.15| 55.76| 56.30|\n|PROC GLIMMIX | 100.00| 100.00| 100.00|\n|lmer | 247.02| 245.25| 257.46|\n|gls | 687.63| 683.50| 692.45|\n|glmmTMB | 715.90| 708.70| 721.57|\n\n\n:::\n:::\n\n\nIt is clear from these results that `mmrm` converges significantly faster than other R functions. Though not demonstrated here, this is generally true regardless of the sample size and covariance structure used. `mmrm` is faster than `PROC GLIMMIX`.\n\n### BCVA Data\n\nThe MMRM implementations are now applied to the BCVA dataset 10 times. The convergence times are presented below.\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n\nTable: Comparison of convergence times: seconds\n\n|Implementation | Median| First Quartile| Third Quartile|\n|:--------------|------:|--------------:|--------------:|\n|mmrm | 3.36| 3.32| 3.46|\n|glmmTMB | 18.65| 18.14| 18.87|\n|PROC GLIMMIX | 36.25| 36.17| 36.29|\n|gls | 164.36| 158.61| 165.93|\n|lmer | 165.26| 157.46| 166.42|\n\n\n:::\n:::\n\n\nWe again find that `mmrm` produces the fastest convergence times on average.\n\n## Marginal Treatment Effect Estimates Comparison\n\nWe next estimate the marginal mean treatment effects for each visit in the FEV and BCVA datasets using the MMRM fitting procedures. All R implementations' estimates are reported relative to `PROC GLIMMIX`'s estimates. 
Convergence status is also reported.\n\n### FEV Data\n\n\n::: {.cell}\n::: {.cell-output-display}\n![](r-sas_mmrm_files/figure-html/review-treatment-fev-1.png){width=672}\n:::\n:::\n\n\nThe R procedures' estimates are very similar to those output by `PROC GLIMMIX`, though `mmrm` and `gls` generate the estimates that are closest to those produced when using SAS. All methods converge using their default optimization arguments.\n\n### BCVA Data\n\n\n::: {.cell}\n::: {.cell-output-display}\n![](r-sas_mmrm_files/figure-html/review-treatment-bcva-1.png){width=672}\n:::\n\n::: {.cell-output-display}\n![](r-sas_mmrm_files/figure-html/review-treatment-bcva-2.png){width=672}\n:::\n:::\n\n\n`mmrm`, `gls` and `lmer` produce estimates that are virtually identical to `PROC GLIMMIX`'s, while `glmmTMB` does not. This is likely explained by `glmmTMB`'s failure to converge. Note too that `lmer` fails to converge.\n\n## Impact of Missing Data on Convergence Rates\n\nThe results of the previous benchmark suggest that the amount of patients missing from later time points affect certain implementations' capacity to converge. We investigate this further by simulating data using a data-generating process similar to that of the BCVA datasets, though with various rates of patient dropout.\n\nTen datasets of 200 patients are generated each of the following levels of missingness: none, mild, moderate, and high. In all scenarios, observations are missing at random. 
The number patients observed at each visit is obtained for one replicated dataset at each level of missingness is presented in the table below.\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n\nTable: Number of patients per visit\n\n| | none| mild| moderate| high|\n|:-----|----:|-----:|--------:|-----:|\n|VIS01 | 200| 196.7| 197.6| 188.1|\n|VIS02 | 200| 195.4| 194.4| 182.4|\n|VIS03 | 200| 195.1| 190.7| 175.2|\n|VIS04 | 200| 194.1| 188.4| 162.8|\n|VIS05 | 200| 191.6| 182.5| 142.7|\n|VIS06 | 200| 188.2| 177.3| 125.4|\n|VIS07 | 200| 184.6| 168.0| 105.9|\n|VIS08 | 200| 178.5| 155.4| 82.6|\n|VIS09 | 200| 175.3| 139.9| 58.1|\n|VIS10 | 200| 164.1| 124.0| 39.5|\n\n\n:::\n:::\n\n\nThe convergence rates of all implementations for stratified by missingness level is presented in the plot below.\n\n\n::: {.cell}\n::: {.cell-output-display}\n![](r-sas_mmrm_files/figure-html/review-convergence-rate-missingness-1.png){width=672}\n:::\n:::\n\n\n`mmrm`, `gls`, and `PROC GLIMMIX` are resilient to missingness, only exhibiting some convergence problems in the scenarios with the most missingness. These implementations converged in all the other scenarios' replicates. `glmmTMB`, on the other hand, has convergence issues in the no-, mild-, and high-missingness datasets, with the worst convergence rate occurring in the datasets with the most dropout. Finally, `lmer` is unreliable in all scenarios, suggesting that it's convergence issues stem from something other than the missing observations.\n\nNote that the default optimization schemes are used for each method; these schemes can be modified to potentially improve convergence rates.\n\nA more comprehensive simulation study using data-generating processes similar to the one used here is outlined in the [`simulations/missing-data-benchmarks`](https://github.com/openpharma/mmrm/tree/main/simulations/missing-data-benchmarks) subdirectory. 
In addition to assessing the effect of missing data on software convergence rates, we also evaluate these methods' fit times and empirical bias, variance, 95% coverage rates, type I error rates and type II error rates. `mmrm` is found to be the most most robust software for fitting MMRMs in scenarios where a large proportion of patients are missing from the last time points. Additionally, `mmrm` has the fastest average fit times regardless of the amount of missingness. All implementations considered produce similar empirical biases, variances, 95% coverage rates, type I error rates and type II error rates.\n", + "supporting": [ + "r-sas_mmrm_files" + ], "filters": [ "rmarkdown/pagebreak.lua" ], diff --git a/_freeze/Comp/r-sas_mmrm/figure-html/review-convergence-rate-missingness-1.png b/_freeze/Comp/r-sas_mmrm/figure-html/review-convergence-rate-missingness-1.png new file mode 100644 index 000000000..bab25b7b5 Binary files /dev/null and b/_freeze/Comp/r-sas_mmrm/figure-html/review-convergence-rate-missingness-1.png differ diff --git a/_freeze/Comp/r-sas_mmrm/figure-html/review-treatment-bcva-1.png b/_freeze/Comp/r-sas_mmrm/figure-html/review-treatment-bcva-1.png new file mode 100644 index 000000000..929d56052 Binary files /dev/null and b/_freeze/Comp/r-sas_mmrm/figure-html/review-treatment-bcva-1.png differ diff --git a/_freeze/Comp/r-sas_mmrm/figure-html/review-treatment-bcva-2.png b/_freeze/Comp/r-sas_mmrm/figure-html/review-treatment-bcva-2.png new file mode 100644 index 000000000..9df76fd2d Binary files /dev/null and b/_freeze/Comp/r-sas_mmrm/figure-html/review-treatment-bcva-2.png differ diff --git a/_freeze/Comp/r-sas_mmrm/figure-html/review-treatment-fev-1.png b/_freeze/Comp/r-sas_mmrm/figure-html/review-treatment-fev-1.png new file mode 100644 index 000000000..88276acc8 Binary files /dev/null and b/_freeze/Comp/r-sas_mmrm/figure-html/review-treatment-fev-1.png differ diff --git a/_freeze/Comp/r-sas_negbin/execute-results/html.json 
b/_freeze/Comp/r-sas_negbin/execute-results/html.json index 9136915a5..1c0b31128 100644 --- a/_freeze/Comp/r-sas_negbin/execute-results/html.json +++ b/_freeze/Comp/r-sas_negbin/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "6b6a5d9808ca509f4c26d2a9a36cd608", + "hash": "cefce732017e3a03a15c5fbaca0ac957", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"R vs SAS: Negative Binomial Regression\"\nformat: html\ntoc: true\necho: true\neval: false\nkeep-hidden: true\n---\n\n# Summary\n\n## Goal\n\nComparison of implementations and results between SAS vs R for negative binomial regression for count data.\n\n## Scope\n\n::::::: columns\n:::: {.column width=\"45%\"}\n::: {.callout-note appearance=\"minimal\" collapse=\"false\"}\n## Methodologies\n\n- Negative binomial regression\\\n:::\n::::\n\n:::: {.column width=\"55%\"}\n::: {.callout-note appearance=\"minimal\" collapse=\"false\"}\n## Technical implementations\n\n- SAS: `PROC GENMOD` with option: `DIST = NB` or `DIST = NEGBIN`\n- R: `MASS::glm.nb`\\\n:::\n::::\n:::::::\n\n## Findings\n\nBelow are summary of findings from a numerical comparison using dummy data, where possible we specify the same algorithm in R and SAS (see section [Numerical Comparisons](#sec-num-comp) for details).\n\n::: {.callout-note appearance=\"minimal\" collapse=\"false\"}\n### Negative binomial regression\n\nExact match (at 0.001 level) can be obtained using `glm.nb` in R vs `PROC GENMOD` procedure in SAS, for parameter and lsmeans estimates, confidence intervals and p-values after manually adjusting the estimated variance-covariance matrix in R. 
For the dispersion parameter the MLEs also match, however discrepancies in the confidence intervals are observed.\n:::\n\nIn the following sections the implementations will be compared in tabular fashion followed by a numerical comparison using dummy data.\n\n# Comparison of key features of SAS and R implementations\n\nThe following set of tables compare the two implementations and which options need to be adjusted in R to obtain matching results if necessary. The following aspects are compared:\n\n1. Parameterization of the negative binomial distribution\n2. Likelihood optimization algorithm\n3. Estimation of the variance-covariance matrix\n4. Convergence criteria\n5. Confidence interval (CI) estimation method\n6. Hypothesis tests for regression coefficients\n7. Estimation of marginal means\n\n## How to read the tables:\n\nLet's walk through the conclusions for [Table 1](#tbl-1):\n\n- SAS and R use different parameterizations of the negative binomial\n- SAS and R use different likelihood optimization algorithms\n- There are differences in the estimation of the variance-covariance matrix, in particular the covariance between dispersion/scale parameter and model coefficients. It is however possible to obtain the SAS variance-covariance matrix in R.\n- Convergence criteria are not generally identical in SAS and R.\n- CI estimation methods are by default not identical but by using alternative confint function in R SAS method can be reproduced\\\n- Methods for hypothesis testing for model coefficients are equivalent in SAS and R.\n- Least-square or marginal means are not directly available in R but equivalent estimation as in SAS is possible with additional packages\n\n| Attribute | SAS
`PROC GENMOD` | R
`MASS::glm.nb` | Note |\n|:----------------:|:----------------:|:----------------:|:-----------------|\n| Negative binomial parameterization | Variance of the negative binomial is given by $\\mu + k\\mu^2$ and the dispersion parameter `k` is estimated. Overdispersion increases as `k` goes to infinity. | Variance of the negative binomial is given by $\\mu + \\frac{\\mu^2}{\\theta}$ and the scale parameter `theta` is estimated. Overdispersion increases as `theta` goes to zero. | $k=\\frac{1}{\\theta}$ |\n| Likelihood optimization algorithm | Ridge-stabilized Newton-Raphson algorithm | Iteratively reweighted least squares (IWLS) | It seems SAS performs simultaneous optimization on all parameters (i.e. including dispersion). R uses an alternating process, where glm coefficients are fitted given a fixed theta and then theta is estimated given fixed coefficients until convergence. |\n| Estimation of variance-covariance matrix | Observed (rather than expected) fisher information is used for calculation of standard errors of coefficients, which allows for non-zero covariance between coefficients and dispersion parameter. | Expected fisher information is used for calculation of standard errors of coefficients, so covariance between coefficients and dispersion parameter is zero (which is asymptotically correct). However identical vcov matrix as in SAS can be obtained \"post-hoc\". | As shown in the numerical example below in R the variance-covariance matrix corresponding to the `PROC GENMOD` estimation can be obtained based on the outputs from `MASS::glm.nb` with the `glm.nb.cov` function. The \"correct\" standard errors, confidence intervals and p-values can then be manually calculated based on the new covariance matrix. 
|\n| Convergence criteria | The iterations are considered to have converged when the maximum change in the parameter estimates between iteration steps is less than the value specified in `CONVERGE` option (default: `CONVERGENCE = 1E-4`) | Based on relative difference between deviance, specified through `epsilon` argument in `glm.control` (default: `epsilon = 1e-8`). | `PROC GENMOD` also checks for relative Hessian convergence and throws a warning if this is larger than the value provided in the `CONVH` option (default: `CONVH = 1E-4` ). |\n| Confidence interval (CI) estimation method | By default asymptotic Wald CIs are estimated. Profile likelihood CI is estimated if option `LRCI` is provided. | `confint` function will estimate profile likelihood CIs, Wald CIs can be obtained by using `confint.default` | Note that by default confidence intervals can differ even if same method is used if vcov matrix in R is not adjusted as explained above. |\n| Hypothesis tests for regression coefficients | Asymptotic Wald test | Asymptotic Wald test | `PROC GENMOD` reports Wald Chi-square statistic, while `MASS::glm.nb` reports the Z statistic, however the p-values are equivalent. Note that by default test results will differ if vcov matrix in R is not adjusted as explained above. |\n| Estimation of least-square/marginal means | Calculation through lsmeans statement assumes that for classification effects the groups are balanced. `OM` option can be provided to obtain lsmeans that are using the observed proportions in the data | Not implemented as part of `MASS::glm.nb` but can be obtained using `emmeans` package. 
| In R marginal means can be obtained using the `emmeans::emmeans` function, setting argument `weights = \"equal\"` corresponds to the default option in SAS, while `weights = \"proportional\"` gives the means proportional to observed data |\n\n: Negative binomial regression in SAS vs R {#tbl-1}\n\n# Numerical Comparison {#sec-num-comp}\n\n- SAS `PROC GENMOD` procedure\n\n- R `MASS::glm.nb`\n\nA dummy dataset is first generated, and negative binomial regression is applied to the dummy dataset for demonstration purposes. Every effort is made to ensure that the R code employs estimation methods/ optimization algorithms/ other components that closely match (as much as possible) those used in the SAS code. This is done to facilitate a comparison of estimates, 95% confidence intervals (CIs), and p-values between the two implementations.\n\n## Prerequisites: R packages\n\nIn order to run these analyses we need to load a few packages.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(MASS)\nlibrary(dplyr)\n```\n\n::: {.cell-output .cell-output-stderr .hidden}\n\n```\n\nAttaching package: 'dplyr'\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr .hidden}\n\n```\nThe following object is masked from 'package:MASS':\n\n select\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr .hidden}\n\n```\nThe following objects are masked from 'package:stats':\n\n filter, lag\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr .hidden}\n\n```\nThe following objects are masked from 'package:base':\n\n intersect, setdiff, setequal, union\n```\n\n\n:::\n\n```{.r .cell-code}\nlibrary(emmeans)\n```\n\n::: {.cell-output .cell-output-stderr .hidden}\n\n```\nWelcome to emmeans.\nCaution: You lose important information if you filter this package's results.\nSee '? 
untidy'\n```\n\n\n:::\n:::\n\n\nWe also define the `glm_nb_cov` function to obtain the SAS variance-covariance matrix in R from [here](https://stats.stackexchange.com/questions/221648/negative-binomial-regression-in-r-allowing-for-correlation-between-dispersion).\n\n\n::: {.cell}\n\n```{.r .cell-code}\n## Helper function to compute the variance from negative binomial regression\n## This matches with variance estimated from SAS\nglm_nb_cov <- function(mod) {\n # given a model fitted by glm.nb in MASS, this function returns a variance covariance matrix for the\n # regression coefficients and dispersion parameter, without assuming independence between these\n # note that the model must have been fitted with x=TRUE argument so that design matrix is available\n\n # formulae based on p23-p24 of\n # http://pointer.esalq.usp.br/departamentos/lce/arquivos/aulas/2011/LCE5868/OverdispersionBook.pdf\n # and http://www.math.mcgill.ca/~dstephens/523/Papers/Lawless-1987-CJS.pdf\n\n # lintr: off\n # please rm -- variable not used!\n # k <- mod$theta\n # lintr: on\n # p is number of regression coefficients\n p <- dim(vcov(mod))[1]\n\n # construct observed information matrix\n obsInfo <- array(0, dim = c(p + 1, p + 1))\n\n # first calculate top left part for regression coefficients\n for (i in 1:p) {\n for (j in 1:p) {\n obsInfo[i, j] <- sum(\n (1 + mod$y / mod$theta) *\n mod$fitted.values *\n mod$x[, i] *\n mod$x[, j] /\n (1 + mod$fitted.values / mod$theta)^2\n )\n }\n }\n\n # information for dispersion parameter\n obsInfo[(p + 1), (p + 1)] <- -sum(\n trigamma(mod$theta + mod$y) -\n trigamma(mod$theta) -\n 1 / (mod$fitted.values + mod$theta) +\n (mod$theta + mod$y) / (mod$theta + mod$fitted.values)^2 -\n 1 / (mod$fitted.values + mod$theta) +\n 1 / mod$theta\n )\n\n # covariance between regression coefficients and dispersion\n for (i in 1:p) {\n obsInfo[(p + 1), i] <- -sum(\n ((mod$y - mod$fitted.values) *\n mod$fitted.values /\n ((mod$theta + mod$fitted.values)^2)) *\n mod$x[, i]\n 
)\n obsInfo[i, (p + 1)] <- obsInfo[(p + 1), i]\n }\n\n # return variance covariance matrix\n solve(obsInfo, tol = 1e-20)\n}\n```\n:::\n\n\n## Dummy data\n\nA dummy dataset is simulated, including\n\n- 100 subjects;\n- $grp$: a dummy variable with 1:1 subject assignment to treatment ($grp = 1$) vs placebo ($grp = 0$); note, variable $grpc$ is a character version of $grp$, which takes the value of \"Trt\" or \"Plb\".\n- $x1$: a continuous variable which follows a normal distribution of mean of 0 and sd of 1;\n- $x2$: a categorical variable which take the value of \"A\" or \"B\" or \"C\" with a probability of 0.3, 0.2, 0.5, respectively.\n- $logtime$: An offset for the calculation of rates (e.g time in years) on the log-scale\n- $y$: a negative binomial outcome giving the event counts;\n\nThe dummy dataset is saved as a csv file, and then the csv file is read into SAS.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nN = 100\n\n# set seed for replication\nset.seed(123)\n\n# Treatment Group; 1:1 ratio\ngrp <- rep(c(0, 1), each = N / 2)\n\n# Covariates (one continuous; one categorical)\nx1 <- rnorm(N)\nx2 <- factor(sample(LETTERS[1:3], N, replace = TRUE, prob = c(0.3, 0.2, 0.5)))\n\n# Offset\nlogtime <- log(runif(N, 1, 2))\n\n# Model parameter assumption\nbeta0 = 0.6\nbetaTrt = -0.5\nbeta1 = 0.25\nbeta2 = c(-0.1, 0.2)\ntheta = 1 / 2\n\n\n# Dummy dataset\ndf <- data.frame(grp, x1, x2, logtime) %>%\n mutate(\n log_rate = case_when(\n x2 == \"A\" ~ beta0 + betaTrt * grp + beta1 * x1 + logtime,\n x2 == \"B\" ~ beta0 + betaTrt * grp + beta1 * x1 + beta2[1] + logtime,\n x2 == \"C\" ~ beta0 + betaTrt * grp + beta1 * x1 + beta2[2] + logtime\n ),\n y = rnegbin(N, mu = exp(log_rate), theta = theta),\n grpc = factor(case_when(grp == 0 ~ \"Plb\", grp == 1 ~ \"Trt\"))\n )\n\n# save the dummy dataset to be imported in SAS\n# write.csv(df, file = \"df_dummy_negbin.csv\")\n```\n:::\n\n\n## Negative binomial regression\n\n::: {.callout-note appearance=\"minimal\" collapse=\"false\"}\n### 
Conclusion for negative binomial regression\n\nExact match (at 0.001 level) can be obtained using `glm.nb` in R vs `PROC GENMOD` procedure in SAS, for parameters and lsmeans estimates, confidence intervals and p-values after manually adjusting the estimated variance-covariance matrix in R. For the dispersion parameter the MLEs also match, however discrepancies in the confidence intervals are observed.\n:::\n\n### Negative binomial regression in SAS\n\nAfter importing the dummy dataset we can run the negative binomial regression in SAS using \\`PROC GENMOD. We estimate the model parameters and lsmeans for the treatment arms using both the default and OM weights.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc genmod data=df;\n\tclass GRPC (ref='Plb') X2 (ref='A');\n\tmodel y = GRPC x1 x2 / dist=negbin link=log offset=logtime;\n\tlsmeans GRPC /cl;\n\tlsmeans GRPC /cl OM;\nrun;\n```\n:::\n\n\nBelow is a screenshot of output tables summarizing coefficient estimates and lsmeans.\n\n![](../images/negbin/sas_negbin_estimates.jpg){fig-align=\"left\"}\n\n## Negative binomial regression in R\n\nLets now try to reproduce the results in R using `MASS::glm.nb`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfit <- glm.nb(y ~ grpc + x1 + x2 + offset(logtime), data = df, x = TRUE)\n\n# model coefficients summary\nsummary(fit)$coefficients\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n Estimate Std. Error z value Pr(>|z|)\n(Intercept) 0.72652157 0.3507054 2.0716007 0.03830269\ngrpcTrt -0.61401736 0.3414815 -1.7980982 0.07216145\nx1 0.25663164 0.1890455 1.3575129 0.17461831\nx2B -0.37406342 0.5069487 -0.7378723 0.46059203\nx2C -0.04999267 0.3916689 -0.1276401 0.89843376\n```\n\n\n:::\n:::\n\n\nWe can see that while the estimates are exactly matching those in SAS, the standard errors are slightly smaller. This is a result of the difference in covariance estimation mentioned above. 
To obtain exactly the same results as in SAS we need to re-estimate the covariance matrix using the `glm_nb_cov` function we defined earlier. Note that to use this function with the fitted results we needed to specify `x = TRUE` in the `glm.nb` function so that the design matrix is available.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsigma_hat <- glm_nb_cov(fit)\n\n## recalculate confidence intervals, and p-values\ncoef_est <- coef(fit)\ncoef_se <- sqrt(diag(sigma_hat)[1:5])\n\ncoef_lower <- coef_est - qnorm(0.975) * coef_se\ncoef_upper <- coef_est + qnorm(0.975) * coef_se\n\nzstat <- coef_est / coef_se\npval <- 2 * (1 - pnorm(abs(zstat)))\n\nnew_summary <- cbind(coef_est, coef_se, coef_lower, coef_upper, zstat, pval)\n\ncolnames(new_summary) <- c(\n \"Estimate\",\n \"Std. Error\",\n \"CI_lower\",\n \"CI_upper\",\n \"z value\",\n \"Pr(>|z|)\"\n)\nrownames(new_summary) <- rownames(summary(fit)$coefficients)\nnew_summary\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n Estimate Std. Error CI_lower CI_upper z value Pr(>|z|)\n(Intercept) 0.72652157 0.3517882 0.03702942 1.41601371 2.0652246 0.03890176\ngrpcTrt -0.61401736 0.3479606 -1.29600763 0.06797291 -1.7646174 0.07762809\nx1 0.25663164 0.2066499 -0.14839474 0.66165803 1.2418667 0.21428575\nx2B -0.37406342 0.5073695 -1.36848936 0.62036253 -0.7372604 0.46096404\nx2C -0.04999267 0.4013463 -0.83661692 0.73663158 -0.1245624 0.90086997\n```\n\n\n:::\n:::\n\n\nNow the estimates, standard errors, 95% confidence interval limits and p-values are exactly matching those in SAS up to the 4th digit. 
We can also provide an estimate and CI for the dispersion parameter:\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# estimate and 95%-CI for k = 1/theta\ntheta_est <- fit$theta\ntheta_se <- sqrt(sigma_hat[6, 6])\n\ntheta_est_ci <- c(\n theta_est,\n theta_est - qnorm(0.975) * theta_se,\n theta_est + qnorm(0.975) * theta_se\n)\n1 / theta_est_ci[c(1, 3, 2)]\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 2.370525 1.672211 4.070264\n```\n\n\n:::\n:::\n\n\nWe see that while the point estimate is the same as in SAS, the CI for the dispersion does not match, most likely due to the different parameterizations used by SAS and R.\n\nFinally we can replicate the estimation of lsmeans in SAS via the emmeans package. Note that we need to supply the re-estimated covariance matrix, but only provide the rows and columns for the model coefficients without the dispersion parameter as emmeans does not need the latter.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# lsmeans with weights = equal, equivalent to SAS default\nlsmean1 <- emmeans(\n fit,\n ~grpc,\n data = df,\n vcov. = sigma_hat[1:5, 1:5],\n weights = \"equal\",\n offset = 0\n)\nlsmean1\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n grpc emmean SE df asymp.LCL asymp.UCL\n Plb 0.60837 0.245 Inf 0.128 1.088\n Trt -0.00565 0.268 Inf -0.531 0.519\n\nResults are averaged over the levels of: x2 \nResults are given on the log (not the response) scale. \nConfidence level used: 0.95 \n```\n\n\n:::\n\n```{.r .cell-code}\n# lsmeans with weights = proportional, equivalent to SAS OM option\nlsmean2 <- emmeans(\n fit,\n ~grpc,\n data = df,\n vcov. = sigma_hat[1:5, 1:5],\n weights = \"proportional\",\n offset = 0\n)\nlsmean2\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n grpc emmean SE df asymp.LCL asymp.UCL\n Plb 0.6527 0.237 Inf 0.188 1.117\n Trt 0.0386 0.250 Inf -0.451 0.528\n\nResults are averaged over the levels of: x2 \nResults are given on the log (not the response) scale. 
\nConfidence level used: 0.95 \n```\n\n\n:::\n:::\n\n\nEstimates and CIs are exactly matching those in SAS for both of the options. Finally we can also obtain the z statistic and corresponding p-values:\n\n\n::: {.cell}\n\n```{.r .cell-code}\ntest(lsmean1)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n grpc emmean SE df z.ratio p.value\n Plb 0.60837 0.245 Inf 2.484 0.0130\n Trt -0.00565 0.268 Inf -0.021 0.9832\n\nResults are averaged over the levels of: x2 \nResults are given on the log (not the response) scale. \n```\n\n\n:::\n\n```{.r .cell-code}\ntest(lsmean2)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n grpc emmean SE df z.ratio p.value\n Plb 0.6527 0.237 Inf 2.753 0.0059\n Trt 0.0386 0.250 Inf 0.155 0.8770\n\nResults are averaged over the levels of: x2 \nResults are given on the log (not the response) scale. \n```\n\n\n:::\n:::\n\n\nAnd we see that these are also identical to the SAS results.\n\n## Discussion\n\nAs shown above it is generally possible to obtain exactly matching results in SAS and R for negative binomial regression. Most important to ensure matching is the manual estimation of the covariance matrix in R, as otherwise standard errors will only asymptotically match those in SAS.\n\nAs shown above lsmeans-type estimates can also be exactly reproduced using the emmeans package in R if options are correctly specified.\n\nFor the dispersion parameter an exact match in the MLE is possible, however CIs were not matching in our example. Most likely this is due to the different parameterizations used in SAS and R, since the variance for the dispersion parameters can not be transformed exactly between the two parameterizations. 
As generally the dispersion parameter should be of lesser interest and the other parameter estimates are not affected by this, this may however not be an issue in most applications.\n\nEven though results matched in the numerical example we have also highlighted that there are differences in the implementations, in particular when it comes to maximum likelihood optimization methods and convergence criteria. It is possible that this may lead to different estimates for data where the MLE is not easy to find and the methods may disagree on convergence or the optima of the likelihood. In addition, the different parameterizations may lead to different results in scenarios, where there is only very little overdispersion, since in those cases the dispersion parameter will go towards zero in SAS and towards infinity in R.\n\nAs a final point it should be kept in mind when comparing SAS and R results, that the two apply different rules for rounding. R rounds to the even digit (i.e. both 1.5 and 2.5 round to 2), while SAS uses \"conventional\" rounding rules (i.e 1.5 is rounded to 2 and 2.5 to 3). This can also occasionally lead to differences in results and may need to be addressed by using a custom rounding function in R, that uses SAS rounding rules. 
An example of such a function is provided in one of the references given below.\n\n## References\n\n- [SAS PROC GENMOD documentation](https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.3/statug/statug_genmod_toc.htm).\n- [R glm.nb documentation](https://www.rdocumentation.org/packages/MASS/versions/7.3-60.0.1/topics/glm.nb).\n- [CrossValidated discussion on covariance estimation](https://stats.stackexchange.com/questions/221648/negative-binomial-regression-in-r-allowing-for-correlation-between-dispersion) (`glm.nb.cov` function is provided in the answer by Jonathan Bartlett).\n- [Discussion of general differences in SAS and R including rounding](https://www.lexjansen.com/phuse-us/2020/ct/CT05.pdf)", + "markdown": "---\ntitle: \"R vs SAS: Negative Binomial Regression\"\nformat: html\ntoc: true\nexecute:\n echo: true\nkeep-hidden: true\n---\n\n# Summary\n\n## Goal\n\nComparison of implementations and results between SAS vs R for negative binomial regression for count data.\n\n## Scope\n\n::::::: columns\n:::: {.column width=\"45%\"}\n::: {.callout-note appearance=\"minimal\" collapse=\"false\"}\n## Methodologies\n\n- Negative binomial regression\\\n:::\n::::\n\n:::: {.column width=\"55%\"}\n::: {.callout-note appearance=\"minimal\" collapse=\"false\"}\n## Technical implementations\n\n- SAS: `PROC GENMOD` with option: `DIST = NB` or `DIST = NEGBIN`\n- R: `MASS::glm.nb`\\\n:::\n::::\n:::::::\n\n## Findings\n\nBelow are summary of findings from a numerical comparison using dummy data, where possible we specify the same algorithm in R and SAS (see section [Numerical Comparisons](#sec-num-comp) for details).\n\n::: {.callout-note appearance=\"minimal\" collapse=\"false\"}\n### Negative binomial regression\n\nExact match (at 0.001 level) can be obtained using `glm.nb` in R vs `PROC GENMOD` procedure in SAS, for parameter and lsmeans estimates, confidence intervals and p-values after manually adjusting the estimated variance-covariance matrix in R. 
For the dispersion parameter the MLEs also match, however discrepancies in the confidence intervals are observed.\n:::\n\nIn the following sections the implementations will be compared in tabular fashion followed by a numerical comparison using dummy data.\n\n# Comparison of key features of SAS and R implementations\n\nThe following set of tables compare the two implementations and which options need to be adjusted in R to obtain matching results if necessary. The following aspects are compared:\n\n1. Parameterization of the negative binomial distribution\n2. Likelihood optimization algorithm\n3. Estimation of the variance-covariance matrix\n4. Convergence criteria\n5. Confidence interval (CI) estimation method\n6. Hypothesis tests for regression coefficients\n7. Estimation of marginal means\n\n## How to read the tables:\n\nLet's walk through the conclusions for [Table 1](#tbl-1):\n\n- SAS and R use different parameterizations of the negative binomial\n- SAS and R use different likelihood optimization algorithms\n- There are differences in the estimation of the variance-covariance matrix, in particular the covariance between dispersion/scale parameter and model coefficients. It is however possible to obtain the SAS variance-covariance matrix in R.\n- Convergence criteria are not generally identical in SAS and R.\n- CI estimation methods are by default not identical but by using alternative confint function in R SAS method can be reproduced\\\n- Methods for hypothesis testing for model coefficients are equivalent in SAS and R.\n- Least-square or marginal means are not directly available in R but equivalent estimation as in SAS is possible with additional packages\n\n| Attribute | SAS
`PROC GENMOD` | R
`MASS::glm.nb` | Note |\n|:----------------:|:----------------:|:----------------:|:-----------------|\n| Negative binomial parameterization | Variance of the negative binomial is given by $\\mu + k\\mu^2$ and the dispersion parameter `k` is estimated. Overdispersion increases as `k` goes to infinity. | Variance of the negative binomial is given by $\\mu + \\frac{\\mu^2}{\\theta}$ and the scale parameter `theta` is estimated. Overdispersion increases as `theta` goes to zero. | $k=\\frac{1}{\\theta}$ |\n| Likelihood optimization algorithm | Ridge-stabilized Newton-Raphson algorithm | Iteratively reweighted least squares (IWLS) | It seems SAS performs simultaneous optimization on all parameters (i.e. including dispersion). R uses an alternating process, where glm coefficients are fitted given a fixed theta and then theta is estimated given fixed coefficients until convergence. |\n| Estimation of variance-covariance matrix | Observed (rather than expected) fisher information is used for calculation of standard errors of coefficients, which allows for non-zero covariance between coefficients and dispersion parameter. | Expected fisher information is used for calculation of standard errors of coefficients, so covariance between coefficients and dispersion parameter is zero (which is asymptotically correct). However identical vcov matrix as in SAS can be obtained \"post-hoc\". | As shown in the numerical example below in R the variance-covariance matrix corresponding to the `PROC GENMOD` estimation can be obtained based on the outputs from `MASS::glm.nb` with the `glm.nb.cov` function. The \"correct\" standard errors, confidence intervals and p-values can then be manually calculated based on the new covariance matrix. 
|\n| Convergence criteria | The iterations are considered to have converged when the maximum change in the parameter estimates between iteration steps is less than the value specified in `CONVERGE` option (default: `CONVERGENCE = 1E-4`) | Based on relative difference between deviance, specified through `epsilon` argument in `glm.control` (default: `epsilon = 1e-8`). | `PROC GENMOD` also checks for relative Hessian convergence and throws a warning if this is larger than the value provided in the `CONVH` option (default: `CONVH = 1E-4` ). |\n| Confidence interval (CI) estimation method | By default asymptotic Wald CIs are estimated. Profile likelihood CI is estimated if option `LRCI` is provided. | `confint` function will estimate profile likelihood CIs, Wald CIs can be obtained by using `confint.default` | Note that by default confidence intervals can differ even if same method is used if vcov matrix in R is not adjusted as explained above. |\n| Hypothesis tests for regression coefficients | Asymptotic Wald test | Asymptotic Wald test | `PROC GENMOD` reports Wald Chi-square statistic, while `MASS::glm.nb` reports the Z statistic, however the p-values are equivalent. Note that by default test results will differ if vcov matrix in R is not adjusted as explained above. |\n| Estimation of least-square/marginal means | Calculation through lsmeans statement assumes that for classification effects the groups are balanced. `OM` option can be provided to obtain lsmeans that are using the observed proportions in the data | Not implemented as part of `MASS::glm.nb` but can be obtained using `emmeans` package. 
| In R marginal means can be obtained using the `emmeans::emmeans` function, setting argument `weights = \"equal\"` corresponds to the default option in SAS, while `weights = \"proportional\"` gives the means proportional to observed data |\n\n: Negative binomial regression in SAS vs R {#tbl-1}\n\n# Numerical Comparison {#sec-num-comp}\n\n- SAS `PROC GENMOD` procedure\n\n- R `MASS::glm.nb`\n\nA dummy dataset is first generated, and negative binomial regression is applied to the dummy dataset for demonstration purposes. Every effort is made to ensure that the R code employs estimation methods/ optimization algorithms/ other components that closely match (as much as possible) those used in the SAS code. This is done to facilitate a comparison of estimates, 95% confidence intervals (CIs), and p-values between the two implementations.\n\n## Prerequisites: R packages\n\nIn order to run these analyses we need to load a few packages.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(MASS)\nlibrary(dplyr)\n```\n\n::: {.cell-output .cell-output-stderr .hidden}\n\n```\n\nAttaching package: 'dplyr'\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr .hidden}\n\n```\nThe following object is masked from 'package:MASS':\n\n select\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr .hidden}\n\n```\nThe following objects are masked from 'package:stats':\n\n filter, lag\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr .hidden}\n\n```\nThe following objects are masked from 'package:base':\n\n intersect, setdiff, setequal, union\n```\n\n\n:::\n\n```{.r .cell-code}\nlibrary(emmeans)\n```\n\n::: {.cell-output .cell-output-stderr .hidden}\n\n```\nWelcome to emmeans.\nCaution: You lose important information if you filter this package's results.\nSee '? 
untidy'\n```\n\n\n:::\n:::\n\n\nWe also define the `glm_nb_cov` function to obtain the SAS variance-covariance matrix in R from [here](https://stats.stackexchange.com/questions/221648/negative-binomial-regression-in-r-allowing-for-correlation-between-dispersion).\n\n\n::: {.cell}\n\n```{.r .cell-code}\n## Helper function to compute the variance from negative binomial regression\n## This matches with variance estimated from SAS\nglm_nb_cov <- function(mod) {\n # given a model fitted by glm.nb in MASS, this function returns a variance covariance matrix for the\n # regression coefficients and dispersion parameter, without assuming independence between these\n # note that the model must have been fitted with x=TRUE argument so that design matrix is available\n\n # formulae based on p23-p24 of\n # http://pointer.esalq.usp.br/departamentos/lce/arquivos/aulas/2011/LCE5868/OverdispersionBook.pdf\n # and http://www.math.mcgill.ca/~dstephens/523/Papers/Lawless-1987-CJS.pdf\n\n # lintr: off\n # please rm -- variable not used!\n # k <- mod$theta\n # lintr: on\n # p is number of regression coefficients\n p <- dim(vcov(mod))[1]\n\n # construct observed information matrix\n obsInfo <- array(0, dim = c(p + 1, p + 1))\n\n # first calculate top left part for regression coefficients\n for (i in 1:p) {\n for (j in 1:p) {\n obsInfo[i, j] <- sum(\n (1 + mod$y / mod$theta) *\n mod$fitted.values *\n mod$x[, i] *\n mod$x[, j] /\n (1 + mod$fitted.values / mod$theta)^2\n )\n }\n }\n\n # information for dispersion parameter\n obsInfo[(p + 1), (p + 1)] <- -sum(\n trigamma(mod$theta + mod$y) -\n trigamma(mod$theta) -\n 1 / (mod$fitted.values + mod$theta) +\n (mod$theta + mod$y) / (mod$theta + mod$fitted.values)^2 -\n 1 / (mod$fitted.values + mod$theta) +\n 1 / mod$theta\n )\n\n # covariance between regression coefficients and dispersion\n for (i in 1:p) {\n obsInfo[(p + 1), i] <- -sum(\n ((mod$y - mod$fitted.values) *\n mod$fitted.values /\n ((mod$theta + mod$fitted.values)^2)) *\n mod$x[, i]\n 
)\n obsInfo[i, (p + 1)] <- obsInfo[(p + 1), i]\n }\n\n # return variance covariance matrix\n solve(obsInfo, tol = 1e-20)\n}\n```\n:::\n\n\n## Dummy data\n\nA dummy dataset is simulated, including\n\n- 100 subjects;\n- $grp$: a dummy variable with 1:1 subject assignment to treatment ($grp = 1$) vs placebo ($grp = 0$); note, variable $grpc$ is a character version of $grp$, which takes the value of \"Trt\" or \"Plb\".\n- $x1$: a continuous variable which follows a normal distribution of mean of 0 and sd of 1;\n- $x2$: a categorical variable which take the value of \"A\" or \"B\" or \"C\" with a probability of 0.3, 0.2, 0.5, respectively.\n- $logtime$: An offset for the calculation of rates (e.g time in years) on the log-scale\n- $y$: a negative binomial outcome giving the event counts;\n\nThe dummy dataset is saved as a csv file, and then the csv file is read into SAS.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nN = 100\n\n# set seed for replication\nset.seed(123)\n\n# Treatment Group; 1:1 ratio\ngrp <- rep(c(0, 1), each = N / 2)\n\n# Covariates (one continuous; one categorical)\nx1 <- rnorm(N)\nx2 <- factor(sample(LETTERS[1:3], N, replace = TRUE, prob = c(0.3, 0.2, 0.5)))\n\n# Offset\nlogtime <- log(runif(N, 1, 2))\n\n# Model parameter assumption\nbeta0 = 0.6\nbetaTrt = -0.5\nbeta1 = 0.25\nbeta2 = c(-0.1, 0.2)\ntheta = 1 / 2\n\n\n# Dummy dataset\ndf <- data.frame(grp, x1, x2, logtime) %>%\n mutate(\n log_rate = case_when(\n x2 == \"A\" ~ beta0 + betaTrt * grp + beta1 * x1 + logtime,\n x2 == \"B\" ~ beta0 + betaTrt * grp + beta1 * x1 + beta2[1] + logtime,\n x2 == \"C\" ~ beta0 + betaTrt * grp + beta1 * x1 + beta2[2] + logtime\n ),\n y = rnegbin(N, mu = exp(log_rate), theta = theta),\n grpc = factor(case_when(grp == 0 ~ \"Plb\", grp == 1 ~ \"Trt\"))\n )\n\n# save the dummy dataset to be imported in SAS\n# write.csv(df, file = \"df_dummy_negbin.csv\")\n```\n:::\n\n\n## Negative binomial regression\n\n::: {.callout-note appearance=\"minimal\" collapse=\"false\"}\n### 
Conclusion for negative binomial regression\n\nExact match (at 0.001 level) can be obtained using `glm.nb` in R vs `PROC GENMOD` procedure in SAS, for parameters and lsmeans estimates, confidence intervals and p-values after manually adjusting the estimated variance-covariance matrix in R. For the dispersion parameter the MLEs also match, however discrepancies in the confidence intervals are observed.\n:::\n\n### Negative binomial regression in SAS\n\nAfter importing the dummy dataset we can run the negative binomial regression in SAS using \\`PROC GENMOD. We estimate the model parameters and lsmeans for the treatment arms using both the default and OM weights.\n\n```sas\nproc genmod data=df;\n\tclass GRPC (ref='Plb') X2 (ref='A');\n\tmodel y = GRPC x1 x2 / dist=negbin link=log offset=logtime;\n\tlsmeans GRPC /cl;\n\tlsmeans GRPC /cl OM;\nrun;\n```\n\nBelow is a screenshot of output tables summarizing coefficient estimates and lsmeans.\n\n![](../images/negbin/sas_negbin_estimates.jpg){fig-align=\"left\"}\n\n## Negative binomial regression in R\n\nLets now try to reproduce the results in R using `MASS::glm.nb`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfit <- glm.nb(y ~ grpc + x1 + x2 + offset(logtime), data = df, x = TRUE)\n\n# model coefficients summary\nsummary(fit)$coefficients\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n Estimate Std. Error z value Pr(>|z|)\n(Intercept) 0.72652157 0.3507054 2.0716007 0.03830269\ngrpcTrt -0.61401736 0.3414815 -1.7980982 0.07216145\nx1 0.25663164 0.1890455 1.3575129 0.17461831\nx2B -0.37406342 0.5069487 -0.7378723 0.46059203\nx2C -0.04999267 0.3916689 -0.1276401 0.89843376\n```\n\n\n:::\n:::\n\n\nWe can see that while the estimates are exactly matching those in SAS, the standard errors are slightly smaller. This is a result of the difference in covariance estimation mentioned above. 
To obtain exactly the same results as in SAS we need to re-estimate the covariance matrix using the `glm_nb_cov` function we defined earlier. Note that to use this function with the fitted results we needed to specify `x = TRUE` in the `glm.nb` function so that the design matrix is available.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsigma_hat <- glm_nb_cov(fit)\n\n## recalculate confidence intervals, and p-values\ncoef_est <- coef(fit)\ncoef_se <- sqrt(diag(sigma_hat)[1:5])\n\ncoef_lower <- coef_est - qnorm(0.975) * coef_se\ncoef_upper <- coef_est + qnorm(0.975) * coef_se\n\nzstat <- coef_est / coef_se\npval <- 2 * (1 - pnorm(abs(zstat)))\n\nnew_summary <- cbind(coef_est, coef_se, coef_lower, coef_upper, zstat, pval)\n\ncolnames(new_summary) <- c(\n \"Estimate\",\n \"Std. Error\",\n \"CI_lower\",\n \"CI_upper\",\n \"z value\",\n \"Pr(>|z|)\"\n)\nrownames(new_summary) <- rownames(summary(fit)$coefficients)\nnew_summary\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n Estimate Std. Error CI_lower CI_upper z value Pr(>|z|)\n(Intercept) 0.72652157 0.3517882 0.03702942 1.41601371 2.0652246 0.03890176\ngrpcTrt -0.61401736 0.3479606 -1.29600763 0.06797291 -1.7646174 0.07762809\nx1 0.25663164 0.2066499 -0.14839474 0.66165803 1.2418667 0.21428575\nx2B -0.37406342 0.5073695 -1.36848936 0.62036253 -0.7372604 0.46096404\nx2C -0.04999267 0.4013463 -0.83661692 0.73663158 -0.1245624 0.90086997\n```\n\n\n:::\n:::\n\n\nNow the estimates, standard errors, 95% confidence interval limits and p-values are exactly matching those in SAS up to the 4th digit. 
We can also provide an estimate and CI for the dispersion parameter:\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# estimate and 95%-CI for k = 1/theta\ntheta_est <- fit$theta\ntheta_se <- sqrt(sigma_hat[6, 6])\n\ntheta_est_ci <- c(\n theta_est,\n theta_est - qnorm(0.975) * theta_se,\n theta_est + qnorm(0.975) * theta_se\n)\n1 / theta_est_ci[c(1, 3, 2)]\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 2.370525 1.672211 4.070264\n```\n\n\n:::\n:::\n\n\nWe see that while the point estimate is the same as in SAS, the CI for the dispersion does not match, most likely due to the different parameterizations used by SAS and R.\n\nFinally we can replicate the estimation of lsmeans in SAS via the emmeans package. Note that we need to supply the re-estimated covariance matrix, but only provide the rows and columns for the model coefficients without the dispersion parameter as emmeans does not need the latter.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# lsmeans with weights = equal, equivalent to SAS default\nlsmean1 <- emmeans(\n fit,\n ~grpc,\n data = df,\n vcov. = sigma_hat[1:5, 1:5],\n weights = \"equal\",\n offset = 0\n)\nlsmean1\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n grpc emmean SE df asymp.LCL asymp.UCL\n Plb 0.60837 0.245 Inf 0.128 1.088\n Trt -0.00565 0.268 Inf -0.531 0.519\n\nResults are averaged over the levels of: x2 \nResults are given on the log (not the response) scale. \nConfidence level used: 0.95 \n```\n\n\n:::\n\n```{.r .cell-code}\n# lsmeans with weights = proportional, equivalent to SAS OM option\nlsmean2 <- emmeans(\n fit,\n ~grpc,\n data = df,\n vcov. = sigma_hat[1:5, 1:5],\n weights = \"proportional\",\n offset = 0\n)\nlsmean2\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n grpc emmean SE df asymp.LCL asymp.UCL\n Plb 0.6527 0.237 Inf 0.188 1.117\n Trt 0.0386 0.250 Inf -0.451 0.528\n\nResults are averaged over the levels of: x2 \nResults are given on the log (not the response) scale. 
\nConfidence level used: 0.95 \n```\n\n\n:::\n:::\n\n\nEstimates and CIs are exactly matching those in SAS for both of the options. Finally we can also obtain the z statistic and corresponding p-values:\n\n\n::: {.cell}\n\n```{.r .cell-code}\ntest(lsmean1)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n grpc emmean SE df z.ratio p.value\n Plb 0.60837 0.245 Inf 2.484 0.0130\n Trt -0.00565 0.268 Inf -0.021 0.9832\n\nResults are averaged over the levels of: x2 \nResults are given on the log (not the response) scale. \n```\n\n\n:::\n\n```{.r .cell-code}\ntest(lsmean2)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n grpc emmean SE df z.ratio p.value\n Plb 0.6527 0.237 Inf 2.753 0.0059\n Trt 0.0386 0.250 Inf 0.155 0.8770\n\nResults are averaged over the levels of: x2 \nResults are given on the log (not the response) scale. \n```\n\n\n:::\n:::\n\n\nAnd we see that these are also identical to the SAS results.\n\n## Discussion\n\nAs shown above it is generally possible to obtain exactly matching results in SAS and R for negative binomial regression. Most important to ensure matching is the manual estimation of the covariance matrix in R, as otherwise standard errors will only asymptotically match those in SAS.\n\nAs shown above lsmeans-type estimates can also be exactly reproduced using the emmeans package in R if options are correctly specified.\n\nFor the dispersion parameter an exact match in the MLE is possible, however CIs were not matching in our example. Most likely this is due to the different parameterizations used in SAS and R, since the variance for the dispersion parameters can not be transformed exactly between the two parameterizations. 
As generally the dispersion parameter should be of lesser interest and the other parameter estimates are not affected by this, this may however not be an issue in most applications.\n\nEven though results matched in the numerical example we have also highlighted that there are differences in the implementations, in particular when it comes to maximum likelihood optimization methods and convergence criteria. It is possible that this may lead to different estimates for data where the MLE is not easy to find and the methods may disagree on convergence or the optima of the likelihood. In addition, the different parameterizations may lead to different results in scenarios where there is only very little overdispersion, since in those cases the dispersion parameter will go towards zero in SAS and towards infinity in R.\n\nAs a final point it should be kept in mind when comparing SAS and R results that the two apply different rules for rounding. R rounds to the even digit (i.e. both 1.5 and 2.5 round to 2), while SAS uses \"conventional\" rounding rules (i.e. 1.5 is rounded to 2 and 2.5 to 3). This can also occasionally lead to differences in results and may need to be addressed by using a custom rounding function in R that uses SAS rounding rules. 
An example of such a function is provided in one of the references given below.\n\n## References\n\n- [SAS PROC GENMOD documentation](https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.3/statug/statug_genmod_toc.htm).\n- [R glm.nb documentation](https://www.rdocumentation.org/packages/MASS/versions/7.3-60.0.1/topics/glm.nb).\n- [CrossValidated discussion on covariance estimation](https://stats.stackexchange.com/questions/221648/negative-binomial-regression-in-r-allowing-for-correlation-between-dispersion) (`glm.nb.cov` function is provided in the answer by Jonathan Bartlett).\n- [Discussion of general differences in SAS and R including rounding](https://www.lexjansen.com/phuse-us/2020/ct/CT05.pdf)", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/Comp/r-sas_psmatch/execute-results/html.json b/_freeze/Comp/r-sas_psmatch/execute-results/html.json index ffc857b7b..191aa757b 100644 --- a/_freeze/Comp/r-sas_psmatch/execute-results/html.json +++ b/_freeze/Comp/r-sas_psmatch/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "5227b7241bb74da31eabeeb35caa1685", + "hash": "dd4a107f646924baf465853ee611e57e", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Propensity Score Matching\"\nexecute: \n eval: false\n---\n\n# Introduction\n\nPropensity score (PS) matching is a statistical technique widely employed in Real World Evidence (RWE) studies to address confounding bias and facilitate the estimation of causal treatment effects. By estimating the probability of treatment assignment based on observed covariates, PS matching aims to create comparable treatment and control groups. While both SAS and R provide methods to do PS matching, the syntax and available options differ considerably, potentially leading to variations in analytical outcomes. 
The PS matching process generally involves several key stages: first, the estimation of propensity scores using a regression model; second, the specification of a region of common support to ensure overlap between treatment and control groups; and third, the matching of treated and control subjects based on their calculated propensity scores.\n\n# Differences\n\nGiven the extensive number of parameters and arguments available in both `PROC PSMATCH` and `matchit()`, we will focus only on the observed differences.\n\n## Options\n\n+-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Option | PROC PSMATCH | matchit |\n+=======================================================+===========================================================================================================================+===========================================================================================================================================================================================================================+\n| Distance | *PS, LPS*; only for matching: *mahalanobis*, *euclidean* | *PS*, *euclidean*, *scaled_euclidean*, *mahalanobis*, *robust_mahalanobis* |\n+-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| PS methods | logistic regression | glm, gam, gbm, lasso, ridge, 
elasticnet with different links, partitioning tree, RF, single-hidden-layer neural network, covariate balancing propensity score (CBPS) algorithm, Bayesian additive regression trees (BART) |\n+-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Region | Always used; *allobs*, *treated* or *cs* (common support), with allowed extension | *none*, *treated*, *control*, *both* (common support) |\n+-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Caliper | Applies only to the distance metric, such that distance\\<=caliper | Can be applied to both covariates and distance metric. If a positive value is supplied, it functions as in SAS; If a negative value is supplied, it imposes the condition distance \\> abs(caliper). 
|\n+-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| PS std.dev formula when caliper applied as multiplier | sqrt((sd(PS~trt~)^2^+sd(P~control~)^2^)/2) | sd(PS~all~) |\n+-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Matching methods | *greedy* (greedy nn), *full*, *optimal*, *replace*, *varratio* | *nearest* (greedy nn), *full*, *optimal*, *quick*, *genetic*, *cem*, *exact*, *cardinality*, *subclass* |\n+-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Method=optimal options | Only K should be supplied | In addition to k (number of matches), the tol option can also be specified, and its value can significantly impact the matching results. 
|\n+-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Method=full options | n max controls, n max treated, mean n treated, n controls, pct controls | n min controls, n max controls, mean n controls, fraction to omit, tol, solver |\n+-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Mahalanobis distance | PS are always calculated to determine the region; only numeric covariates are allowed in MAHVARS option | mahvars accepts any var type; region calculation is ommited when distance=*mahalanobis*, but can be performed when distance=*glm* and mahvars is supplied with a formula |\n+-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Covariance matrix for mahalanobis distance | Computed from trt obs and control obs | Is pooled within-group, computed from trt mean-centered covariates of the full sample 
|\n+-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Replace | Could be specified only as method=*replace*; in the output, match IDs are shared among all subjects within a matched set. | Could be specified as an argument (e.g. replace=*TRUE*). In the output, each treated subject receives up to K matched controls |\n+-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Estimands | ATT, ATE | ATT, ATE, ATC |\n+-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Reestimate | Not available | Could be specified as an argument (e.g. 
reestimate=*TRUE*) |\n+-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Exact, anti-exact | Only exact matching is available and can only be performed on variables listed in the CLASS statement. | Both exact and anti-exact matching are available, with no restrictions on the variable types. |\n+-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Normalization | Not available | Could be specified as an argument (e.g. normalize=*TRUE*) |\n+-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n\n## Output\n\nThe way SAS and R show the results of matching is different. 
The output from SAS matching is presented as a dataset where each treated subject (or all subjects) is given a row, and a match ID column containing a unique pair (or group) identifier is included:\n\n| | TRTP | \\_MatchID |\n|-----|---------|-----------|\n| 1 | trt | 1 |\n| 2 | trt | 2 |\n| 3 | control | 1 |\n| 4 | control | 2 |\n\nIn contrast, the `matchit()` function in R returns a matrix, where each treated unit's identifier is associated with the identifiers of its k matched control units:\n\n| | \\[,1\\] |\n|-----|--------|\n| 1 | \"3\" |\n| 2 | \"4\" |\n\n### Statistics\n\nIn SAS, descriptive statistics for assessing balance are primarily generated through the `ASSESS` statement within `PROC PSMATCH`. Conversely, in R, balance diagnostics could be obtained by applying the `summary()` function to the output object returned by the `matchit()`. The following table summarizes the balance statistics available in SAS and R:\n\n| Stat | All | Region | Matched |\n|:-----------------------|:-------:|:------:|:-------:|\n| N | SAS & R | SAS | SAS & R |\n| PS mean | SAS & R | SAS | SAS & R |\n| PS std | SAS | SAS | SAS |\n| PS min | SAS | SAS | SAS |\n| PS max | SAS | SAS | SAS |\n| Vars mean | SAS & R | SAS | SAS & R |\n| Vars std | SAS | SAS | SAS |\n| Vars min | SAS | SAS | SAS |\n| Vars max | SAS | SAS | SAS |\n| PS mean diff | SAS | SAS | SAS |\n| PS SMD | SAS & R | SAS | SAS & R |\n| PS perc. red. | SAS | SAS | SAS |\n| PS var ratio | SAS & R | SAS | SAS & R |\n| Vars mean diff | SAS | SAS | SAS |\n| Vars SMD | SAS & R | SAS | SAS & R |\n| Vars perc. red. 
| SAS | SAS | SAS |\n| Vars var ratio | SAS & R | SAS | SAS & R |\n| PS eCDF min | R | \\- | R |\n| PS eCDF max | R | \\- | R |\n| Vars eCDF min | R | \\- | R |\n| Vars eCDF max | R | \\- | R |\n| PS std pair distance | R | \\- | R |\n| Vars std pair distance | R | \\- | R |\n\n### Figures\n\n+----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------+\n| Plot type | R | SAS |\n+============================+========================================================================================================================================================+======================================================================+\n| Love plot | - [plot()]{.underline}: - | **Displayed for:** all/region/matched/weighted matched, trt/control. |\n| | | |\n| | - [cobalt:]{.underline} many different settings. | **Includes** PS, all numeric and binary variables. |\n+----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------+\n| General distribution plots | - [plot():]{.underline} **Displayed for:** all/matched. **Includes** all variables; numeric - distribution plots, character and factor - histograms. | **Displayed for:** all/region/matched/weighted matched, trt/control. |\n| | | |\n| | - [cobalt]{.underline} : Highly customizable plots. | **Includes** PS and all variables. |\n| | | |\n| | | For PS and numeric - boxplots, character - barplots. 
|\n+----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------+\n| eCDF plots | [plot():]{.underline} | **Displayed for:** all/region/matched/weighted matched, trt/control. |\n| | | |\n| | **Displayed for:** all/matched. | **Includes** PS and all numeric variables. |\n| | | |\n| | **Includes** all variables. | |\n+----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------+\n| eQQ plots | [plot():]{.underline} | \\- |\n| | | |\n| | **Displayed for:** all/matched. | |\n| | | |\n| | **Includes** all variables. | |\n+----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------+\n| Cloud plots | [plot():]{.underline} | **Displayed for:** all/region/matched/weighted matched, trt/control. |\n| | | |\n| | **Displayed for:** all/matched, trt/control. | **Includes** PS and all numeric variables. |\n| | | |\n| | **Includes** PS. | Presented as 2 separate clouds per variable. |\n| | | |\n| | Presented as 4 separate clouds. | |\n+----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------+\n| PS histogram | - [plot()]{.underline}: **Displayed for:** all/matched, trt/control. **Includes** PS. | \\- |\n| | | |\n| | - [cobalt]{.underline}: Highly customizable plots. 
| |\n+----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------+\n\n# Dataset\n\nThe dataset used in the example below can be found here: \\[ps_data.csv\\]()\n\n``` default\n trt control Standardized\nCharacteristic (N = 120) (N = 180) Mean Diff.\n\n sex 0.1690\n F 70 (58.3 %) 90 (50.0 %) \n M 50 (41.7 %) 90 (50.0 %)\n\n age 61.5 (17.12) 49.4 (10.55) 0.7057\n\n weight 67.3 ( 7.33) 63.8 ( 9.64) 0.4741\n\n bmi_cat \n underweight 34 (28.3 %) 63 (35.0 %) -0.1479 \n normal 57 (47.5 %) 61 (33.9 %) 0.2726\n overweight 29 (24.2 %) 56 (31.1 %) -0.1622\n```\n\n# Matching Examples\n\n## Greedy Nearest Neighbor 1 to 1 matching with common support region\n\n### SAS\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc psmatch data=data region=cs(extend=0);\n class trtp sex bmi_cat;\n psmodel trtp(Treated=\"trt\")= sex weight age bmi_cat;\n match distance=PS \n method=greedy(k=1 order=descending) \n caliper(MULT=ONE)=0.25;\n output out(obs=match)=ps_res matchid=_MatchID ps=_PScore;\nrun;\n```\n:::\n\n\n### R\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(MatchIt)\n\nps_res <- MatchIt::matchit(\n trtp ~ sex + weight + age + bmi_cat,\n data = data,\n method = \"nearest\",\n distance = \"glm\",\n link = \"logit\",\n discard = \"both\",\n m.order = \"largest\",\n replace = FALSE,\n caliper = 0.25,\n std.caliper = FALSE,\n ratio = 1,\n normalize = FALSE\n)\n```\n:::\n\n\nThe following arguments, when altered in the previous example, can still produce matching results comparable between SAS and R:\n\n- `region` (`discard`)\n\n- `caliper` value\n\n- `order`\n\n- `k` (`ratio`)\n\n- `exact`", + "markdown": "---\ntitle: \"Propensity Score Matching\"\nexecute:\n eval: false\n---\n\n# Introduction\n\nPropensity score (PS) matching is a statistical technique widely employed in Real World Evidence (RWE) 
studies to address confounding bias and facilitate the estimation of causal treatment effects. By estimating the probability of treatment assignment based on observed covariates, PS matching aims to create comparable treatment and control groups. While both SAS and R provide methods to do PS matching, the syntax and available options differ considerably, potentially leading to variations in analytical outcomes. The PS matching process generally involves several key stages: first, the estimation of propensity scores using a regression model; second, the specification of a region of common support to ensure overlap between treatment and control groups; and third, the matching of treated and control subjects based on their calculated propensity scores.\n\n# Differences\n\nGiven the extensive number of parameters and arguments available in both `PROC PSMATCH` and `matchit()`, we will focus only on the observed differences.\n\n## Options\n\n+-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Option | PROC PSMATCH | matchit |\n+=======================================================+===========================================================================================================================+===========================================================================================================================================================================================================================+\n| Distance | *PS, LPS*; only for matching: *mahalanobis*, *euclidean* | *PS*, *euclidean*, *scaled_euclidean*, *mahalanobis*, *robust_mahalanobis* 
|\n+-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| PS methods | logistic regression | glm, gam, gbm, lasso, ridge, elasticnet with different links, partitioning tree, RF, single-hidden-layer neural network, covariate balancing propensity score (CBPS) algorithm, Bayesian additive regression trees (BART) |\n+-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Region | Always used; *allobs*, *treated* or *cs* (common support), with allowed extension | *none*, *treated*, *control*, *both* (common support) |\n+-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Caliper | Applies only to the distance metric, such that distance\\<=caliper | Can be applied to both covariates and distance metric. If a positive value is supplied, it functions as in SAS; If a negative value is supplied, it imposes the condition distance \\> abs(caliper). 
|\n+-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| PS std.dev formula when caliper applied as multiplier | sqrt((sd(PS~trt~)^2^+sd(P~control~)^2^)/2) | sd(PS~all~) |\n+-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Matching methods | *greedy* (greedy nn), *full*, *optimal*, *replace*, *varratio* | *nearest* (greedy nn), *full*, *optimal*, *quick*, *genetic*, *cem*, *exact*, *cardinality*, *subclass* |\n+-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Method=optimal options | Only K should be supplied | In addition to k (number of matches), the tol option can also be specified, and its value can significantly impact the matching results. 
|\n+-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Method=full options | n max controls, n max treated, mean n treated, n controls, pct controls | n min controls, n max controls, mean n controls, fraction to omit, tol, solver |\n+-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Mahalanobis distance | PS are always calculated to determine the region; only numeric covariates are allowed in MAHVARS option | mahvars accepts any var type; region calculation is ommited when distance=*mahalanobis*, but can be performed when distance=*glm* and mahvars is supplied with a formula |\n+-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Covariance matrix for mahalanobis distance | Computed from trt obs and control obs | Is pooled within-group, computed from trt mean-centered covariates of the full sample 
|\n+-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Replace | Could be specified only as method=*replace*; in the output, match IDs are shared among all subjects within a matched set. | Could be specified as an argument (e.g. replace=*TRUE*). In the output, each treated subject receives up to K matched controls |\n+-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Estimands | ATT, ATE | ATT, ATE, ATC |\n+-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Reestimate | Not available | Could be specified as an argument (e.g. 
reestimate=*TRUE*) |\n+-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Exact, anti-exact | Only exact matching is available and can only be performed on variables listed in the CLASS statement. | Both exact and anti-exact matching are available, with no restrictions on the variable types. |\n+-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Normalization | Not available | Could be specified as an argument (e.g. normalize=*TRUE*) |\n+-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n\n## Output\n\nThe way SAS and R show the results of matching is different. 
The output from SAS matching is presented as a dataset where each treated subject (or all subjects) is given a row, and a match ID column containing a unique pair (or group) identifier is included:\n\n| | TRTP | \\_MatchID |\n|-----|---------|-----------|\n| 1 | trt | 1 |\n| 2 | trt | 2 |\n| 3 | control | 1 |\n| 4 | control | 2 |\n\nIn contrast, the `matchit()` function in R returns a matrix, where each treated unit's identifier is associated with the identifiers of its k matched control units:\n\n| | \\[,1\\] |\n|-----|--------|\n| 1 | \"3\" |\n| 2 | \"4\" |\n\n### Statistics\n\nIn SAS, descriptive statistics for assessing balance are primarily generated through the `ASSESS` statement within `PROC PSMATCH`. Conversely, in R, balance diagnostics could be obtained by applying the `summary()` function to the output object returned by the `matchit()`. The following table summarizes the balance statistics available in SAS and R:\n\n| Stat | All | Region | Matched |\n|:-----------------------|:-------:|:------:|:-------:|\n| N | SAS & R | SAS | SAS & R |\n| PS mean | SAS & R | SAS | SAS & R |\n| PS std | SAS | SAS | SAS |\n| PS min | SAS | SAS | SAS |\n| PS max | SAS | SAS | SAS |\n| Vars mean | SAS & R | SAS | SAS & R |\n| Vars std | SAS | SAS | SAS |\n| Vars min | SAS | SAS | SAS |\n| Vars max | SAS | SAS | SAS |\n| PS mean diff | SAS | SAS | SAS |\n| PS SMD | SAS & R | SAS | SAS & R |\n| PS perc. red. | SAS | SAS | SAS |\n| PS var ratio | SAS & R | SAS | SAS & R |\n| Vars mean diff | SAS | SAS | SAS |\n| Vars SMD | SAS & R | SAS | SAS & R |\n| Vars perc. red. 
| SAS | SAS | SAS |\n| Vars var ratio | SAS & R | SAS | SAS & R |\n| PS eCDF min | R | \\- | R |\n| PS eCDF max | R | \\- | R |\n| Vars eCDF min | R | \\- | R |\n| Vars eCDF max | R | \\- | R |\n| PS std pair distance | R | \\- | R |\n| Vars std pair distance | R | \\- | R |\n\n### Figures\n\n+----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------+\n| Plot type | R | SAS |\n+============================+========================================================================================================================================================+======================================================================+\n| Love plot | - [plot()]{.underline}: - | **Displayed for:** all/region/matched/weighted matched, trt/control. |\n| | | |\n| | - [cobalt:]{.underline} many different settings. | **Includes** PS, all numeric and binary variables. |\n+----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------+\n| General distribution plots | - [plot():]{.underline} **Displayed for:** all/matched. **Includes** all variables; numeric - distribution plots, character and factor - histograms. | **Displayed for:** all/region/matched/weighted matched, trt/control. |\n| | | |\n| | - [cobalt]{.underline} : Highly customizable plots. | **Includes** PS and all variables. |\n| | | |\n| | | For PS and numeric - boxplots, character - barplots. 
|\n+----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------+\n| eCDF plots | [plot():]{.underline} | **Displayed for:** all/region/matched/weighted matched, trt/control. |\n| | | |\n| | **Displayed for:** all/matched. | **Includes** PS and all numeric variables. |\n| | | |\n| | **Includes** all variables. | |\n+----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------+\n| eQQ plots | [plot():]{.underline} | \\- |\n| | | |\n| | **Displayed for:** all/matched. | |\n| | | |\n| | **Includes** all variables. | |\n+----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------+\n| Cloud plots | [plot():]{.underline} | **Displayed for:** all/region/matched/weighted matched, trt/control. |\n| | | |\n| | **Displayed for:** all/matched, trt/control. | **Includes** PS and all numeric variables. |\n| | | |\n| | **Includes** PS. | Presented as 2 separate clouds per variable. |\n| | | |\n| | Presented as 4 separate clouds. | |\n+----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------+\n| PS histogram | - [plot()]{.underline}: **Displayed for:** all/matched, trt/control. **Includes** PS. | \\- |\n| | | |\n| | - [cobalt]{.underline}: Highly customizable plots. 
| |\n+----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------+\n\n# Dataset\n\nThe dataset used in the example below can be found here: \\[ps_data.csv\\]()\n\n``` default\n trt control Standardized\nCharacteristic (N = 120) (N = 180) Mean Diff.\n\n sex 0.1690\n F 70 (58.3 %) 90 (50.0 %) \n M 50 (41.7 %) 90 (50.0 %)\n\n age 61.5 (17.12) 49.4 (10.55) 0.7057\n\n weight 67.3 ( 7.33) 63.8 ( 9.64) 0.4741\n\n bmi_cat \n underweight 34 (28.3 %) 63 (35.0 %) -0.1479 \n normal 57 (47.5 %) 61 (33.9 %) 0.2726\n overweight 29 (24.2 %) 56 (31.1 %) -0.1622\n```\n\n# Matching Examples\n\n## Greedy Nearest Neighbor 1 to 1 matching with common support region\n\n### SAS\n\n```sas\nproc psmatch data=data region=cs(extend=0);\n class trtp sex bmi_cat;\n psmodel trtp(Treated=\"trt\")= sex weight age bmi_cat;\n match distance=PS \n method=greedy(k=1 order=descending) \n caliper(MULT=ONE)=0.25;\n output out(obs=match)=ps_res matchid=_MatchID ps=_PScore;\nrun;\n```\n\n### R\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(MatchIt)\n\nps_res <- MatchIt::matchit(\n trtp ~ sex + weight + age + bmi_cat,\n data = data,\n method = \"nearest\",\n distance = \"glm\",\n link = \"logit\",\n discard = \"both\",\n m.order = \"largest\",\n replace = FALSE,\n caliper = 0.25,\n std.caliper = FALSE,\n ratio = 1,\n normalize = FALSE\n)\n```\n:::\n\n\nThe following arguments, when altered in the previous example, can still produce matching results comparable between SAS and R:\n\n- `region` (`discard`)\n\n- `caliper` value\n\n- `order`\n\n- `k` (`ratio`)\n\n- `exact`", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/Comp/r-sas_survival/execute-results/html.json b/_freeze/Comp/r-sas_survival/execute-results/html.json index 36505787e..f1cf581aa 100644 --- 
a/_freeze/Comp/r-sas_survival/execute-results/html.json +++ b/_freeze/Comp/r-sas_survival/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "9cc45a3506267e1ee4f690fb5d40a8ea", + "hash": "bb8cc0e7c77f547f9b1623131c93e749", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"R vs SAS - Kaplan Meier and Cox-proportion hazards modelling\"\nexecute: \n eval: false\n---\n\n# Comparison of SAS vs R\n\nThe following table shows the options available in SAS and R for Kaplan Meier and Cox Proportional Hazards modelling, the capabilities of each language, and whether or not the results from each language match.\n\n+-------------------------------------------------------------------------------+----------------------------------------------+----------------------------------------+---------------+--------------------------------------------------------------------------------------------------------------------------------------+\n| Analysis | Supported in R using {survival} | Supported in SAS | Results Match | Notes |\n+===============================================================================+==============================================+========================================+===============+======================================================================================================================================+\n| Kaplan Meier with confidence intervals using log-log method | Yes (using the option conf.type = \"log-log\") | Yes (Default) | Mostly | 1\\) Survival estimates can disagree when last event is censored and survival estimate does not cross the percentile being estimated. 
|\n| | | | | |\n| | | | | 2\\) Survival estimates at time X can disagree when the time X is after the last observed censored time |\n+-------------------------------------------------------------------------------+----------------------------------------------+----------------------------------------+---------------+--------------------------------------------------------------------------------------------------------------------------------------+\n| Kaplan Meier with confidence intervals using log method | Yes (Default) | Yes (using the option conftype=log) | Mostly | As above. |\n+-------------------------------------------------------------------------------+----------------------------------------------+----------------------------------------+---------------+--------------------------------------------------------------------------------------------------------------------------------------+\n| Cox Proportional Hazards Model using breslow method for ties | Yes (using the option ties=\"breslow\") | Yes (Default) | Yes | |\n+-------------------------------------------------------------------------------+----------------------------------------------+----------------------------------------+---------------+--------------------------------------------------------------------------------------------------------------------------------------+\n| Cox Proportional Hazards Model using efron method for ties | Yes (Default) | Yes (using the option ties=efron) | Yes | |\n+-------------------------------------------------------------------------------+----------------------------------------------+----------------------------------------+---------------+--------------------------------------------------------------------------------------------------------------------------------------+\n| Cox Proportional Hazards Model using exact partial likelihood method for ties | Yes (using the option ties=\"exact\") | Yes (using the option ties=\"discrete\") | Yes | The option 
ties=\"exact\" in SAS uses the exact marginal likelihood which is not available in R |\n+-------------------------------------------------------------------------------+----------------------------------------------+----------------------------------------+---------------+--------------------------------------------------------------------------------------------------------------------------------------+\n\nResults from the examples shown for R [here](https://psiaims.github.io/CAMIS/R/survival.html) and SAS [here](https://psiaims.github.io/CAMIS/SAS/survival.html) were compared below.\n\nComparing the non-stratified model results side-by-side, the CIs for the quartile estimates and landmark estimates are different between R and SAS. HR and CI also have slight differences.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival/r_sas_default.png){fig-align='center' width=75%}\n:::\n:::\n\n\n## Reason 1: Cox Regression Handling of Tied Survival Times\n\nThe default methods for handling ties in a Cox regression model are different which can lead to a different result for the Hazard ratio and associated confidence interval.\n\nR uses \"efron\" by default. SAS uses \"breslow\" by default. Both R and SAS are able to change these default options. By making the changes to the code below, we can force R to use \"breslow\" to match SAS, or SAS to use \"efron\" to match R. 
When the software use the same methods, then we obtain an identical HR and CI.\n\n- R: change method for ties to use \"breslow\"\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfit.cox <- survival::coxph(\n survival::Surv(LENFOLY, FSTAT) ~ AFB,\n ties = \"breslow\",\n data = dat\n)\n```\n:::\n\n\n- SAS: change method for ties to use \"efron\"\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc phreg data=dat;\n class afb;\n model lenfol*fstat(0) = afb/rl ties = efron;\nrun;\n```\n:::\n\n\nIf there are no tied event times, then the methods are equivalent.\n\nThe Breslow approximation is the easiest to program and hence it historically became the first option coded for almost all software. It then ended up as the default option when other options were added in order to maintain \"backwards compatibility\". The Efron option is more accurate if there are a large number of ties, and it was therefore selected as the default option in R. In practice the number of ties is usually small, in which case all the methods are statistically indistinguishable.\n\nFrom the arguments of `coxph` in R, there are three possible choices for handling tied event times 'ties=breslow', 'ties=efron', or 'ties=exact'. This last option is an exact partial likelihood approach, and corresponds to the \"discrete\" method in SAS. See [here](https://www.rdocumentation.org/packages/survival/versions/3.5-8/topics/coxph) for more detail. (For {survival} versions prior to 3.2-14, the options are 'ties=breslow', 'ties=efron', or 'ties=logit'.)\n\n## Reason 2: Kaplan Meier Median Survival Confidence Intervals\n\nThe default methods for calculation of the confidence interval of a KM estimator are different in the two languages (for example, for calculation of the CI associated with the Median Survival estimate, the 25th percentile and the 75th percentile).\n\nR uses \"log\" by default, and SAS uses \"log-log\" by default. 
As shown below, using 'conf.type' option, R can be forced to use the \"log-log\" method to match SAS. Alternatively, using the 'conftype=' option, SAS can be forced to use the \"log\" method to match R.\n\n- R: change to \"log-log\"\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfit.km <- survival::survfit(\n survival::Surv(LENFOLY, FSTAT) ~ AFB,\n conf.type = \"log-log\",\n data = dat\n)\n```\n:::\n\n\n- SAS: change to \"log\"\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc lifetest data=dat conftype=log;\n time lenfoly*fstat(0);\n strata afb;\nrun;\n```\n:::\n\n\n\"log-log\" prevents the problem of having confidence intervals of \\>1 or \\<0, which might happen if using \"log\" transformation. However, both R and SAS will clip the interval at \\[0, 1\\] and report a bound \\>1 as 1 and \\<0 as 0.\n\nFrom a [reference](https://myweb.uiowa.edu/pbreheny/7210/f15/notes/9-10.pdf): The appeal of the log-log interval is clear, but the log-scale interval has the advantage of variance stabilization. As a result, simulation studies have generally found it to have better (closer to nominal) coverage; for this reason, it is the default in the `survival` package.\n\nNow if we change the confidence interval type in SAS to \"log\" and tie handling to \"efron\", the results will be identical to the results in R.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival/r_sas_chg_default.png){fig-align='center' width=75%}\n:::\n:::\n\n\nBelow is the side-by-side comparison for stratified analysis with default methods in SAS matched to R's, the results are also identical.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival/r_sas_stratified.png){fig-align='center' width=75%}\n:::\n:::\n\n\n## Reason 3: Convergence Criteria in Cox Proportional Hazards Model\n\nAnother source of discrepancy between R and SAS in Cox models can arise from the default convergence criteria used by the two software packages.\n\nIn R, the 
`survival::coxph()` function has a default convergence criterion for the relative change in log partial likelihood set at `1e-9`. On the other hand, SAS's `PHREG` procedure uses a default convergence criterion for the relative gradient convergence set at `1e-8`. This discrepancy in the convergence criteria can lead to slight differences in the hazard ratios (HR) obtained from the two software packages.\n\nTo achieve comparable results, it is possible to adjust the convergence criteria in SAS to match the more stringent criteria used by R. This can be done by specifying the `fconv` option in the model statement within `PHREG` to change the criteria to relative function convergence with a value of `1e-9`.\n\n- R: default convergence criterion\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfit.cox <- survival::coxph(survival::Surv(LENFOLY, FSTAT) ~ AFB, data = dat)\n```\n:::\n\n\n- SAS: adjust convergence criterion\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc phreg data=dat;\n class afb;\n model lenfol*fstat(0) = afb / rl fconv = 1e-9;\nrun;\n```\n:::\n\n\nBy making this adjustment, the hazard ratios obtained from SAS will align more closely with those from R or even achieve bitwise reproducibility.\n\nThe convergence criterion details are described in their documentation:\n\n- [SAS PHREG documentation](https://support.sas.com/documentation/onlinedoc/stat/131/phreg.pdf).\n- [R `survival::coxph()` documentation](https://stat.ethz.ch/R-manual/R-devel/library/survival/html/coxph.html).\n- [R `survival::coxph.control()` ancillary arguments documentation](https://stat.ethz.ch/R-manual/R-devel/library/survival/html/coxph.control.html).\n\n# Other Cases Where Discrepancies Are Found\n\nNow we look at other cases when the data has some special type which causes a mismatch between SAS and R. 
Suppose a dataset has 10 observations, and the first 5 are all events, and the last 5 are all censored.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ntest <- tibble(\n time = c(54, 75, 77, 84, 87, 92, 103, 105, 112, 118),\n status = c(1, 1, 1, 1, 1, 0, 0, 0, 0, 0)\n)\n\ntest\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 10 × 2\n time status\n \n 1 54 1\n 2 75 1\n 3 77 1\n 4 84 1\n 5 87 1\n 6 92 0\n 7 103 0\n 8 105 0\n 9 112 0\n10 118 0\n```\n\n\n:::\n:::\n\n\n## Differences Observed in the KM Estimators\n\nSuppose we are interested to know the 25%, 50% and 75% quartile estimates, and the day 80, 100, and 120 estimates.\n\nBelow is the R code:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfit.km <- survival::survfit(\n survival::Surv(time, status) ~ 1,\n conf.type = \"log-log\",\n data = test\n)\n\n## quantile estimates\nquantile(fit.km, probs = c(0.25, 0.5, 0.75))\n\n## landmark estimates at 80, 100, 120-day\nsummary(fit.km, times = c(80, 100, 120), extend = T)\n```\n:::\n\n\nBelow is the SAS code:\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc lifetest data=dat outsurv=_SurvEst timelist= 80 100 120 reduceout stderr; \n time lenfoly*fstat(0);\nrun;\n```\n:::\n\n\nBelow is the side-by-side comparison:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival/r_sas_special.png){fig-align='center' width=75%}\n:::\n:::\n\n\n## Reasons\n\nThe reasons for the differences are because:\n\n**Reason 1: Survival estimate does not cross the 50% percentile.**\n\nThe kth quantile for a survival curve S(t) is the location at which a horizontal line at height p= 1-k intersects the plot of S(t) as shown in the KM curve below. 
Since S(t) is a step function, it is possible for the curve to have a horizontal segment at exactly 1-k, in which case the midpoint of the horizontal segment is returned.\n\nFor example, using the data above, the survival probability is exactly 0.5 at time=87 and remains at 0.5 until the last censored observation at 118.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival/other_diff_km.png){fig-align='center' width=75%}\n:::\n:::\n\n\nWhen using R, the median is the smallest time which survival estimate is \\<= 0.5 --\\> `(87+118) / 2 = 102.5.` However, SAS searches the smallest time which survival estimate is \\< 0.5, which does not exist in this dataset, so it gives \"NE\" (Not evaluable).\n\n\n::: {.cell}\n\n```{.r .cell-code}\npl <- survminer::ggsurvplot(fit.km, conf.int = TRUE, ggtheme = theme_light())\n\npl$plot + geom_hline(yintercept = 0.5, color = \"black\", linetype = \"solid\")\n\nsummary(fit.km)\n```\n:::\n\n\n**Reason 2: Last event censored and prior to the required landmark estimate.**\n\nFor the 120-day event-free estimate, SAS considers that 120 days is beyond the maximum observed day in the data (which was a censored event at time =118). Therefore, SAS considers this as Unknown and returns a result of \"NE\" (Not-evaluable). However, R uses the rate at last observed censored date to estimate the 120-day event free rate. 
As the event-free estimate at time of the last censored event at 118 was 0.5 (0.184, 0.753), R makes the assumption that this is the best estimate for the event-free rate at Time =120.\n\nIf we change the last observation in the dataset to be an event (instead of censored), R and SAS will both give 0 for the event-free survival estimate, because it is for sure that all subjects did not survive beyond 120 days.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ntest <- tibble(\n time = c(54, 75, 77, 84, 87, 92, 103, 105, 112, 118),\n status = c(1, 1, 1, 1, 1, 0, 0, 0, 0, 1)\n)\n\ntest\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 10 × 2\n time status\n \n 1 54 1\n 2 75 1\n 3 77 1\n 4 84 1\n 5 87 1\n 6 92 0\n 7 103 0\n 8 105 0\n 9 112 0\n10 118 1\n```\n\n\n:::\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival/r_sas_special_lst.png){fig-align='center' width=75%}\n:::\n:::\n\n\n# References\n\nBreheny P. \"Inference for the Kaplan-Meier Estimator.\" https://myweb.uiowa.edu/pbreheny/7210/f15/notes/9-10.pdf\n\nBreslow, N. E. (1974) \"Covariance Analysis of Censored Survival Data.\" Biometrics 30:89--99.\n\nEfron, B. (1977. \"The Efficiency of Cox's Likelihood Function for Censored Data.\" Journal of the American Statistical Association 72:557--565.\n\nEmmerson J. and Brown J. M. \"Understanding Survival Analysis in Clinical Trials.\" Clinical Onclogy 33:12-14.\n\nFranklin D. \"Our Survival Confidence Intervals are not the Same!\" PharmaSUG 2014 - Paper SP10. https://www.pharmasug.org/proceedings/2014/SP/PharmaSUG-2014-SP10.pdf\n\nHertz-Picciotto I. and Rockhill B. (1997) \"Validity and efficiency of approximation methods for tied survival times in Cox regression.\" Biometrics 53:1151-1156.\n\nHosmer, D.W. and Lemeshow, S. and May, S. 
(2008) \"Applied Survival Analysis: Regression Modeling of Time to Event Data: Second Edition.\" John Wiley and Sons Inc., New York, NY\n\n[SAS PROC LIFETEST Documentation](https://documentation.sas.com/doc/en/statug/15.2/statug_lifetest_details03.htm)\n\n[SAS PROC PHREG Documentation](https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.4/statug/statug_phreg_toc.htm)", + "markdown": "---\ntitle: \"R vs SAS - Kaplan Meier and Cox-proportion hazards modelling\"\n---\n\n# Comparison of SAS vs R\n\nThe following table shows the options available in SAS and R for Kaplan Meier and Cox Proportional Hazards modelling, the capabilities of each language, and whether or not the results from each language match.\n\n+-------------------------------------------------------------------------------+----------------------------------------------+----------------------------------------+---------------+--------------------------------------------------------------------------------------------------------------------------------------+\n| Analysis | Supported in R using {survival} | Supported in SAS | Results Match | Notes |\n+===============================================================================+==============================================+========================================+===============+======================================================================================================================================+\n| Kaplan Meier with confidence intervals using log-log method | Yes (using the option conf.type = \"log-log\") | Yes (Default) | Mostly | 1\\) Survival estimates can disagree when last event is censored and survival estimate does not cross the percentile being estimated. 
|\n| | | | | |\n| | | | | 2\\) Survival estimates at time X can disagree when the time X is after the last observed censored time |\n+-------------------------------------------------------------------------------+----------------------------------------------+----------------------------------------+---------------+--------------------------------------------------------------------------------------------------------------------------------------+\n| Kaplan Meier with confidence intervals using log method | Yes (Default) | Yes (using the option conftype=log) | Mostly | As above. |\n+-------------------------------------------------------------------------------+----------------------------------------------+----------------------------------------+---------------+--------------------------------------------------------------------------------------------------------------------------------------+\n| Cox Proportional Hazards Model using breslow method for ties | Yes (using the option ties=\"breslow\") | Yes (Default) | Yes | |\n+-------------------------------------------------------------------------------+----------------------------------------------+----------------------------------------+---------------+--------------------------------------------------------------------------------------------------------------------------------------+\n| Cox Proportional Hazards Model using efron method for ties | Yes (Default) | Yes (using the option ties=efron) | Yes | |\n+-------------------------------------------------------------------------------+----------------------------------------------+----------------------------------------+---------------+--------------------------------------------------------------------------------------------------------------------------------------+\n| Cox Proportional Hazards Model using exact partial likelihood method for ties | Yes (using the option ties=\"exact\") | Yes (using the option ties=\"discrete\") | Yes | The option 
ties=\"exact\" in SAS uses the exact marginal likelihood which is not available in R |\n+-------------------------------------------------------------------------------+----------------------------------------------+----------------------------------------+---------------+--------------------------------------------------------------------------------------------------------------------------------------+\n\nResults from the examples shown for R [here](https://psiaims.github.io/CAMIS/R/survival.html) and SAS [here](https://psiaims.github.io/CAMIS/SAS/survival.html) were compared below.\n\nComparing the non-stratified model results side-by-side, the CIs for the quartile estimates and landmark estimates are different between R and SAS. HR and CI also have slight differences.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival/r_sas_default.png){fig-align='center' width=75%}\n:::\n:::\n\n\n## Reason 1: Cox Regression Handling of Tied Survival Times\n\nThe default methods for handling ties in a Cox regression model are different which can lead to a different result for the Hazard ratio and associated confidence interval.\n\nR uses \"efron\" by default. SAS uses \"breslow\" by default. Both R and SAS are able to change these default options. By making the changes to the code below, we can force R to use \"breslow\" to match SAS, or SAS to use \"efron\" to match R. 
When the software use the same methods, then we obtain an identical HR and CI.\n\n- R: change method for ties to use \"breslow\"\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfit.cox <- survival::coxph(\n survival::Surv(LENFOLY, FSTAT) ~ AFB,\n ties = \"breslow\",\n data = dat\n)\n```\n:::\n\n\n- SAS: change method for ties to use \"efron\"\n\n```sas\nproc phreg data=dat;\n class afb;\n model lenfol*fstat(0) = afb/rl ties = efron;\nrun;\n```\n\nIf there are no tied event times, then the methods are equivalent.\n\nThe Breslow approximation is the easiest to program and hence it historically became the first option coded for almost all software. It then ended up as the default option when other options were added in order to maintain \"backwards compatibility\". The Efron option is more accurate if there are a large number of ties, and it was therefore selected as the default option in R. In practice the number of ties is usually small, in which case all the methods are statistically indistinguishable.\n\nFrom the arguments of `coxph` in R, there are three possible choices for handling tied event times 'ties=breslow', 'ties=efron', or 'ties=exact'. This last option is an exact partial likelihood approach, and corresponds to the \"discrete\" method in SAS. See [here](https://www.rdocumentation.org/packages/survival/versions/3.5-8/topics/coxph) for more detail. (For {survival} versions prior to 3.2-14, the options are 'ties=breslow', 'ties=efron', or 'ties=logit'.)\n\n## Reason 2: Kaplan Meier Median Survival Confidence Intervals\n\nThe default methods for calculation of the confidence interval of a KM estimator are different in the two languages (for example, for calculation of the CI associated with the Median Survival estimate, the 25th percentile and the 75th percentile).\n\nR uses \"log\" by default, and SAS uses \"log-log\" by default. As shown below, using 'conf.type' option, R can be forced to use the \"log-log\" method to match SAS. 
Alternatively, using the 'conftype=' option, SAS can be forced to use the \"log\" method to match R.\n\n- R: change to \"log-log\"\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfit.km <- survival::survfit(\n survival::Surv(LENFOLY, FSTAT) ~ AFB,\n conf.type = \"log-log\",\n data = dat\n)\n```\n:::\n\n\n- SAS: change to \"log\"\n\n```sas\nproc lifetest data=dat conftype=log;\n time lenfoly*fstat(0);\n strata afb;\nrun;\n```\n\n\"log-log\" prevents the problem of having confidence intervals of \\>1 or \\<0, which might happen if using \"log\" transformation. However, both R and SAS will clip the interval at \\[0, 1\\] and report a bound \\>1 as 1 and \\<0 as 0.\n\nFrom a [reference](https://myweb.uiowa.edu/pbreheny/7210/f15/notes/9-10.pdf): The appeal of the log-log interval is clear, but the log-scale interval has the advantage of variance stabilization. As a result, simulation studies have generally found it to have better (closer to nominal) coverage; for this reason, it is the default in the `survival` package.\n\nNow if we change the confidence interval type in SAS to \"log\" and tie handling to \"efron\", the results will be identical to the results in R.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival/r_sas_chg_default.png){fig-align='center' width=75%}\n:::\n:::\n\n\nBelow is the side-by-side comparison for stratified analysis with default methods in SAS matched to R's, the results are also identical.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival/r_sas_stratified.png){fig-align='center' width=75%}\n:::\n:::\n\n\n## Reason 3: Convergence Criteria in Cox Proportional Hazards Model\n\nAnother source of discrepancy between R and SAS in Cox models can arise from the default convergence criteria used by the two software packages.\n\nIn R, the `survival::coxph()` function has a default convergence criterion for the relative change in log partial likelihood set at `1e-9`. 
On the other hand, SAS's `PHREG` procedure uses a default convergence criterion for the relative gradient convergence set at `1e-8`. This discrepancy in the convergence criteria can lead to slight differences in the hazard ratios (HR) obtained from the two software packages.\n\nTo achieve comparable results, it is possible to adjust the convergence criteria in SAS to match the more stringent criteria used by R. This can be done by specifying the `fconv` option in the model statement within `PHREG` to change the criteria to relative function convergence with a value of `1e-9`.\n\n- R: default convergence criterion\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfit.cox <- survival::coxph(survival::Surv(LENFOLY, FSTAT) ~ AFB, data = dat)\n```\n:::\n\n\n- SAS: adjust convergence criterion\n\n```sas\nproc phreg data=dat;\n class afb;\n model lenfol*fstat(0) = afb / rl fconv = 1e-9;\nrun;\n```\n\nBy making this adjustment, the hazard ratios obtained from SAS will align more closely with those from R or even achieve bitwise reproducibility.\n\nThe convergence criterion details are described in their documentation:\n\n- [SAS PHREG documentation](https://support.sas.com/documentation/onlinedoc/stat/131/phreg.pdf).\n- [R `survival::coxph()` documentation](https://stat.ethz.ch/R-manual/R-devel/library/survival/html/coxph.html).\n- [R `survival::coxph.control()` ancillary arguments documentation](https://stat.ethz.ch/R-manual/R-devel/library/survival/html/coxph.control.html).\n\n# Other Cases Where Discrepancies Are Found\n\nNow we look at other cases when the data has some special type which causes a mismatch between SAS and R. 
Suppose a dataset has 10 observations, and the first 5 are all events, and the last 5 are all censored.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ntest <- tibble(\n time = c(54, 75, 77, 84, 87, 92, 103, 105, 112, 118),\n status = c(1, 1, 1, 1, 1, 0, 0, 0, 0, 0)\n)\n\ntest\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 10 × 2\n time status\n \n 1 54 1\n 2 75 1\n 3 77 1\n 4 84 1\n 5 87 1\n 6 92 0\n 7 103 0\n 8 105 0\n 9 112 0\n10 118 0\n```\n\n\n:::\n:::\n\n\n## Differences Observed in the KM Estimators\n\nSuppose we are interested to know the 25%, 50% and 75% quartile estimates, and the day 80, 100, and 120 estimates.\n\nBelow is the R code:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfit.km <- survival::survfit(\n survival::Surv(time, status) ~ 1,\n conf.type = \"log-log\",\n data = test\n)\n\n## quantile estimates\nquantile(fit.km, probs = c(0.25, 0.5, 0.75))\n\n## landmark estimates at 80, 100, 120-day\nsummary(fit.km, times = c(80, 100, 120), extend = T)\n```\n:::\n\n\nBelow is the SAS code:\n\n```sas\nproc lifetest data=dat outsurv=_SurvEst timelist= 80 100 120 reduceout stderr; \n time lenfoly*fstat(0);\nrun;\n```\n\nBelow is the side-by-side comparison:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival/r_sas_special.png){fig-align='center' width=75%}\n:::\n:::\n\n\n## Reasons\n\nThe reasons for the differences are because:\n\n**Reason 1: Survival estimate does not cross the 50% percentile.**\n\nThe kth quantile for a survival curve S(t) is the location at which a horizontal line at height p= 1-k intersects the plot of S(t) as shown in the KM curve below. 
Since S(t) is a step function, it is possible for the curve to have a horizontal segment at exactly 1-k, in which case the midpoint of the horizontal segment is returned.\n\nFor example, using the data above, the survival probability is exactly 0.5 at time=87 and remains at 0.5 until the last censored observation at 118.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival/other_diff_km.png){fig-align='center' width=75%}\n:::\n:::\n\n\nWhen using R, the median is the smallest time at which the survival estimate is \\<= 0.5 --\\> `(87+118) / 2 = 102.5.` However, SAS searches for the smallest time at which the survival estimate is \\< 0.5, which does not exist in this dataset, so it gives \"NE\" (Not evaluable).\n\n\n::: {.cell}\n\n```{.r .cell-code}\npl <- survminer::ggsurvplot(fit.km, conf.int = TRUE, ggtheme = theme_light())\n\npl$plot + geom_hline(yintercept = 0.5, color = \"black\", linetype = \"solid\")\n\nsummary(fit.km)\n```\n:::\n\n\n**Reason 2: Last event censored and prior to the required landmark estimate.**\n\nFor the 120-day event-free estimate, SAS considers that 120 days is beyond the maximum observed day in the data (which was a censored event at time =118). Therefore, SAS considers this as Unknown and returns a result of \"NE\" (Not-evaluable). However, R uses the rate at last observed censored date to estimate the 120-day event free rate. 
As the event-free estimate at time of the last censored event at 118 was 0.5 (0.184, 0.753), R makes the assumption that this is the best estimate for the event-free rate at Time =120.\n\nIf we change the last observation in the dataset to be an event (instead of censored), R and SAS will both give 0 for the event-free survival estimate, because it is for sure that all subjects did not survive beyond 120 days.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ntest <- tibble(\n time = c(54, 75, 77, 84, 87, 92, 103, 105, 112, 118),\n status = c(1, 1, 1, 1, 1, 0, 0, 0, 0, 1)\n)\n\ntest\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 10 × 2\n time status\n \n 1 54 1\n 2 75 1\n 3 77 1\n 4 84 1\n 5 87 1\n 6 92 0\n 7 103 0\n 8 105 0\n 9 112 0\n10 118 1\n```\n\n\n:::\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival/r_sas_special_lst.png){fig-align='center' width=75%}\n:::\n:::\n\n\n# References\n\nBreheny P. \"Inference for the Kaplan-Meier Estimator.\" https://myweb.uiowa.edu/pbreheny/7210/f15/notes/9-10.pdf\n\nBreslow, N. E. (1974) \"Covariance Analysis of Censored Survival Data.\" Biometrics 30:89--99.\n\nEfron, B. (1977). \"The Efficiency of Cox's Likelihood Function for Censored Data.\" Journal of the American Statistical Association 72:557--565.\n\nEmmerson J. and Brown J. M. \"Understanding Survival Analysis in Clinical Trials.\" Clinical Oncology 33:12-14.\n\nFranklin D. \"Our Survival Confidence Intervals are not the Same!\" PharmaSUG 2014 - Paper SP10. https://www.pharmasug.org/proceedings/2014/SP/PharmaSUG-2014-SP10.pdf\n\nHertz-Picciotto I. and Rockhill B. (1997) \"Validity and efficiency of approximation methods for tied survival times in Cox regression.\" Biometrics 53:1151-1156.\n\nHosmer, D.W. and Lemeshow, S. and May, S. 
(2008) \"Applied Survival Analysis: Regression Modeling of Time to Event Data: Second Edition.\" John Wiley and Sons Inc., New York, NY\n\n[SAS PROC LIFETEST Documentation](https://documentation.sas.com/doc/en/statug/15.2/statug_lifetest_details03.htm)\n\n[SAS PROC PHREG Documentation](https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.4/statug/statug_phreg_toc.htm)", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/Comp/r-sas_survival_cif/execute-results/html.json b/_freeze/Comp/r-sas_survival_cif/execute-results/html.json index b7bf2940f..288217ff2 100644 --- a/_freeze/Comp/r-sas_survival_cif/execute-results/html.json +++ b/_freeze/Comp/r-sas_survival_cif/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "077fddef664329230c4523bba2e7ecad", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"R vs SAS - Estimating Cumulative Incidence Functions\"\n---\n\n# Comparison of R and SAS\n\nThe following table shows the options available in R and SAS for estimating cumulative incidence functions (CIFs) in a competing risk analysis, especially the capabilities and whether the results match.\n\n| Analysis | Supported in R package `tidycmprsk` | Supported in SAS `PROC LIFETEST` | Results Match |\n|--------------------|------------------|------------------|------------------|\n| CIF estimates | Yes: with function `cuminc()` | Yes: with `eventcode` option in `TIME` statement | Yes |\n| Gray's test for equality across groups | Yes: default when the group variable (a factor) is on the right-hand side of the input formula | Yes: default with `strata` statement | Yes |\n| Variance estimates for the CIF estimates using Aalen (1978) | Yes: default | Yes (default) | Yes |\n| Variance estimates for the CIF estimates using the delta method | No | Yes: with option `error=delta` in `PROC TEST` statement | N/A |\n| Confidence intervals for CIF estimates using log-log transformation | Yes: default | Yes: default | Yes |\n| Confidence intervals for CIF 
estimates using other transformations | No | Yes: with `conftype` option in `LIFETEST` statement | N/A |\n| CIF estimates for specified time points | Yes: with `times` option when summarizing results, e.g., using `tidy()` | Yes: with `timelist` option in `LIFETEST` statement | Yes |\n| CIF plot by groups | Yes: with `ggsurvfit::ggcumin()` | Yes: with `plots=cif` option in `LIFETEST` statement | N/A |\n\nAdditional details for using `tidycmprsk` are given [here](https://psiaims.github.io/CAMIS/R/survival_cif.html \"cif in r\") and for SAS `PROC LIFETEST` [here](https://psiaims.github.io/CAMIS/SAS/survival_cif.html \"cif in sas\") .\n\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-24\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages ───────────────────────────────────────────────────────────────────\n package * version date (UTC) lib source\n cmprsk 2.2-12 2024-05-19 [1] RSPM\n survival 3.8-3 2024-12-17 [2] CRAN (R 4.5.2)\n tidycmprsk 1.1.1 2025-11-14 [1] RSPM\n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n\n─ External software ──────────────────────────────────────────────────────────\n setting value\n SAS 9.04.01M7P080520\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n\n:::\n\n# References\n\n[SAS PROC LIFETEST Documentation on CIF 
estimates](https://documentation.sas.com/doc/en/statug/15.2/statug_lifetest_details25.htm#statug.lifetest.lftcifest \"cif in sas\")\n\n[R package 'tidycmprsk' Documentation](https://github.com/MSKCC-Epi-Bio/tidycmprsk \"cif in r\")\n", + "markdown": "---\ntitle: \"R vs SAS - Estimating Cumulative Incidence Functions\"\n---\n\n# Comparison of R and SAS\n\nThe following table shows the options available in R and SAS for estimating cumulative incidence functions (CIFs) in a competing risk analysis, especially the capabilities and whether the results match.\n\n| Analysis | Supported in R package `tidycmprsk` | Supported in SAS `PROC LIFETEST` | Results Match |\n|--------------------|------------------|------------------|------------------|\n| CIF estimates | Yes: with function `cuminc()` | Yes: with `eventcode` option in `TIME` statement | Yes |\n| Gray's test for equality across groups | Yes: default when the group variable (a factor) is on the right-hand side of the input formula | Yes: default with `strata` statement | Yes |\n| Variance estimates for the CIF estimates using Aalen (1978) | Yes: default | Yes (default) | Yes |\n| Variance estimates for the CIF estimates using the delta method | No | Yes: with option `error=delta` in `PROC TEST` statement | N/A |\n| Confidence intervals for CIF estimates using log-log transformation | Yes: default | Yes: default | Yes |\n| Confidence intervals for CIF estimates using other transformations | No | Yes: with `conftype` option in `LIFETEST` statement | N/A |\n| CIF estimates for specified time points | Yes: with `times` option when summarizing results, e.g., using `tidy()` | Yes: with `timelist` option in `LIFETEST` statement | Yes |\n| CIF plot by groups | Yes: with `ggsurvfit::ggcumin()` | Yes: with `plots=cif` option in `LIFETEST` statement | N/A |\n\nAdditional details for using `tidycmprsk` are given [here](https://psiaims.github.io/CAMIS/R/survival_cif.html \"cif in r\") and for SAS `PROC LIFETEST` 
[here](https://psiaims.github.io/CAMIS/SAS/survival_cif.html \"cif in sas\") .\n\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os Ubuntu 24.04.3 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-17\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n package * version date (UTC) lib source\n cmprsk 2.2-12 2024-05-19 [1] RSPM (R 4.5.0)\n survival 3.8-3 2024-12-17 [2] CRAN (R 4.5.2)\n tidycmprsk 1.1.1 2025-11-14 [1] RSPM (R 4.5.0)\n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.2/lib/R/library\n\n─ External software ──────────────────────────────────────────────────────────\n setting value\n SAS 9.04.01M7P080520\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n\n:::\n\n# References\n\n[SAS PROC LIFETEST Documentation on CIF estimates](https://documentation.sas.com/doc/en/statug/15.2/statug_lifetest_details25.htm#statug.lifetest.lftcifest \"cif in sas\")\n\n[R package 'tidycmprsk' Documentation](https://github.com/MSKCC-Epi-Bio/tidycmprsk \"cif in r\")\n", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/Comp/r-sas_survival_csh/execute-results/html.json b/_freeze/Comp/r-sas_survival_csh/execute-results/html.json index 1c0c1622a..bcd0e5aaa 100644 --- a/_freeze/Comp/r-sas_survival_csh/execute-results/html.json +++ b/_freeze/Comp/r-sas_survival_csh/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "b309f1c14f7f5fd8c8f488305a56c0d0", "result": { "engine": "knitr", 
- "markdown": "---\ntitle: \"R vs SAS - Estimating and Testing Cause-Specific Hazard\"\n---\n\n# Comparison of R and SAS\n\nThe following table shows the options available in R and SAS for estimating and testing cause-specific hazard in a competing risk analysis, especially the capabilities and whether the results match.\n\n| Analysis | Supported in R package `survival` | Supported in SAS `PROC PHREG` | Results Match |\n|-------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------|--------------------------|\n| Cause-specific hazard ratio estimates | Yes: with `coxph()` | Yes | Yes |\n| Stratified cause-specific hazard ratio estimates | Yes: with the stratification variable `x` specified as `strata(factor(x))` on the right-hand side of the input formula | Yes: with `strata` statement | Yes |\n| Variance estimates for the parameter estimates with robust sandwich estimator | Yes: default (`robust = TRUE)` | Yes: with `covsandwich` or `covs` option in `proc phreg` statement | Yes |\n| Confidence intervals for hazard ratio estimates | Yes: Wald's method by default | Yes: Wald's method by default | Yes |\n| Estimating cause specific hazard for multiple events | Yes | Yes | Depends (see note below) |\n\nAdditional details for using `survival` in R are given [here](https://psiaims.github.io/CAMIS/R/survival_csh.html \"csh in r\") and for SAS `PROC PHREG` [here](https://psiaims.github.io/CAMIS/SAS/survival_csh.html \"csh in sas\") .\n\n### Estimating cause specific hazard ratios for multiple events\n\nR and SAS have different approach when it comes to estimating the hazard ratios for multiple events. 
Results for the hazard ratio estimates are the same between the two; what is different is the global hypothesis:\n\n- The global hypothesis per `coxph()` in this case is \"There is no difference in the hazards of experiencing any of the events.\"\n\n- In `PROC PHREG`, one syntax allows the hazard ratio estimates to be generated for all events. However, there is no corresponding global hypothesis as in `coxph()` in R. In SAS, there are only individual global hypotheses, one for each event. In addition, currently, when this syntax is used in SAS, stratified analysis cannot be implemented.\n\n## Summary\n\n- Most of the functionality of `survival::coxph()` and `proc phreg` also apply to estimating cause-specific hazards in competing risks settings.\n\n- Due to the different internal numerical estimation methods of R and SAS, results only match up to the 4th decimal places. However, overall consistency can be established between the two for estimating and testing cause-specific hazard ratio using Cox's PH model.\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-24\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages ───────────────────────────────────────────────────────────────────\n package * version date (UTC) lib source\n survival 3.8-3 2024-12-17 [2] CRAN (R 4.5.2)\n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n\n─ External 
software ──────────────────────────────────────────────────────────\n setting value\n SAS 9.04.01M7P080520\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n\n:::\n\n# References\n\n[SAS PROC LIFETEST Documentation on CIF estimates](https://documentation.sas.com/doc/en/statug/15.2/statug_lifetest_details25.htm#statug.lifetest.lftcifest \"cif in sas\")\n\n[R package 'tidycmprsk' Documentation](https://github.com/MSKCC-Epi-Bio/tidycmprsk \"cif in r\")\n", + "markdown": "---\ntitle: \"R vs SAS - Estimating and Testing Cause-Specific Hazard\"\n---\n\n# Comparison of R and SAS\n\nThe following table shows the options available in R and SAS for estimating and testing cause-specific hazard in a competing risk analysis, especially the capabilities and whether the results match.\n\n| Analysis | Supported in R package `survival` | Supported in SAS `PROC PHREG` | Results Match |\n|-------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------|--------------------------|\n| Cause-specific hazard ratio estimates | Yes: with `coxph()` | Yes | Yes |\n| Stratified cause-specific hazard ratio estimates | Yes: with the stratification variable `x` specified as `strata(factor(x))` on the right-hand side of the input formula | Yes: with `strata` statement | Yes |\n| Variance estimates for the parameter estimates with robust sandwich estimator | Yes: default (`robust = TRUE)` | Yes: with `covsandwich` or `covs` option in `proc phreg` statement | Yes |\n| Confidence intervals for hazard ratio estimates | Yes: Wald's method by default | Yes: Wald's method by default | Yes |\n| Estimating cause specific hazard for multiple events | Yes | Yes | Depends (see note below) |\n\nAdditional details for using `survival` in R are given 
[here](https://psiaims.github.io/CAMIS/R/survival_csh.html \"csh in r\") and for SAS `PROC PHREG` [here](https://psiaims.github.io/CAMIS/SAS/survival_csh.html \"csh in sas\") .\n\n### Estimating cause specific hazard ratios for multiple events\n\nR and SAS have different approach when it comes to estimating the hazard ratios for multiple events. Results for the hazard ratio estimates are the same between the two; what is different is the global hypothesis:\n\n- The global hypothesis per `coxph()` in this case is \"There is no difference in the hazards of experiencing any of the events.\"\n\n- In `PROC PHREG`, one syntax allows the hazard ratio estimates to be generated for all events. However, there is no corresponding global hypothesis as in `coxph()` in R. In SAS, there are only individual global hypotheses, one for each event. In addition, currently, when this syntax is used in SAS, stratified analysis cannot be implemented.\n\n## Summary\n\n- Most of the functionality of `survival::coxph()` and `proc phreg` also apply to estimating cause-specific hazards in competing risks settings.\n\n- Due to the different internal numerical estimation methods of R and SAS, results only match up to the 4th decimal places. 
However, overall consistency can be established between the two for estimating and testing cause-specific hazard ratio using Cox's PH model.\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os Ubuntu 24.04.3 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-17\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n package * version date (UTC) lib source\n survival 3.8-3 2024-12-17 [2] CRAN (R 4.5.2)\n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.2/lib/R/library\n\n─ External software ──────────────────────────────────────────────────────────\n setting value\n SAS 9.04.01M7P080520\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n\n:::\n\n# References\n\n[SAS PROC LIFETEST Documentation on CIF estimates](https://documentation.sas.com/doc/en/statug/15.2/statug_lifetest_details25.htm#statug.lifetest.lftcifest \"cif in sas\")\n\n[R package 'tidycmprsk' Documentation](https://github.com/MSKCC-Epi-Bio/tidycmprsk \"cif in r\")\n", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/Comp/r-sas_ttest_2Sample/execute-results/html.json b/_freeze/Comp/r-sas_ttest_2Sample/execute-results/html.json index a0a21214f..21effc784 100644 --- a/_freeze/Comp/r-sas_ttest_2Sample/execute-results/html.json +++ b/_freeze/Comp/r-sas_ttest_2Sample/execute-results/html.json @@ -3,9 +3,7 @@ "result": { "engine": "knitr", "markdown": "---\ntitle: \"R vs SAS Two Sample T-Test\"\n---\n\n\n\n# Two 
Sample t-test Comparison\n\nThe following table shows the types of Two Sample t-test analysis, the capabilities of each language, and whether or not the results from each language match.\n\n| Analysis | Supported in R | Supported in SAS | Results Match | Notes |\n|---------------|---------------|---------------|---------------|---------------|\n| Two sample Student's t-test | [Yes](../R/ttest_2Sample.html#baseS) | [Yes](../SAS/ttest_2Sample.html#sas) | [Yes](#student) | In Base R, use `t.test()` function with `paired = FALSE` and `var.equal = TRUE`|\n| Two sample Welch's t-test | [Yes](../R/ttest_2Sample.html#baseW) | [Yes](../SAS/ttest_2Sample.html#sas) | [Yes](#welch) | In Base R, use `t.test()` function with `paired = FALSE` and `var.equal = FALSE` |\n\n## Comparison Results\n\n### Student's T-Test {#student}\n\nHere is a table of comparison values between `t.test()`, `proc_ttest()`, and SAS `PROC TTEST`:\n\n| Statistic | t.test() | proc_ttest() | PROC TTEST | Match | Notes |\n|--------------------|-----------|--------------|------------|-------|-------|\n| Degrees of Freedom | 30 | 30 | 30 | Yes | |\n| t value | -0.6969002 | -0.6969002 | -0.6969002 | Yes | |\n| p value | 0.4912306 | 0.4912306 | 0.4912306 | Yes | |\n\n\n### Welch's T-Test {#welch}\n\nIn the Welch T-test the variance and effective degrees of freedom are calculated using Satterthwaite method.\n\nHere is a table of comparison values between `t.test()`, `proc_ttest()`, and SAS `PROC TTEST`for two example:\n\nExample with fairly equal variances:\n| Statistic | t.test() | proc_ttest() | PROC TTEST | Match | Notes |\n|--------------------|-----------|--------------|------------|-------|-------|\n| Degrees of Freedom | 29.69359 | 29.69359 | 29.69359 | Yes | |\n| t value | -0.6969002 | -0.6969002 | -0.6969002 | Yes | |\n| p value | 0.4912856 | 0.4912856 | 0.4912856 | Yes | |\n\n\nExample with unequal variances:\n| Statistic | t.test() | proc_ttest() | PROC TTEST | Match | Notes 
|\n|--------------------|-----------|--------------|------------|-------|-------|\n| Degrees of Freedom | 18.137 | 18.137 | 18.137 | Yes | |\n| t value | -1.54 | -1.54 | -1.54 | Yes | |\n| p value | 0.1413 | 0.1413 | 0.1413 | Yes | |\n\n\n# Summary and Recommendation\n\nFor the Student's T-Test, the R two sample `t.test()` and **procs** `proc_ttest()` capabilities are comparable to SAS. \nComparison between SAS and R show identical results for the datasets tried. \n\nLikewise, for the Welch's T-Test, the R two sample `t.test()` and **procs** `proc_ttest()` capabilities are comparable to SAS. \nComparison between SAS and R show identical results for the datasets tried. \n\n\n# References\n\nR `t.test()` documentation: \n\nR `proc_ttest()` documentation: \n\nSAS `PROC TTEST` Two Sample analysis documentation: \n", - "supporting": [ - "r-sas_ttest_2Sample_files" - ], + "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" ], diff --git a/_freeze/R/Accelerated_Failure_time_model/figure-html/unnamed-chunk-3-1.png b/_freeze/R/Accelerated_Failure_time_model/figure-html/unnamed-chunk-3-1.png index 585702888..de95995b3 100644 Binary files a/_freeze/R/Accelerated_Failure_time_model/figure-html/unnamed-chunk-3-1.png and b/_freeze/R/Accelerated_Failure_time_model/figure-html/unnamed-chunk-3-1.png differ diff --git a/_freeze/R/Accelerated_Failure_time_model/figure-html/unnamed-chunk-6-1.png b/_freeze/R/Accelerated_Failure_time_model/figure-html/unnamed-chunk-6-1.png index 05e156982..eabb5fd3d 100644 Binary files a/_freeze/R/Accelerated_Failure_time_model/figure-html/unnamed-chunk-6-1.png and b/_freeze/R/Accelerated_Failure_time_model/figure-html/unnamed-chunk-6-1.png differ diff --git a/_freeze/R/Clustering_Knowhow/execute-results/html.json b/_freeze/R/Clustering_Knowhow/execute-results/html.json index afe460a07..2c0aa7f4a 100644 --- a/_freeze/R/Clustering_Knowhow/execute-results/html.json +++ b/_freeze/R/Clustering_Knowhow/execute-results/html.json @@ -2,7 +2,7 @@ "hash": 
"306f2f93d6f421de390f5d3e60d96d6c", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Clustering Data\"\nauthor: \"Niladri Dasgupta\"\ndate: \"2024-08-12\"\n---\n\n\n\n## **What is clustering?**\n\nClustering is a method of segregating unlabeled data or data points into different groups/clusters such that similar data points fall in the same cluster than those which differ from the others. The similarity measures are calculated using distance based metrics like Euclidean distance, Cosine similarity, Manhattan distance, etc.\n\nFor Example, In the graph given below, we can clearly see that the data points can be grouped into 3 clusters\n\n![](../images/Clustering/clustering_ex.PNG)
\n\n## **Type of Clustering Algorithm**\n\nSome of the popular clustering algorithms are:\n\n1. Centroid-based Clustering (Partitioning methods)\n2. Density-based Clustering (Model-based methods)\n3. Connectivity-based Clustering (Hierarchical clustering)\n4. Distribution-based Clustering\n\n### 1.Centroid-based Clustering (Partitioning methods)\n\nPartitioning methods group data points on the basis of their closeness. The similarity measure chosen for these algorithms are Euclidean distance, Manhattan Distance or Minkowski Distance.\n\nThe primary drawback for these algorithms is we need to pre define the number of clusters before allocating the data points to a group.\n\nOne of the popular centroid based clustering technique is K means Clustering.
\n\n#### **K Means Clustering**\n\nK means is an iterative clustering algorithm that works in these 5 steps:\n\n1. Specify the desired number of clusters K: Let us choose k=2 for these 5 data points in 2-D space.\n\n ![](../images/Clustering/kmeans_1.png)\n\n2. Randomly assign each data point to a cluster: Let’s assign three points in cluster 1, shown using orange color, and two points in cluster 2, shown using grey color.\n\n ![](../images/Clustering/kmeans_2.png)\n\n3. Compute cluster centroids: Centroids correspond to the arithmetic mean of data points assigned to the cluster. The centroid of data points in the orange cluster is shown using the orange cross, and those in the grey cluster using a grey cross.\n\n ![](../images/Clustering/kmeans_3.png)\n\n4. Assigns each observation to their closest centroid, based on the Euclidean distance between the object and the centroid\n\n ![](../images/Clustering/kmeans_4.png)\n\n5. Re-computing the centroids for both clusters.\n\n ![](../images/Clustering/kmeans_5.png)\n\nWe will repeat the 4th and 5th steps until no further switching of data points between two clusters for two successive repeats.
\n\n#### K-Means Clustering in R\n\n**Step 1: Load packages**\n\nFirst, we’ll load below packages that contain several useful functions regarding k-means clustering in R.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(cluster) # Contain cluster function\nlibrary(dplyr) # Data manipulation\nlibrary(ggplot2) # Plotting function\nlibrary(readr) # Read and write excel/csv files\nlibrary(factoextra) # Extract and Visualize the Results of Multivariate Data Analyses\n```\n:::\n\n\n**Step 2: Load Data**\n\nWe have used the “Mall_Customer” dataset in R for this case study.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Loading the data\ndf <- read_csv(\"../data/Mall_Customers.csv\")\n\n# Structure of the data\nstr(df)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nspc_tbl_ [200 × 5] (S3: spec_tbl_df/tbl_df/tbl/data.frame)\n $ CustomerID : chr [1:200] \"0001\" \"0002\" \"0003\" \"0004\" ...\n $ Genre : chr [1:200] \"Male\" \"Male\" \"Female\" \"Female\" ...\n $ Age : num [1:200] 19 21 20 23 31 22 35 23 64 30 ...\n $ Annual Income (k$) : num [1:200] 15 15 16 16 17 17 18 18 19 19 ...\n $ Spending Score (1-100): num [1:200] 39 81 6 77 40 76 6 94 3 72 ...\n - attr(*, \"spec\")=\n .. cols(\n .. CustomerID = col_character(),\n .. Genre = col_character(),\n .. Age = col_double(),\n .. `Annual Income (k$)` = col_double(),\n .. `Spending Score (1-100)` = col_double()\n .. 
)\n - attr(*, \"problems\")= \n```\n\n\n:::\n:::\n\n\ndataset consists of 200 customers data with their age, annual income and Spending score.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Rename the columns\ndf <- df |>\n rename(\n \"Annual_Income\" = `Annual Income (k$)`,\n \"Spending_score\" = `Spending Score (1-100)`\n )\n\n# remove rows with missing values\ndf <- na.omit(df)\n\n# scale each variable to have a mean of 0 and sd of 1\ndf1 <- df |>\n mutate(across(where(is.numeric), scale))\n\n# view first six rows of dataset\nhead(df1)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 6 × 5\n CustomerID Genre Age[,1] Annual_Income[,1] Spending_score[,1]\n \n1 0001 Male -1.42 -1.73 -0.434\n2 0002 Male -1.28 -1.73 1.19 \n3 0003 Female -1.35 -1.70 -1.71 \n4 0004 Female -1.13 -1.70 1.04 \n5 0005 Female -0.562 -1.66 -0.395\n6 0006 Female -1.21 -1.66 0.999\n```\n\n\n:::\n:::\n\n\n
\n\nWe have separated the CustomerID and Genre from the dataset. The reason for removing these variables from the cluster dataset as Kmeans can handle only numerical variables.\\\nTo create cluster with categorical or ordinal variable we can use k-Medoid clustering.
\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndf1 <- df1[, 4:5]\n```\n:::\n\n\n**Step 3: Find the Optimal Number of Clusters**\n\nTo perform k-means clustering in R we can use the built-in kmeans() function, which uses the following syntax:\n\n``` \n kmeans(data, centers, iter.max, nstart)\n where:\n - data: Name of the dataset.\n - centers: The number of clusters, denoted k.\n - iter.max (optional): The maximum number of iterations allowed. Default value is 10.\n - nstart (optional): The number of initial configurations. Default value is 1.\n```\n\n- Centers is the k of K Means. centers = 5 would results in 5 clusters being created. We need to **predefine the k** before the cluster process starts.\\\n- iter.max is the number of times the algorithm will repeat the cluster assignment and update the centers / centroids. Iteration stops after this many iterations even if the convergence criterion is not satisfied\n- nstart is the number of times the initial starting points are re-sampled. It means at the initialization of Clusters you need to specify how many clusters you want and the algorithm will randomly find same number of centroids to initialize. nstart gives you an edge to initialize the centroids through re sampling.\\\n For example if total number of cluster is 3 and nstart=25 then it extracts 3 sets of data, 25 times, and for each of these times, the algorithm is run (up to iter.max \\# of iterations) and the cost function (total sum of the squares) is evaluated and finally 3 centroids with lowest cost function are chosen to start the clustering process.\n\nTo find the best number of clusters/centroids there are two popular methods as shown below.\n\n[**A. Elbow Method:**]{.underline}\n\nIt has two parts as explained below-\n\n- WSS: The Within Sum of Squares (WSS) is the sum of distance between the centroids and every other data points within a cluster. 
Small WSS indicates that every data point is close to its nearest centroids.\n\n- Elbow rule/method: Here we plot out the WSS score against the number of K. Because with the number of K increasing, the WSS will always decrease; however, the magnitude of decrease between each k will be diminishing, and the plot will be a curve which looks like an arm that curled up. In this way, we can find out which point falls on the elbow.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nset.seed(1)\nwss <- NULL\n\n# Feeding different centroid/cluster and record WSS\nfor (i in 1:10) {\n fit = stats::kmeans(df1, centers = i, nstart = 25)\n wss = c(wss, fit$tot.withinss)\n}\n\n# Visualize the plot\nplot(1:10, wss, type = \"o\", xlab = 'Number of clusters(k)')\n```\n\n::: {.cell-output-display}\n![](Clustering_Knowhow_files/figure-html/unnamed-chunk-5-1.png){width=672}\n:::\n:::\n\n\nBased on the above plot at k=5 we can see an “elbow” where the sum of squares begins to “bend” or level off so the ideal number of clusters should be 5.\n\nThe above process to compute the “Elbow method” has been wrapped up in a single function (fviz_nbclust):\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfactoextra::fviz_nbclust(df1, kmeans, method = \"wss\", nstart = 25)\n```\n\n::: {.cell-output-display}\n![](Clustering_Knowhow_files/figure-html/unnamed-chunk-6-1.png){width=672}\n:::\n:::\n\n\n[**B. Silhouette Method:**]{.underline}\n\nThe silhouette coefficient or silhouette score is a measure of how similar a data point is within-cluster (intra-cluster) compared to other clusters (inter-cluster).\\\nThe Silhouette Coefficient is calculated using the mean *intra-cluster distance (a)* and the *mean nearest-cluster distance (b)* for each sample. 
The Silhouette Coefficient for a sample is *(b - a) / max(a, b)*\n\nHere we will plot the silhouette width/coefficient for different number of clusters and will choose the point where the silhouette width is highest.\n\n**Points to Remember While Calculating Silhouette Coefficient:**\n\nThe value of the silhouette coefficient is between \\[-1, 1\\]. A score of 1 denotes the best, meaning that the data points are very compact within the cluster to which it belongs and far away from the other clusters. The worst value is -1. Values near 0 denote overlapping clusters.\n\nIn this demonstration, we are going to see how silhouette method is used.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsilhouette_score <- function(k) {\n km <- stats::kmeans(df1, centers = k, nstart = 25)\n ss <- cluster::silhouette(km$cluster, dist(df1))\n mean(ss[, 3])\n}\nk <- 2:10\n\navg_sil <- sapply(k, silhouette_score)\nplot(\n k,\n type = 'b',\n avg_sil,\n xlab = 'Number of clusters',\n ylab = 'Average Silhouette Scores',\n frame = FALSE\n)\n```\n\n::: {.cell-output-display}\n![](Clustering_Knowhow_files/figure-html/unnamed-chunk-7-1.png){width=672}\n:::\n:::\n\n\nFrom the above method we can see the silhouette width is highest at cluster 5 so the optimal number of cluster should be 5.\n\nSimilar to the elbow method, this process to compute the “average silhoutte method” has been wrapped up in a single function (fviz_nbclust):\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfactoextra::fviz_nbclust(df1, kmeans, method = 'silhouette', nstart = 25)\n```\n\n::: {.cell-output-display}\n![](Clustering_Knowhow_files/figure-html/unnamed-chunk-8-1.png){width=672}\n:::\n:::\n\n\nThe optimal number of clusters is 5.\n\n**Step 4: Perform K-Means Clustering with Optimal K**\n\nLastly, we can perform k-means clustering on the dataset using the optimal value for k of 5:\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# make this example reproducible\nset.seed(1)\n\n# perform k-means clustering with k = 5 clusters\nfit <- 
stats::kmeans(df1, 5, nstart = 25)\nfit\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nK-means clustering with 5 clusters of sizes 35, 39, 22, 23, 81\n\nCluster means:\n Annual_Income Spending_score\n1 1.0523622 -1.28122394\n2 0.9891010 1.23640011\n3 -1.3262173 1.12934389\n4 -1.3042458 -1.13411939\n5 -0.2004097 -0.02638995\n\nClustering vector:\n [1] 4 3 4 3 4 3 4 3 4 3 4 3 4 3 4 3 4 3 4 3 4 3 4 3 4 3 4 3 4 3 4 3 4 3 4 3 4\n [38] 3 4 3 4 3 4 5 4 3 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5\n [75] 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5\n[112] 5 5 5 5 5 5 5 5 5 5 5 5 2 1 2 5 2 1 2 1 2 5 2 1 2 1 2 1 2 1 2 5 2 1 2 1 2\n[149] 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1\n[186] 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2\n\nWithin cluster sum of squares by cluster:\n[1] 18.304646 19.655252 5.217630 7.577407 14.485632\n (between_SS / total_SS = 83.6 %)\n\nAvailable components:\n\n[1] \"cluster\" \"centers\" \"totss\" \"withinss\" \"tot.withinss\"\n[6] \"betweenss\" \"size\" \"iter\" \"ifault\" \n```\n\n\n:::\n:::\n\n\nWe can visualize the clusters on a scatterplot that displays the first two principal components on the axes using the fivz_cluster() function:\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# plot results of final k-means model\nfactoextra::fviz_cluster(fit, data = df1)\n```\n\n::: {.cell-output-display}\n![](Clustering_Knowhow_files/figure-html/unnamed-chunk-10-1.png){width=672}\n:::\n:::\n\n\n**Step 5: Exporting the data by adding generated clusters**\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Adding the clusters in the main data\ndf_cluster <- df |>\n mutate(cluster = fit$cluster)\n\n# Creating Summary of created clusters based on existing variables\ndf_summary <- df_cluster |>\n group_by(cluster) |>\n summarise(\n records = n(),\n avg_age = mean(Age),\n avg_annual_income = mean(Annual_Income),\n avg_spending_score = mean(Spending_score)\n )\n\nprint(df_summary)\n```\n\n::: {.cell-output 
.cell-output-stdout}\n\n```\n# A tibble: 5 × 5\n cluster records avg_age avg_annual_income avg_spending_score\n \n1 1 35 41.1 88.2 17.1\n2 2 39 32.7 86.5 82.1\n3 3 22 25.3 25.7 79.4\n4 4 23 45.2 26.3 20.9\n5 5 81 42.7 55.3 49.5\n```\n\n\n:::\n:::\n\n\nWe can create a group of potential customers to target based on their age, average annual income and average spending score.\n", + "markdown": "---\ntitle: \"Clustering Data\"\nauthor: \"Niladri Dasgupta\"\ndate: \"2024-08-12\"\n---\n\n\n\n## **What is clustering?**\n\nClustering is a method of segregating unlabeled data or data points into different groups/clusters such that similar data points fall in the same cluster than those which differ from the others. The similarity measures are calculated using distance based metrics like Euclidean distance, Cosine similarity, Manhattan distance, etc.\n\nFor Example, In the graph given below, we can clearly see that the data points can be grouped into 3 clusters\n\n![](../images/Clustering/clustering_ex.PNG)
\n\n## **Type of Clustering Algorithm**\n\nSome of the popular clustering algorithms are:\n\n1. Centroid-based Clustering (Partitioning methods)\n2. Density-based Clustering (Model-based methods)\n3. Connectivity-based Clustering (Hierarchical clustering)\n4. Distribution-based Clustering\n\n### 1.Centroid-based Clustering (Partitioning methods)\n\nPartitioning methods group data points on the basis of their closeness. The similarity measure chosen for these algorithms are Euclidean distance, Manhattan Distance or Minkowski Distance.\n\nThe primary drawback for these algorithms is we need to pre define the number of clusters before allocating the data points to a group.\n\nOne of the popular centroid based clustering technique is K means Clustering.
\n\n#### **K Means Clustering**\n\nK means is an iterative clustering algorithm that works in these 5 steps:\n\n1. Specify the desired number of clusters K: Let us choose k=2 for these 5 data points in 2-D space.\n\n ![](../images/Clustering/kmeans_1.png)\n\n2. Randomly assign each data point to a cluster: Let’s assign three points in cluster 1, shown using orange color, and two points in cluster 2, shown using grey color.\n\n ![](../images/Clustering/kmeans_2.png)\n\n3. Compute cluster centroids: Centroids correspond to the arithmetic mean of data points assigned to the cluster. The centroid of data points in the orange cluster is shown using the orange cross, and those in the grey cluster using a grey cross.\n\n ![](../images/Clustering/kmeans_3.png)\n\n4. Assigns each observation to their closest centroid, based on the Euclidean distance between the object and the centroid\n\n ![](../images/Clustering/kmeans_4.png)\n\n5. Re-computing the centroids for both clusters.\n\n ![](../images/Clustering/kmeans_5.png)\n\nWe will repeat the 4th and 5th steps until no further switching of data points between two clusters for two successive repeats.
\n\n#### K-Means Clustering in R\n\n**Step 1: Load packages**\n\nFirst, we’ll load below packages that contain several useful functions regarding k-means clustering in R.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(cluster) # Contain cluster function\nlibrary(dplyr) # Data manipulation\nlibrary(ggplot2) # Plotting function\nlibrary(readr) # Read and write excel/csv files\nlibrary(factoextra) # Extract and Visualize the Results of Multivariate Data Analyses\n```\n:::\n\n\n**Step 2: Load Data**\n\nWe have used the “Mall_Customer” dataset in R for this case study.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Loading the data\ndf <- read_csv(\"../data/Mall_Customers.csv\")\n\n# Structure of the data\nstr(df)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nspc_tbl_ [200 × 5] (S3: spec_tbl_df/tbl_df/tbl/data.frame)\n $ CustomerID : chr [1:200] \"0001\" \"0002\" \"0003\" \"0004\" ...\n $ Genre : chr [1:200] \"Male\" \"Male\" \"Female\" \"Female\" ...\n $ Age : num [1:200] 19 21 20 23 31 22 35 23 64 30 ...\n $ Annual Income (k$) : num [1:200] 15 15 16 16 17 17 18 18 19 19 ...\n $ Spending Score (1-100): num [1:200] 39 81 6 77 40 76 6 94 3 72 ...\n - attr(*, \"spec\")=\n .. cols(\n .. CustomerID = col_character(),\n .. Genre = col_character(),\n .. Age = col_double(),\n .. `Annual Income (k$)` = col_double(),\n .. `Spending Score (1-100)` = col_double()\n .. 
)\n - attr(*, \"problems\")= \n```\n\n\n:::\n:::\n\n\ndataset consists of 200 customers data with their age, annual income and Spending score.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Rename the columns\ndf <- df |>\n rename(\n \"Annual_Income\" = `Annual Income (k$)`,\n \"Spending_score\" = `Spending Score (1-100)`\n )\n\n# remove rows with missing values\ndf <- na.omit(df)\n\n# scale each variable to have a mean of 0 and sd of 1\ndf1 <- df |>\n mutate(across(where(is.numeric), scale))\n\n# view first six rows of dataset\nhead(df1)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 6 × 5\n CustomerID Genre Age[,1] Annual_Income[,1] Spending_score[,1]\n \n1 0001 Male -1.42 -1.73 -0.434\n2 0002 Male -1.28 -1.73 1.19 \n3 0003 Female -1.35 -1.70 -1.71 \n4 0004 Female -1.13 -1.70 1.04 \n5 0005 Female -0.562 -1.66 -0.395\n6 0006 Female -1.21 -1.66 0.999\n```\n\n\n:::\n:::\n\n\n
\n\nWe have separated the CustomerID and Genre from the dataset. The reason for removing these variables from the cluster dataset as Kmeans can handle only numerical variables.\\\nTo create cluster with categorical or ordinal variable we can use k-Medoid clustering.
\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndf1 <- df1[, 4:5]\n```\n:::\n\n\n**Step 3: Find the Optimal Number of Clusters**\n\nTo perform k-means clustering in R we can use the built-in kmeans() function, which uses the following syntax:\n\n``` \n kmeans(data, centers, iter.max, nstart)\n where:\n - data: Name of the dataset.\n - centers: The number of clusters, denoted k.\n - iter.max (optional): The maximum number of iterations allowed. Default value is 10.\n - nstart (optional): The number of initial configurations. Default value is 1.\n```\n\n- Centers is the k of K Means. centers = 5 would results in 5 clusters being created. We need to **predefine the k** before the cluster process starts.\\\n- iter.max is the number of times the algorithm will repeat the cluster assignment and update the centers / centroids. Iteration stops after this many iterations even if the convergence criterion is not satisfied\n- nstart is the number of times the initial starting points are re-sampled. It means at the initialization of Clusters you need to specify how many clusters you want and the algorithm will randomly find same number of centroids to initialize. nstart gives you an edge to initialize the centroids through re sampling.\\\n For example if total number of cluster is 3 and nstart=25 then it extracts 3 sets of data, 25 times, and for each of these times, the algorithm is run (up to iter.max \\# of iterations) and the cost function (total sum of the squares) is evaluated and finally 3 centroids with lowest cost function are chosen to start the clustering process.\n\nTo find the best number of clusters/centroids there are two popular methods as shown below.\n\n[**A. Elbow Method:**]{.underline}\n\nIt has two parts as explained below-\n\n- WSS: The Within Sum of Squares (WSS) is the sum of distance between the centroids and every other data points within a cluster. 
Small WSS indicates that every data point is close to its nearest centroids.\n\n- Elbow rule/method: Here we plot out the WSS score against the number of K. Because with the number of K increasing, the WSS will always decrease; however, the magnitude of decrease between each k will be diminishing, and the plot will be a curve which looks like an arm that curled up. In this way, we can find out which point falls on the elbow.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nset.seed(1)\nwss <- NULL\n\n# Feeding different centroid/cluster and record WSS\nfor (i in 1:10) {\n fit = stats::kmeans(df1, centers = i, nstart = 25)\n wss = c(wss, fit$tot.withinss)\n}\n\n# Visualize the plot\nplot(1:10, wss, type = \"o\", xlab = 'Number of clusters(k)')\n```\n\n::: {.cell-output-display}\n![](Clustering_Knowhow_files/figure-html/unnamed-chunk-5-1.png){width=672}\n:::\n:::\n\n\nBased on the above plot at k=5 we can see an “elbow” where the sum of squares begins to “bend” or level off so the ideal number of clusters should be 5.\n\nThe above process to compute the “Elbow method” has been wrapped up in a single function (fviz_nbclust):\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfactoextra::fviz_nbclust(df1, kmeans, method = \"wss\", nstart = 25)\n```\n\n::: {.cell-output-display}\n![](Clustering_Knowhow_files/figure-html/unnamed-chunk-6-1.png){width=672}\n:::\n:::\n\n\n[**B. Silhouette Method:**]{.underline}\n\nThe silhouette coefficient or silhouette score is a measure of how similar a data point is within-cluster (intra-cluster) compared to other clusters (inter-cluster).\\\nThe Silhouette Coefficient is calculated using the mean *intra-cluster distance (a)* and the *mean nearest-cluster distance (b)* for each sample. 
The Silhouette Coefficient for a sample is *(b - a) / max(a, b)*\n\nHere we will plot the silhouette width/coefficient for different number of clusters and will choose the point where the silhouette width is highest.\n\n**Points to Remember While Calculating Silhouette Coefficient:**\n\nThe value of the silhouette coefficient is between \\[-1, 1\\]. A score of 1 denotes the best, meaning that the data points are very compact within the cluster to which it belongs and far away from the other clusters. The worst value is -1. Values near 0 denote overlapping clusters.\n\nIn this demonstration, we are going to see how silhouette method is used.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsilhouette_score <- function(k) {\n km <- stats::kmeans(df1, centers = k, nstart = 25)\n ss <- cluster::silhouette(km$cluster, dist(df1))\n mean(ss[, 3])\n}\nk <- 2:10\n\navg_sil <- sapply(k, silhouette_score)\nplot(\n k,\n type = 'b',\n avg_sil,\n xlab = 'Number of clusters',\n ylab = 'Average Silhouette Scores',\n frame = FALSE\n)\n```\n\n::: {.cell-output-display}\n![](Clustering_Knowhow_files/figure-html/unnamed-chunk-7-1.png){width=672}\n:::\n:::\n\n\nFrom the above method we can see the silhouette width is highest at cluster 5 so the optimal number of cluster should be 5.\n\nSimilar to the elbow method, this process to compute the “average silhoutte method” has been wrapped up in a single function (fviz_nbclust):\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfactoextra::fviz_nbclust(df1, kmeans, method = 'silhouette', nstart = 25)\n```\n\n::: {.cell-output-display}\n![](Clustering_Knowhow_files/figure-html/unnamed-chunk-8-1.png){width=672}\n:::\n:::\n\n\nThe optimal number of clusters is 5.\n\n**Step 4: Perform K-Means Clustering with Optimal K**\n\nLastly, we can perform k-means clustering on the dataset using the optimal value for k of 5:\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# make this example reproducible\nset.seed(1)\n\n# perform k-means clustering with k = 5 clusters\nfit <- 
stats::kmeans(df1, 5, nstart = 25)\nfit\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nK-means clustering with 5 clusters of sizes 22, 35, 81, 39, 23\n\nCluster means:\n Annual_Income Spending_score\n1 -1.3262173 1.12934389\n2 1.0523622 -1.28122394\n3 -0.2004097 -0.02638995\n4 0.9891010 1.23640011\n5 -1.3042458 -1.13411939\n\nClustering vector:\n [1] 5 1 5 1 5 1 5 1 5 1 5 1 5 1 5 1 5 1 5 1 5 1 5 1 5 1 5 1 5 1 5 1 5 1 5 1 5\n [38] 1 5 1 5 1 5 3 5 1 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3\n [75] 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3\n[112] 3 3 3 3 3 3 3 3 3 3 3 3 4 2 4 3 4 2 4 2 4 3 4 2 4 2 4 2 4 2 4 3 4 2 4 2 4\n[149] 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2\n[186] 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4\n\nWithin cluster sum of squares by cluster:\n[1] 5.217630 18.304646 14.485632 19.655252 7.577407\n (between_SS / total_SS = 83.6 %)\n\nAvailable components:\n\n[1] \"cluster\" \"centers\" \"totss\" \"withinss\" \"tot.withinss\"\n[6] \"betweenss\" \"size\" \"iter\" \"ifault\" \n```\n\n\n:::\n:::\n\n\nWe can visualize the clusters on a scatterplot that displays the first two principal components on the axes using the fivz_cluster() function:\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# plot results of final k-means model\nfactoextra::fviz_cluster(fit, data = df1)\n```\n\n::: {.cell-output-display}\n![](Clustering_Knowhow_files/figure-html/unnamed-chunk-10-1.png){width=672}\n:::\n:::\n\n\n**Step 5: Exporting the data by adding generated clusters**\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Adding the clusters in the main data\ndf_cluster <- df |>\n mutate(cluster = fit$cluster)\n\n# Creating Summary of created clusters based on existing variables\ndf_summary <- df_cluster |>\n group_by(cluster) |>\n summarise(\n records = n(),\n avg_age = mean(Age),\n avg_annual_income = mean(Annual_Income),\n avg_spending_score = mean(Spending_score)\n )\n\nprint(df_summary)\n```\n\n::: {.cell-output 
.cell-output-stdout}\n\n```\n# A tibble: 5 × 5\n cluster records avg_age avg_annual_income avg_spending_score\n \n1 1 22 25.3 25.7 79.4\n2 2 35 41.1 88.2 17.1\n3 3 81 42.7 55.3 49.5\n4 4 39 32.7 86.5 82.1\n5 5 23 45.2 26.3 20.9\n```\n\n\n:::\n:::\n\n\nWe can create a group of potential customers to target based on their age, average annual income and average spending score.\n", "supporting": [ "Clustering_Knowhow_files" ], diff --git a/_freeze/R/Clustering_Knowhow/figure-html/unnamed-chunk-10-1.png b/_freeze/R/Clustering_Knowhow/figure-html/unnamed-chunk-10-1.png index 4927852df..fd15cf602 100644 Binary files a/_freeze/R/Clustering_Knowhow/figure-html/unnamed-chunk-10-1.png and b/_freeze/R/Clustering_Knowhow/figure-html/unnamed-chunk-10-1.png differ diff --git a/_freeze/R/Clustering_Knowhow/figure-html/unnamed-chunk-5-1.png b/_freeze/R/Clustering_Knowhow/figure-html/unnamed-chunk-5-1.png index 8bca1d712..c1e20e63a 100644 Binary files a/_freeze/R/Clustering_Knowhow/figure-html/unnamed-chunk-5-1.png and b/_freeze/R/Clustering_Knowhow/figure-html/unnamed-chunk-5-1.png differ diff --git a/_freeze/R/Clustering_Knowhow/figure-html/unnamed-chunk-6-1.png b/_freeze/R/Clustering_Knowhow/figure-html/unnamed-chunk-6-1.png index 40205af4b..355508b7c 100644 Binary files a/_freeze/R/Clustering_Knowhow/figure-html/unnamed-chunk-6-1.png and b/_freeze/R/Clustering_Knowhow/figure-html/unnamed-chunk-6-1.png differ diff --git a/_freeze/R/Clustering_Knowhow/figure-html/unnamed-chunk-7-1.png b/_freeze/R/Clustering_Knowhow/figure-html/unnamed-chunk-7-1.png index 8910d5503..31ca285fd 100644 Binary files a/_freeze/R/Clustering_Knowhow/figure-html/unnamed-chunk-7-1.png and b/_freeze/R/Clustering_Knowhow/figure-html/unnamed-chunk-7-1.png differ diff --git a/_freeze/R/Clustering_Knowhow/figure-html/unnamed-chunk-8-1.png b/_freeze/R/Clustering_Knowhow/figure-html/unnamed-chunk-8-1.png index 0d0b0e928..71c0886bf 100644 Binary files a/_freeze/R/Clustering_Knowhow/figure-html/unnamed-chunk-8-1.png 
and b/_freeze/R/Clustering_Knowhow/figure-html/unnamed-chunk-8-1.png differ diff --git a/_freeze/R/PCA_analysis/execute-results/html.json b/_freeze/R/PCA_analysis/execute-results/html.json index 3afcaff0f..012cf69fe 100644 --- a/_freeze/R/PCA_analysis/execute-results/html.json +++ b/_freeze/R/PCA_analysis/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "24e13179cc4cb2fe24a434f7301eb2f0", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Principle Component Analysis\"\n---\n\n## Introduction\n\nPrincipal Component Analysis (PCA) is a dimensionality reduction technique used to reduce the number of features in a dataset while retaining most of the information.\n\n### Steps to Perform PCA in R\n\n- We will load the `iris` data.\n- Standardize the data and then compute PCA.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(factoextra)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nLoading required package: ggplot2\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWelcome! Want to learn more? 
See two factoextra-related books at https://goo.gl/ve3WBa\n```\n\n\n:::\n\n```{.r .cell-code}\nlibrary(plotly)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\n\nAttaching package: 'plotly'\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nThe following object is masked from 'package:ggplot2':\n\n last_plot\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nThe following object is masked from 'package:stats':\n\n filter\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nThe following object is masked from 'package:graphics':\n\n layout\n```\n\n\n:::\n\n```{.r .cell-code}\ndata <- iris\npca_result <- stats::prcomp(data[, 1:4], scale = T)\npca_result\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nStandard deviations (1, .., p=4):\n[1] 1.7083611 0.9560494 0.3830886 0.1439265\n\nRotation (n x k) = (4 x 4):\n PC1 PC2 PC3 PC4\nSepal.Length 0.5210659 -0.37741762 0.7195664 0.2612863\nSepal.Width -0.2693474 -0.92329566 -0.2443818 -0.1235096\nPetal.Length 0.5804131 -0.02449161 -0.1421264 -0.8014492\nPetal.Width 0.5648565 -0.06694199 -0.6342727 0.5235971\n```\n\n\n:::\n:::\n\n\nWe print the summary of the PCA result, which includes the standard deviation of each principal component and the proportion of variance explained.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsummary(pca_result)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nImportance of components:\n PC1 PC2 PC3 PC4\nStandard deviation 1.7084 0.9560 0.38309 0.14393\nProportion of Variance 0.7296 0.2285 0.03669 0.00518\nCumulative Proportion 0.7296 0.9581 0.99482 1.00000\n```\n\n\n:::\n:::\n\n\n## Visualize PCA Results\n\n### Scree Plot\n\nA scree plot shows the proportion of variance explained by each principal component.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfactoextra::fviz_eig(pca_result)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWarning in geom_bar(stat = \"identity\", fill = barfill, color = barcolor, :\nIgnoring empty aesthetic: 
`width`.\n```\n\n\n:::\n\n::: {.cell-output-display}\n![](PCA_analysis_files/figure-html/unnamed-chunk-3-1.png){width=672}\n:::\n:::\n\n\n### Biplot\n\nA biplot shows the scores of the samples and the loadings of the variables on the first two principal components.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nplt <- factoextra::fviz_pca_biplot(\n pca_result,\n geom.ind = \"point\",\n pointshape = 21,\n pointsize = 2,\n fill.ind = iris$Species,\n col.var = \"black\",\n repel = TRUE\n)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWarning: Using `size` aesthetic for lines was deprecated in ggplot2 3.4.0.\nℹ Please use `linewidth` instead.\nℹ The deprecated feature was likely used in the ggpubr package.\n Please report the issue at .\n```\n\n\n:::\n\n```{.r .cell-code}\nplt\n```\n\n::: {.cell-output-display}\n![](PCA_analysis_files/figure-html/unnamed-chunk-4-1.png){width=672}\n:::\n:::\n\n\n## Interpretation\n\n- The **Scree Plot** suggests to decide the number of principle components to retain by looking an *elbow* point where the explained variance starts to level off.\n\n- The **biplot** visualizes both the samples (points) and the variables (arrows). Points that are close to each other represent samples with similar characteristics, while the direction and length of the arrows indicate the contribution of each variable to the principal components.\n\n## Visualization of PCA in 3d Scatter Plot\n\nA 3d scatter plot allows us to see the relationships between three principle components simultaneously and also gives us a better understanding of how much variance is explained by these components.\n\nIt also allows for interactive exploration where we can rotate the plot and view it from a different angles.\n\nWe will plot this using `plotly` package.\n\n\n::: {.cell}\n\n```{.r .cell-code}\npca_result2 <- stats::prcomp(data[, 1:4], scale = T, rank. 
= 3)\npca_result2\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nStandard deviations (1, .., p=4):\n[1] 1.7083611 0.9560494 0.3830886 0.1439265\n\nRotation (n x k) = (4 x 3):\n PC1 PC2 PC3\nSepal.Length 0.5210659 -0.37741762 0.7195664\nSepal.Width -0.2693474 -0.92329566 -0.2443818\nPetal.Length 0.5804131 -0.02449161 -0.1421264\nPetal.Width 0.5648565 -0.06694199 -0.6342727\n```\n\n\n:::\n:::\n\n\nNext, we will create a dataframe of the 3 principle components and negate PC2 and PC3 for visual preference to make the plot look more organised and symmetric in 3d space.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncomponents <- as.data.frame(pca_result2$x)\ncomponents$PC2 <- components$PC2\ncomponents$PC3 <- components$PC3\n```\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfig <- plotly::plot_ly(\n components,\n x = ~PC1,\n y = ~PC2,\n z = ~PC3,\n color = ~ data$Species,\n colors = c('darkgreen', 'darkblue', 'darkred')\n) |>\n add_markers(size = 12)\n\nfig <- fig |>\n layout(title = \"3d Visualization of PCA\", scene = list(bgcolor = \"lightgray\"))\nfig\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n```\n\n:::\n:::\n\n", + "markdown": "---\ntitle: \"Principle Component Analysis\"\n---\n\n## Introduction\n\nPrincipal Component Analysis (PCA) is a dimensionality reduction technique used to reduce the number of features in a dataset while retaining most of the information.\n\n### Steps to Perform PCA in R\n\n- We will load the `iris` data.\n- Standardize the data and then compute PCA.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(factoextra)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nLoading required package: ggplot2\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWelcome! Want to learn more? See two factoextra-related books at https://goo.gl/ve3WBa\n```\n\n\n:::\n\n```{.r .cell-code}\nlibrary(plotly)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\n\nAttaching package: 'plotly'\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nThe following object is masked from 'package:ggplot2':\n\n last_plot\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nThe following object is masked from 'package:stats':\n\n filter\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nThe following object is masked from 'package:graphics':\n\n layout\n```\n\n\n:::\n\n```{.r .cell-code}\ndata <- iris\npca_result <- stats::prcomp(data[, 1:4], scale = T)\npca_result\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nStandard deviations (1, .., p=4):\n[1] 1.7083611 0.9560494 0.3830886 0.1439265\n\nRotation (n x k) = (4 x 4):\n PC1 PC2 PC3 PC4\nSepal.Length 0.5210659 -0.37741762 0.7195664 0.2612863\nSepal.Width -0.2693474 -0.92329566 -0.2443818 -0.1235096\nPetal.Length 0.5804131 -0.02449161 -0.1421264 -0.8014492\nPetal.Width 0.5648565 -0.06694199 -0.6342727 0.5235971\n```\n\n\n:::\n:::\n\n\nWe print the summary of the PCA result, which includes the standard deviation of each principal component and the proportion of variance explained.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsummary(pca_result)\n```\n\n::: 
{.cell-output .cell-output-stdout}\n\n```\nImportance of components:\n PC1 PC2 PC3 PC4\nStandard deviation 1.7084 0.9560 0.38309 0.14393\nProportion of Variance 0.7296 0.2285 0.03669 0.00518\nCumulative Proportion 0.7296 0.9581 0.99482 1.00000\n```\n\n\n:::\n:::\n\n\n## Visualize PCA Results\n\n### Scree Plot\n\nA scree plot shows the proportion of variance explained by each principal component.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfactoextra::fviz_eig(pca_result)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWarning in geom_bar(stat = \"identity\", fill = barfill, color = barcolor, :\nIgnoring empty aesthetic: `width`.\n```\n\n\n:::\n\n::: {.cell-output-display}\n![](PCA_analysis_files/figure-html/unnamed-chunk-3-1.png){width=672}\n:::\n:::\n\n\n### Biplot\n\nA biplot shows the scores of the samples and the loadings of the variables on the first two principal components.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nplt <- factoextra::fviz_pca_biplot(\n pca_result,\n geom.ind = \"point\",\n pointshape = 21,\n pointsize = 2,\n fill.ind = iris$Species,\n col.var = \"black\",\n repel = TRUE\n)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWarning: Using `size` aesthetic for lines was deprecated in ggplot2 3.4.0.\nℹ Please use `linewidth` instead.\nℹ The deprecated feature was likely used in the ggpubr package.\n Please report the issue at .\n```\n\n\n:::\n\n```{.r .cell-code}\nplt\n```\n\n::: {.cell-output-display}\n![](PCA_analysis_files/figure-html/unnamed-chunk-4-1.png){width=672}\n:::\n:::\n\n\n## Interpretation\n\n- The **Scree Plot** suggests to decide the number of principle components to retain by looking an *elbow* point where the explained variance starts to level off.\n\n- The **biplot** visualizes both the samples (points) and the variables (arrows). 
Points that are close to each other represent samples with similar characteristics, while the direction and length of the arrows indicate the contribution of each variable to the principal components.\n\n## Visualization of PCA in 3d Scatter Plot\n\nA 3d scatter plot allows us to see the relationships between three principle components simultaneously and also gives us a better understanding of how much variance is explained by these components.\n\nIt also allows for interactive exploration where we can rotate the plot and view it from a different angles.\n\nWe will plot this using `plotly` package.\n\n\n::: {.cell}\n\n```{.r .cell-code}\npca_result2 <- stats::prcomp(data[, 1:4], scale = T, rank. = 3)\npca_result2\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nStandard deviations (1, .., p=4):\n[1] 1.7083611 0.9560494 0.3830886 0.1439265\n\nRotation (n x k) = (4 x 3):\n PC1 PC2 PC3\nSepal.Length 0.5210659 -0.37741762 0.7195664\nSepal.Width -0.2693474 -0.92329566 -0.2443818\nPetal.Length 0.5804131 -0.02449161 -0.1421264\nPetal.Width 0.5648565 -0.06694199 -0.6342727\n```\n\n\n:::\n:::\n\n\nNext, we will create a dataframe of the 3 principle components and negate PC2 and PC3 for visual preference to make the plot look more organised and symmetric in 3d space.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncomponents <- as.data.frame(pca_result2$x)\ncomponents$PC2 <- components$PC2\ncomponents$PC3 <- components$PC3\n```\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfig <- plotly::plot_ly(\n components,\n x = ~PC1,\n y = ~PC2,\n z = ~PC3,\n color = ~ data$Species,\n colors = c('darkgreen', 'darkblue', 'darkred')\n) |>\n add_markers(size = 12)\n\nfig <- fig |>\n layout(title = \"3d Visualization of PCA\", scene = list(bgcolor = \"lightgray\"))\nfig\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n```\n\n:::\n:::\n\n", "supporting": [ "PCA_analysis_files" ], diff --git a/_freeze/R/PCA_analysis/figure-html/unnamed-chunk-3-1.png b/_freeze/R/PCA_analysis/figure-html/unnamed-chunk-3-1.png index 540f34bf7..59fc6f662 100644 Binary files a/_freeze/R/PCA_analysis/figure-html/unnamed-chunk-3-1.png and b/_freeze/R/PCA_analysis/figure-html/unnamed-chunk-3-1.png differ diff --git a/_freeze/R/PCA_analysis/figure-html/unnamed-chunk-4-1.png b/_freeze/R/PCA_analysis/figure-html/unnamed-chunk-4-1.png index 0cb2b2581..63bff5c0e 100644 Binary files a/_freeze/R/PCA_analysis/figure-html/unnamed-chunk-4-1.png and b/_freeze/R/PCA_analysis/figure-html/unnamed-chunk-4-1.png differ diff --git a/_freeze/R/R_Friedmantest/execute-results/html.json b/_freeze/R/R_Friedmantest/execute-results/html.json deleted file mode 100644 index 1364405f3..000000000 --- a/_freeze/R/R_Friedmantest/execute-results/html.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "hash": "1dd594991edaafc1d87773b8f63b7e06", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"Friedman Chi-Square test using R package rstatix\"\n---\n\n\n\n## R package version\n\nR 4.3.1\\\nrstatix 0.7.2\n\n## Data used\n\nSimulated dataset of 10 subjects(blocks) with continuous endpoints are generated for single-drug repeated measurements to check whether any significance exists between the responses(y) at different time points(4 time points simulated)(groups). 
The p-value will indicate whether differences in response for different time points are significant.\n\n## Data source\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(dplyr)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\n\nAttaching package: 'dplyr'\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nThe following objects are masked from 'package:stats':\n\n filter, lag\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nThe following objects are masked from 'package:base':\n\n intersect, setdiff, setequal, union\n```\n\n\n:::\n\n```{.r .cell-code}\nlibrary(rstatix) \n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\n\nAttaching package: 'rstatix'\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nThe following object is masked from 'package:stats':\n\n filter\n```\n\n\n:::\n:::\n\n::: {.cell}\n\n```{.r .cell-code}\nset.seed(123)\n\none_way_repeat <- data.frame(subject = rep(1:10, each=4),\n timepoint = rep(c(1, 2, 3, 4), times=10),\n response =round(runif(n = 40, 10, 50)))\n\nhead(one_way_repeat)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n subject timepoint response\n1 1 1 22\n2 1 2 42\n3 1 3 26\n4 1 4 45\n5 2 1 48\n6 2 2 12\n```\n\n\n:::\n:::\n\n\n\n## Overview\n\nThe friedman.test() function from the R package 'rstatix' computes the following:\n\n- Friedman chi-squared statistic\n\n- its degrees of freedom and\n\n- p-value for checking whether the mean response is the same for all the time points.\n\n## Handling missing Values\n\nWhen the data contain NAs, the function Defaults to getOption(\"na.action\") which discards the row with NAs.\n\n## Example Code for Friedman Chi-square test\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfriedman.test(y=one_way_repeat$response, groups=one_way_repeat$timepoint, blocks=one_way_repeat$subject)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tFriedman rank sum test\n\ndata: one_way_repeat$response, one_way_repeat$timepoint and 
one_way_repeat$subject\nFriedman chi-squared = 3.0612, df = 3, p-value = 0.3823\n```\n\n\n:::\n:::\n\n\n\nwhere:\n\n- y - a vector of response values.\n\n- groups - a vector of values indicating the \"group\" an observation belongs in.\n\n- blocks - a vector of values indicating the \"blocking\" variable.\n\n## References\n\n[R `friedman_test()` documentation](https://search.r-project.org/CRAN/refmans/rstatix/html/friedman_test.html)\n", - "supporting": [], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/R/Weighted-log-rank/execute-results/html.json b/_freeze/R/Weighted-log-rank/execute-results/html.json index 38da2ba41..fc7a4e97f 100644 --- a/_freeze/R/Weighted-log-rank/execute-results/html.json +++ b/_freeze/R/Weighted-log-rank/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "f8f3ef2a4511507643f3f8f10a1edba9", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Testing approaches under non-proportional hazards\"\n---\n\n# Introduction\n\nIn clinical studies with time-to-event outcomes, it is commonly assumed that the hazard functions of two groups are proportional. The standard log-rank test is widely used to test the equivalence of survival functions. However, several scenarios can lead to non-proportional hazards (NPH). For example, a delayed treatment effect may be observed in the treatment arm which can lead to departure from proportionality of the survival curves. Thus there are many tests available in the literature that can handle such scenarios. Most commonly used tests are as follows:\n\n- Weighted log-rank test\n- Restricted Mean Survival Time (RMST)\n- Milestone survival\n- Max-Combo test.\n\nWhile these tests may be explored in a separate document, this particular document focuses solely on the weighted log-rank test.\\\n\n# Weighted log-rank test\n\nSuppose we have two groups (e.g. 
treatment and control, male and female etc.) with survival functions $S_1$ & $S_2$ respectively. The null and alternative hypotheses are given as: $$H_0 : S_1(t)=S_2(t) \\mbox{ }\\forall t \\mbox{ v/s } H_1 : S_1(t) \\neq S_2(t) \\mbox{ for some t. }$$ Since alternative hypothesis is composite, it includes multiple scenarios. Hence the power calculation is difficult to implement. One way to tackle this situation is to consider the Lehman alternative given by $H_L : S_1(t)=(S_2(t))^\\psi$ for all $t$ where $0<\\psi<1$. Alternatively, $$ H_0 : \\psi=1 \\ v/s \\ H_1: \\psi<1$$\n\nwhich implies subjects in group 1 will have longer survival times than subjects in group 2. For more details, refer to Page 44 of the [Moore (2016)](https://xsliulab.github.io/Workshop/2021/week3/survival-analysis-book.pdf).\\\nThe test statistic for weighted log-rank test is given by, $$ Z = \\frac{\\sum_{j=1}^{D}w_j(o_{j} -e_j)}{\\sqrt{\\sum_{j=1}^{D}w_j^2 v_j}} \\to N(0,1), \\text{under} \\ H_0$$ Equivalently, $$ Z^2 = \\frac{\\big[\\sum_{j=1}^{D}w_j(o_j -e_j)\\big]^2}{\\sum_{j=1}^{D}w_j^2 v_j} \\to \\chi^2_1, \\text{under} \\ H_0.$$\\\nHere $t_1 \n1 1 83 0 89 152 78 25.5 1 1 0 0 0\n2 2 49 0 84 120 60 24.0 1 0 0 0 0\n3 3 70 1 83 147 88 22.1 0 0 0 0 0\n4 4 70 0 65 123 76 26.6 1 0 0 1 0\n5 5 70 0 63 135 85 24.4 1 0 0 0 0\n6 6 70 0 76 83 54 23.2 1 0 0 0 1\n# ℹ 7 more variables: MIORD , MITYPE , YEAR , LOS ,\n# DSTAT , LENFOL , FSTAT \n```\n\n\n:::\n:::\n\n\n## *survdiff()*\n\nThis function uses $G(\\rho)=\\hat{S}(t)^\\rho, \\rho \\geq 0$ , where $\\hat{S}(t)$ is the Kaplan-Meier estimate of the survival function at time $t$. 
If $\\rho = 0$, then this is the standard log-rank test.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(survival)\nWLRtest <- survival::survdiff(\n survival::Surv(LENFOL, FSTAT) ~ AFB,\n rho = 3,\n data = dat\n)\nWLRtest\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nCall:\nsurvival::survdiff(formula = survival::Surv(LENFOL, FSTAT) ~ \n AFB, data = dat, rho = 3)\n\n N Observed Expected (O-E)^2/E (O-E)^2/V\nAFB=0 422 86.3 94.5 0.718 7.68\nAFB=1 78 24.2 16.0 4.245 7.68\n\n Chisq= 7.7 on 1 degrees of freedom, p= 0.006 \n```\n\n\n:::\n:::\n\n\nFor the illustration, $\\rho$ is taken as 3 while calculating weights and the weighted log rank test reject the null hypothesis at 2.5% level of significance.\n\n## *wlrt*()\n\nThis function uses $G(\\rho,\\gamma)=\\hat{S}(t)^\\rho (1-\\hat{S}(t))^\\gamma; \\rho,\\gamma \\geq 0,$ , where $\\hat{S}(t)$ is the Kaplan-Meier estimate of the survival function at time $t$. If $\\rho = \\gamma = 0$, then this is the standard log-rank test. When $\\rho=0, \\gamma=1$ this test can be used to detect early difference in the survival curves, when $\\rho=1, \\gamma = 0$, this test can be used to detect late differences in the survival curves and when $\\rho=1, \\gamma = 1$ this test can be used to test middle differences in the survival curves. 
Also it is to be noted that this test gives the Z-score as the test statistic which can be squared to obtain the chi-square statistic.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(nphRCT)\nWL <- nphRCT::wlrt(\n survival::Surv(LENFOL, FSTAT) ~ AFB,\n data = dat,\n method = \"fh\",\n rho = 0,\n gamma = 0\n)\nWL\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n u v_u z trt_group\n1 16.77487 25.81609 3.301521 1\n```\n\n\n:::\n:::\n\n\nTo obtain the corresponding $p$-value we can either use *2(1-pnorm(abs(WL\\$z),0,1))* or we can square the test statistic *WL\\$z* by using *(WL\\$z)\\^2* and obtain the corresponding $p$-values as *1 - pchisq((WL\\$z)\\^2,1)* , both the $p$-values will be the same.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n2 * (1 - pnorm(abs(WL$z), 0, 1))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 0.0009616214\n```\n\n\n:::\n\n```{.r .cell-code}\n(WL$z)^2\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 10.90004\n```\n\n\n:::\n\n```{.r .cell-code}\n1 - pchisq((WL$z)^2, 1)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 0.0009616214\n```\n\n\n:::\n:::\n\n\nFor the illustration purpose we used $\\rho=0,\\ \\gamma=0$ and in this scenario weighted log-rank test becomes standard log-rank test. Therefore, the result obtained in this illustration is consistent with the result obtained in [standard log-rank test](Survival%20Analysis%20Using%20R%20(psiaims.github.io)).\n\n# References\n\n1. Knezevic, A., & Patil, S. (2020). Combination weighted log-rank tests for survival analysis with non-proportional hazards. *SAS Global Forum*.\n2. Magirr, D., & Barrott, I. (2022). nphRCT: Non-Proportional Hazards in Randomized Controlled Trials.\n3. Moore, D. F. (2016). *Applied survival analysis using R* (Vol. 473, pp. 1-10). Cham: Springer.\n4. Therneau T (2024). 
A Package for Survival Analysis in R.\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-23\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P nphRCT * 0.1.1 2024-06-27 [?] RSPM\n survival * 3.8-3 2024-12-17 [2] CRAN (R 4.5.2)\n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n\n * ── Packages attached to the search path.\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n\n:::\n", + "markdown": "---\ntitle: \"Testing approaches under non-proportional hazards\"\n---\n\n# Introduction\n\nIn clinical studies with time-to-event outcomes, it is commonly assumed that the hazard functions of two groups are proportional. The standard log-rank test is widely used to test the equivalence of survival functions. However, several scenarios can lead to non-proportional hazards (NPH). For example, a delayed treatment effect may be observed in the treatment arm which can lead to departure from proportionality of the survival curves. Thus there are many tests available in the literature that can handle such scenarios. 
Most commonly used tests are as follows:\n\n- Weighted log-rank test\n- Restricted Mean Survival Time (RMST)\n- Milestone survival\n- Max-Combo test.\n\nWhile these tests may be explored in a separate document, this particular document focuses solely on the weighted log-rank test.\\\n\n# Weighted log-rank test\n\nSuppose we have two groups (e.g. treatment and control, male and female etc.) with survival functions $S_1$ & $S_2$ respectively. The null and alternative hypotheses are given as: $$H_0 : S_1(t)=S_2(t) \\mbox{ }\\forall t \\mbox{ v/s } H_1 : S_1(t) \\neq S_2(t) \\mbox{ for some t. }$$ Since alternative hypothesis is composite, it includes multiple scenarios. Hence the power calculation is difficult to implement. One way to tackle this situation is to consider the Lehman alternative given by $H_L : S_1(t)=(S_2(t))^\\psi$ for all $t$ where $0<\\psi<1$. Alternatively, $$ H_0 : \\psi=1 \\ v/s \\ H_1: \\psi<1$$\n\nwhich implies subjects in group 1 will have longer survival times than subjects in group 2. For more details, refer to Page 44 of the [Moore (2016)](https://xsliulab.github.io/Workshop/2021/week3/survival-analysis-book.pdf).\\\nThe test statistic for weighted log-rank test is given by, $$ Z = \\frac{\\sum_{j=1}^{D}w_j(o_{j} -e_j)}{\\sqrt{\\sum_{j=1}^{D}w_j^2 v_j}} \\to N(0,1), \\text{under} \\ H_0$$ Equivalently, $$ Z^2 = \\frac{\\big[\\sum_{j=1}^{D}w_j(o_j -e_j)\\big]^2}{\\sum_{j=1}^{D}w_j^2 v_j} \\to \\chi^2_1, \\text{under} \\ H_0.$$\\\nHere $t_1 \n1 1 83 0 89 152 78 25.5 1 1 0 0 0\n2 2 49 0 84 120 60 24.0 1 0 0 0 0\n3 3 70 1 83 147 88 22.1 0 0 0 0 0\n4 4 70 0 65 123 76 26.6 1 0 0 1 0\n5 5 70 0 63 135 85 24.4 1 0 0 0 0\n6 6 70 0 76 83 54 23.2 1 0 0 0 1\n# ℹ 7 more variables: MIORD , MITYPE , YEAR , LOS ,\n# DSTAT , LENFOL , FSTAT \n```\n\n\n:::\n:::\n\n\n## *survdiff()*\n\nThis function uses $G(\\rho)=\\hat{S}(t)^\\rho, \\rho \\geq 0$ , where $\\hat{S}(t)$ is the Kaplan-Meier estimate of the survival function at time $t$. 
If $\\rho = 0$, then this is the standard log-rank test.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(survival)\nWLRtest <- survival::survdiff(\n survival::Surv(LENFOL, FSTAT) ~ AFB,\n rho = 3,\n data = dat\n)\nWLRtest\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nCall:\nsurvival::survdiff(formula = survival::Surv(LENFOL, FSTAT) ~ \n AFB, data = dat, rho = 3)\n\n N Observed Expected (O-E)^2/E (O-E)^2/V\nAFB=0 422 86.3 94.5 0.718 7.68\nAFB=1 78 24.2 16.0 4.245 7.68\n\n Chisq= 7.7 on 1 degrees of freedom, p= 0.006 \n```\n\n\n:::\n:::\n\n\nFor the illustration, $\\rho$ is taken as 3 while calculating weights and the weighted log rank test reject the null hypothesis at 2.5% level of significance.\n\n## *wlrt*()\n\nThis function uses $G(\\rho,\\gamma)=\\hat{S}(t)^\\rho (1-\\hat{S}(t))^\\gamma; \\rho,\\gamma \\geq 0,$ , where $\\hat{S}(t)$ is the Kaplan-Meier estimate of the survival function at time $t$. If $\\rho = \\gamma = 0$, then this is the standard log-rank test. When $\\rho=0, \\gamma=1$ this test can be used to detect early difference in the survival curves, when $\\rho=1, \\gamma = 0$, this test can be used to detect late differences in the survival curves and when $\\rho=1, \\gamma = 1$ this test can be used to test middle differences in the survival curves. 
Also it is to be noted that this test gives the Z-score as the test statistic which can be squared to obtain the chi-square statistic.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(nphRCT)\nWL <- nphRCT::wlrt(\n survival::Surv(LENFOL, FSTAT) ~ AFB,\n data = dat,\n method = \"fh\",\n rho = 0,\n gamma = 0\n)\nWL\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n u v_u z trt_group\n1 16.77487 25.81609 3.301521 1\n```\n\n\n:::\n:::\n\n\nTo obtain the corresponding $p$-value we can either use *2(1-pnorm(abs(WL\\$z),0,1))* or we can square the test statistic *WL\\$z* by using *(WL\\$z)\\^2* and obtain the corresponding $p$-values as *1 - pchisq((WL\\$z)\\^2,1)* , both the $p$-values will be the same.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n2 * (1 - pnorm(abs(WL$z), 0, 1))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 0.0009616214\n```\n\n\n:::\n\n```{.r .cell-code}\n(WL$z)^2\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 10.90004\n```\n\n\n:::\n\n```{.r .cell-code}\n1 - pchisq((WL$z)^2, 1)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 0.0009616214\n```\n\n\n:::\n:::\n\n\nFor the illustration purpose we used $\\rho=0,\\ \\gamma=0$ and in this scenario weighted log-rank test becomes standard log-rank test. Therefore, the result obtained in this illustration is consistent with the result obtained in [standard log-rank test](Survival%20Analysis%20Using%20R%20(psiaims.github.io)).\n\n# References\n\n1. Knezevic, A., & Patil, S. (2020). Combination weighted log-rank tests for survival analysis with non-proportional hazards. *SAS Global Forum*.\n2. Magirr, D., & Barrott, I. (2022). nphRCT: Non-Proportional Hazards in Randomized Controlled Trials.\n3. Moore, D. F. (2016). *Applied survival analysis using R* (Vol. 473, pp. 1-10). Cham: Springer.\n4. Therneau T (2024). 
A Package for Survival Analysis in R.\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os Ubuntu 24.04.3 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-17\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P nphRCT * 0.1.1 2024-06-27 [?] RSPM (R 4.5.0)\n survival * 3.8-3 2024-12-17 [2] CRAN (R 4.5.2)\n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.2/lib/R/library\n\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n\n:::\n", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/R/ancova/execute-results/html.json b/_freeze/R/ancova/execute-results/html.json index 86d4367b5..a7f26b1b6 100644 --- a/_freeze/R/ancova/execute-results/html.json +++ b/_freeze/R/ancova/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "ad6ead4389c7b9923a31c04bd64c4aa7", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Ancova\"\noutput: html_document\ndate: \"2023-06-01\"\n---\n\n\n\n## Introduction\n\nANOVA is a statistical method used to compare the means of three or more groups to determine if at least one group mean is significantly different from the others. Please see the [ANOVA document](anova.qmd) for more information. 
ANCOVA is an extension to ANOVA.\n\nANCOVA (Analysis of Covariance) is a statistical method that compares the means of two or more groups while controlling for one or more continuous covariates. By adjusting for these covariates, ANCOVA helps to reduce potential confounding effects, allowing for a clearer assessment of the main treatment effects. It assumes linear relationships between covariates and the dependent variable, along with normality and homogeneity of variances.\n\nWe follow the example from link [Analysis of Covariance](https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.4/statug/statug_glm_examples04.htm)\n\n## Data Summary\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndf_sas |> glimpse()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nRows: 30\nColumns: 3\n$ drug A, A, A, A, A, A, A, A, A, A, D, D, D, D, D, D, D, D, D, D, F, F,…\n$ pre 11, 8, 5, 14, 19, 6, 10, 6, 11, 3, 6, 6, 7, 8, 18, 8, 19, 8, 5, 1…\n$ post 6, 0, 2, 8, 11, 4, 13, 1, 8, 0, 0, 2, 3, 1, 18, 4, 14, 9, 1, 9, 1…\n```\n\n\n:::\n\n```{.r .cell-code}\ndf_sas |> summary()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n drug pre post \n A:10 Min. : 3.00 Min. : 0.00 \n D:10 1st Qu.: 7.00 1st Qu.: 2.00 \n F:10 Median :10.50 Median : 7.00 \n Mean :10.73 Mean : 7.90 \n 3rd Qu.:13.75 3rd Qu.:12.75 \n Max. :21.00 Max. :23.00 \n```\n\n\n:::\n:::\n\n\n## The Model\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel_ancova <- lm(post ~ drug + pre, data = df_sas)\n\nmodel_glance <- model_ancova |>\n glance()\nmodel_tidy <- model_ancova |>\n tidy()\nmodel_glance |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n\n\n\n \n \n
r.squaredadj.r.squaredsigmastatisticp.valuedflogLikAICBICdeviancedf.residualnobs
0.67626090.63890644.00577818.103861.501369e-063-82.05377174.1075181.1135417.20262630
\n
\n```\n\n:::\n\n```{.r .cell-code}\nmodel_tidy |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n \n\n\n\n\n \n\n\n\n\n \n\n\n\n\n \n \n
termestimatestd.errorstatisticp.value
(Intercept)-3.88080941.9862017-1.95388496.155192e-02
drugD0.10897131.79513510.06070379.520594e-01
drugF3.44613831.88678061.82646477.928458e-02
pre0.98718380.16449766.00120612.454330e-06
\n
\n```\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel_table <- model_ancova |>\n anova() |>\n tidy()\n\ntotal_df <- sum(model_table$df)\ntotal_sumsq <- sum(model_table$sumsq)\n\nmodel_table |>\n add_row(term = \"Total\", df = total_df, sumsq = total_sumsq) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n \n
termdfsumsqmeansqstatisticp.value
drug2293.6000146.800009.1485539.812371e-04
pre1577.8974577.8974036.0144752.454330e-06
Residuals26417.202616.04625NANA
Total291288.7000NANANA
\n
\n```\n\n:::\n:::\n\n\n### Sums of Squares Tables {.unnumbered}\n\n#### Type I\nThis can be calculated using, the base R {stats} package or the {rstatix} package. Both give the same result.\n\n##### stats\n\n\n::: {.cell}\n\n```{.r .cell-code}\nstats::anova(model_ancova)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nAnalysis of Variance Table\n\nResponse: post\n Df Sum Sq Mean Sq F value Pr(>F) \ndrug 2 293.6 146.80 9.1486 0.0009812 ***\npre 1 577.9 577.90 36.0145 2.454e-06 ***\nResiduals 26 417.2 16.05 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n```\n\n\n:::\n:::\n\n\n##### rstatix\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndf_sas |>\n anova_test(post ~ drug + pre, type = 1, detailed = TRUE) |>\n get_anova_table() |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
EffectDFnDFdSSnSSdFpp<.05ges
drug226293.600417.2039.1499.81e-04*0.413
pre126577.897417.20336.0142.45e-06*0.581
\n
\n```\n\n:::\n:::\n\n\n#### Type II\n\nThis can be calculated using the {car} package or the {rstatix} package. Both give the same result.\n\n##### car\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncar::Anova(model_ancova, type = \"II\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nAnova Table (Type II tests)\n\nResponse: post\n Sum Sq Df F value Pr(>F) \ndrug 68.55 2 2.1361 0.1384 \npre 577.90 1 36.0145 2.454e-06 ***\nResiduals 417.20 26 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n```\n\n\n:::\n:::\n\n\n##### rstatix\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndf_sas |>\n anova_test(post ~ drug + pre, type = 2, detailed = TRUE) |>\n get_anova_table() |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
EffectSSnSSdDFnDFdFpp<.05ges
drug68.554417.2032262.1361.38e-010.141
pre577.897417.20312636.0142.45e-06*0.581
\n
\n```\n\n:::\n:::\n\n\n#### Type III\n\nThis can be calculated using the base R {stats} package, the {car} package or the {rstatix} package. All give the same result.\n\nNote: Calculating type III sums of squares in R is a bit tricky, because the multi-way ANOVA model is over-paramerterised. So when running the linear model we need to select a design matrix that sums to zero. In R those options will be either `\"contr.sum\"` or `\"contr.poly\"`\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Drug design matrix\ncontr.sum(4) # Using 4 here as we have 4 levels of drug\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n [,1] [,2] [,3]\n1 1 0 0\n2 0 1 0\n3 0 0 1\n4 -1 -1 -1\n```\n\n\n:::\n\n```{.r .cell-code}\n# Disease design matrix\ncontr.sum(3)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n [,1] [,2]\n1 1 0\n2 0 1\n3 -1 -1\n```\n\n\n:::\n:::\n\n\nWhile not relevant for this example as the disease variable isn't ordinal the polynomial design matrix would look like\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncontr.poly(3)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n .L .Q\n[1,] -7.071068e-01 0.4082483\n[2,] -9.681035e-17 -0.8164966\n[3,] 7.071068e-01 0.4082483\n```\n\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel_ancova <- lm(\n post ~ drug + pre, data = df_sas,\n contrasts = list(drug = \"contr.sum\")\n)\n```\n:::\n\n\n##### stats\n\nUsing the base stats package, you can use the `drop1()` function which drops all possible single terms in a model. The scope term specifies how things can be dropped.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nstats::drop1(model_ancova, scope = . ~ ., test = \"F\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nSingle term deletions\n\nModel:\npost ~ drug + pre\n Df Sum of Sq RSS AIC F value Pr(>F) \n 417.20 86.971 \ndrug 2 68.55 485.76 87.535 2.1361 0.1384 \npre 1 577.90 995.10 111.049 36.0145 2.454e-06 ***\n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n```\n\n\n:::\n:::\n\n\n##### car\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncar::Anova(model_ancova, type = \"III\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nAnova Table (Type III tests)\n\nResponse: post\n Sum Sq Df F value Pr(>F) \n(Intercept) 31.93 1 1.9898 0.1702 \ndrug 68.55 2 2.1361 0.1384 \npre 577.90 1 36.0145 2.454e-06 ***\nResiduals 417.20 26 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n```\n\n\n:::\n:::\n\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndf_sas |>\n anova_test(post ~ drug + pre, type = 3, detailed = TRUE) |>\n get_anova_table() |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
EffectSSnSSdDFnDFdFpp<.05ges
(Intercept)31.929417.2031261.9901.70e-010.071
drug68.554417.2032262.1361.38e-010.141
pre577.897417.20312636.0142.45e-06*0.581
\n
\n```\n\n:::\n:::\n\n\n### Least Squares Means\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel_ancova |>\n emmeans::lsmeans(\"drug\") |>\n emmeans::pwpm(pvals = TRUE, means = TRUE)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n A D F\nA [ 6.71] 0.9980 0.1809\nD -0.109 [ 6.82] 0.1893\nF -3.446 -3.337 [10.16]\n\nRow and column labels: drug\nUpper triangle: P values adjust = \"tukey\"\nDiagonal: [Estimates] (lsmean) \nLower triangle: Comparisons (estimate) earlier vs. later\n```\n\n\n:::\n\n```{.r .cell-code}\nmodel_ancova |>\n emmeans::lsmeans(\"drug\") |>\n plot(comparisons = TRUE)\n```\n\n::: {.cell-output-display}\n![](ancova_files/figure-html/unnamed-chunk-14-1.png){width=672}\n:::\n:::\n\n\n## sasLM Package\n\nThe following code performs an ANCOVA analysis using the **sasLM** package. This package was written specifically to replicate SAS statistics. The console output is also organized in a manner that is similar to SAS.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(sasLM)\n\nsasLM::GLM(post ~ drug + pre, df_sas, BETA = TRUE, EMEAN = TRUE)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n$ANOVA\nResponse : post\n Df Sum Sq Mean Sq F value Pr(>F) \nMODEL 3 871.5 290.499 18.104 1.501e-06 ***\nRESIDUALS 26 417.2 16.046 \nCORRECTED TOTAL 29 1288.7 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n$Fitness\n Root MSE post Mean Coef Var R-square Adj R-sq\n 4.005778 7.9 50.70604 0.6762609 0.6389064\n\n$`Type I`\n Df Sum Sq Mean Sq F value Pr(>F) \ndrug 2 293.6 146.8 9.1486 0.0009812 ***\npre 1 577.9 577.9 36.0145 2.454e-06 ***\n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n$`Type II`\n Df Sum Sq Mean Sq F value Pr(>F) \ndrug 2 68.55 34.28 2.1361 0.1384 \npre 1 577.90 577.90 36.0145 2.454e-06 ***\n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n$`Type III`\n Df Sum Sq Mean Sq F value Pr(>F) \ndrug 2 68.55 34.28 2.1361 0.1384 \npre 1 577.90 577.90 36.0145 2.454e-06 ***\n---\nSignif. 
codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n$Parameter\n Estimate Estimable Std. Error Df t value Pr(>|t|) \n(Intercept) -0.4347 0 2.4714 26 -0.1759 0.86175 \ndrugA -3.4461 0 1.8868 26 -1.8265 0.07928 . \ndrugD -3.3372 0 1.8539 26 -1.8001 0.08346 . \ndrugF 0.0000 0 0.0000 26 \npre 0.9872 1 0.1645 26 6.0012 2.454e-06 ***\n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n$`Expected Mean`\n LSmean LowerCL UpperCL SE Df\n(Intercept) 7.900000 6.396685 9.403315 0.7313516 26\ndrugA 6.714963 4.066426 9.363501 1.2884943 26\ndrugD 6.823935 4.208337 9.439532 1.2724690 26\ndrugF 10.161102 7.456182 12.866021 1.3159234 26\npre 7.900000 6.396685 9.403315 0.7313516 26\n```\n\n\n:::\n:::\n\n\nNote that the LSMEANS statistics are produced using the `EMEAN = TRUE` option. The `BETA = TRUE` option is equivalent to the `SOLUTION` option in SAS. See the **sasLM** documentation for additional information.\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-23\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P broom * 1.0.12 2026-01-27 [?] RSPM\n P car 3.1-5 2026-02-03 [?] RSPM\n P dplyr * 1.2.0 2026-02-03 [?] RSPM\n P emmeans * 2.0.1 2025-12-16 [?] RSPM\n P gt * 1.3.0 2026-01-22 [?] RSPM\n P rstatix * 0.7.3 2025-10-18 [?] RSPM\n P sasLM * 0.10.7 2025-09-28 [?] RSPM\n P tibble * 3.3.1 2026-01-11 [?] 
RSPM\n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n\n * ── Packages attached to the search path.\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n:::", + "markdown": "---\ntitle: \"Ancova\"\noutput: html_document\ndate: \"2023-06-01\"\n---\n\n\n\n## Introduction\n\nANOVA is a statistical method used to compare the means of three or more groups to determine if at least one group mean is significantly different from the others. Please see the [ANOVA document](anova.qmd) for more information. ANCOVA is an extension to ANOVA.\n\nANCOVA (Analysis of Covariance) is a statistical method that compares the means of two or more groups while controlling for one or more continuous covariates. By adjusting for these covariates, ANCOVA helps to reduce potential confounding effects, allowing for a clearer assessment of the main treatment effects. It assumes linear relationships between covariates and the dependent variable, along with normality and homogeneity of variances.\n\nWe follow the example from link [Analysis of Covariance](https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.4/statug/statug_glm_examples04.htm)\n\n## Data Summary\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndf_sas |> glimpse()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nRows: 30\nColumns: 3\n$ drug A, A, A, A, A, A, A, A, A, A, D, D, D, D, D, D, D, D, D, D, F, F,…\n$ pre 11, 8, 5, 14, 19, 6, 10, 6, 11, 3, 6, 6, 7, 8, 18, 8, 19, 8, 5, 1…\n$ post 6, 0, 2, 8, 11, 4, 13, 1, 8, 0, 0, 2, 3, 1, 18, 4, 14, 9, 1, 9, 1…\n```\n\n\n:::\n\n```{.r .cell-code}\ndf_sas |> summary()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n drug pre post \n A:10 Min. : 3.00 Min. 
: 0.00 \n D:10 1st Qu.: 7.00 1st Qu.: 2.00 \n F:10 Median :10.50 Median : 7.00 \n Mean :10.73 Mean : 7.90 \n 3rd Qu.:13.75 3rd Qu.:12.75 \n Max. :21.00 Max. :23.00 \n```\n\n\n:::\n:::\n\n\n## The Model\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel_ancova <- lm(post ~ drug + pre, data = df_sas)\n\nmodel_glance <- model_ancova |>\n glance()\nmodel_tidy <- model_ancova |>\n tidy()\nmodel_glance |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n\n\n\n \n \n
r.squaredadj.r.squaredsigmastatisticp.valuedflogLikAICBICdeviancedf.residualnobs
0.67626090.63890644.00577818.103861.501369e-063-82.05377174.1075181.1135417.20262630
\n
\n```\n\n:::\n\n```{.r .cell-code}\nmodel_tidy |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n \n\n\n\n\n \n\n\n\n\n \n\n\n\n\n \n \n
termestimatestd.errorstatisticp.value
(Intercept)-3.88080941.9862017-1.95388496.155192e-02
drugD0.10897131.79513510.06070379.520594e-01
drugF3.44613831.88678061.82646477.928458e-02
pre0.98718380.16449766.00120612.454330e-06
\n
\n```\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel_table <- model_ancova |>\n anova() |>\n tidy()\n\ntotal_df <- sum(model_table$df)\ntotal_sumsq <- sum(model_table$sumsq)\n\nmodel_table |>\n add_row(term = \"Total\", df = total_df, sumsq = total_sumsq) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n \n
termdfsumsqmeansqstatisticp.value
drug2293.6000146.800009.1485539.812371e-04
pre1577.8974577.8974036.0144752.454330e-06
Residuals26417.202616.04625NANA
Total291288.7000NANANA
\n
\n```\n\n:::\n:::\n\n\n### Sums of Squares Tables {.unnumbered}\n\n#### Type I\nThis can be calculated using, the base R {stats} package or the {rstatix} package. Both give the same result.\n\n##### stats\n\n\n::: {.cell}\n\n```{.r .cell-code}\nstats::anova(model_ancova)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nAnalysis of Variance Table\n\nResponse: post\n Df Sum Sq Mean Sq F value Pr(>F) \ndrug 2 293.6 146.80 9.1486 0.0009812 ***\npre 1 577.9 577.90 36.0145 2.454e-06 ***\nResiduals 26 417.2 16.05 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n```\n\n\n:::\n:::\n\n\n##### rstatix\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndf_sas |>\n anova_test(post ~ drug + pre, type = 1, detailed = TRUE) |>\n get_anova_table() |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
EffectDFnDFdSSnSSdFpp<.05ges
drug226293.600417.2039.1499.81e-04*0.413
pre126577.897417.20336.0142.45e-06*0.581
\n
\n```\n\n:::\n:::\n\n\n#### Type II\n\nThis can be calculated using the {car} package or the {rstatix} package. Both give the same result.\n\n##### car\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncar::Anova(model_ancova, type = \"II\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nAnova Table (Type II tests)\n\nResponse: post\n Sum Sq Df F value Pr(>F) \ndrug 68.55 2 2.1361 0.1384 \npre 577.90 1 36.0145 2.454e-06 ***\nResiduals 417.20 26 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n```\n\n\n:::\n:::\n\n\n##### rstatix\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndf_sas |>\n anova_test(post ~ drug + pre, type = 2, detailed = TRUE) |>\n get_anova_table() |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
EffectSSnSSdDFnDFdFpp<.05ges
drug68.554417.2032262.1361.38e-010.141
pre577.897417.20312636.0142.45e-06*0.581
\n
\n```\n\n:::\n:::\n\n\n#### Type III\n\nThis can be calculated using the base R {stats} package, the {car} package or the {rstatix} package. All give the same result.\n\nNote: Calculating type III sums of squares in R is a bit tricky, because the multi-way ANOVA model is over-paramerterised. So when running the linear model we need to select a design matrix that sums to zero. In R those options will be either `\"contr.sum\"` or `\"contr.poly\"`\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Drug design matrix\ncontr.sum(4) # Using 4 here as we have 4 levels of drug\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n [,1] [,2] [,3]\n1 1 0 0\n2 0 1 0\n3 0 0 1\n4 -1 -1 -1\n```\n\n\n:::\n\n```{.r .cell-code}\n# Disease design matrix\ncontr.sum(3)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n [,1] [,2]\n1 1 0\n2 0 1\n3 -1 -1\n```\n\n\n:::\n:::\n\n\nWhile not relevant for this example as the disease variable isn't ordinal the polynomial design matrix would look like\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncontr.poly(3)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n .L .Q\n[1,] -7.071068e-01 0.4082483\n[2,] -7.850462e-17 -0.8164966\n[3,] 7.071068e-01 0.4082483\n```\n\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel_ancova <- lm(\n post ~ drug + pre, data = df_sas,\n contrasts = list(drug = \"contr.sum\")\n)\n```\n:::\n\n\n##### stats\n\nUsing the base stats package, you can use the `drop1()` function which drops all possible single terms in a model. The scope term specifies how things can be dropped.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nstats::drop1(model_ancova, scope = . ~ ., test = \"F\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nSingle term deletions\n\nModel:\npost ~ drug + pre\n Df Sum of Sq RSS AIC F value Pr(>F) \n 417.20 86.971 \ndrug 2 68.55 485.76 87.535 2.1361 0.1384 \npre 1 577.90 995.10 111.049 36.0145 2.454e-06 ***\n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n```\n\n\n:::\n:::\n\n\n##### car\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncar::Anova(model_ancova, type = \"III\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nAnova Table (Type III tests)\n\nResponse: post\n Sum Sq Df F value Pr(>F) \n(Intercept) 31.93 1 1.9898 0.1702 \ndrug 68.55 2 2.1361 0.1384 \npre 577.90 1 36.0145 2.454e-06 ***\nResiduals 417.20 26 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n```\n\n\n:::\n:::\n\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndf_sas |>\n anova_test(post ~ drug + pre, type = 3, detailed = TRUE) |>\n get_anova_table() |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
EffectSSnSSdDFnDFdFpp<.05ges
(Intercept)31.929417.2031261.9901.70e-010.071
drug68.554417.2032262.1361.38e-010.141
pre577.897417.20312636.0142.45e-06*0.581
\n
\n```\n\n:::\n:::\n\n\n### Least Squares Means\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel_ancova |>\n emmeans::lsmeans(\"drug\") |>\n emmeans::pwpm(pvals = TRUE, means = TRUE)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n A D F\nA [ 6.71] 0.9980 0.1809\nD -0.109 [ 6.82] 0.1893\nF -3.446 -3.337 [10.16]\n\nRow and column labels: drug\nUpper triangle: P values adjust = \"tukey\"\nDiagonal: [Estimates] (lsmean) \nLower triangle: Comparisons (estimate) earlier vs. later\n```\n\n\n:::\n\n```{.r .cell-code}\nmodel_ancova |>\n emmeans::lsmeans(\"drug\") |>\n plot(comparisons = TRUE)\n```\n\n::: {.cell-output-display}\n![](ancova_files/figure-html/unnamed-chunk-14-1.png){width=672}\n:::\n:::\n\n\n## sasLM Package\n\nThe following code performs an ANCOVA analysis using the **sasLM** package. This package was written specifically to replicate SAS statistics. The console output is also organized in a manner that is similar to SAS.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(sasLM)\n\nsasLM::GLM(post ~ drug + pre, df_sas, BETA = TRUE, EMEAN = TRUE)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n$ANOVA\nResponse : post\n Df Sum Sq Mean Sq F value Pr(>F) \nMODEL 3 871.5 290.499 18.104 1.501e-06 ***\nRESIDUALS 26 417.2 16.046 \nCORRECTED TOTAL 29 1288.7 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n$Fitness\n Root MSE post Mean Coef Var R-square Adj R-sq\n 4.005778 7.9 50.70604 0.6762609 0.6389064\n\n$`Type I`\n Df Sum Sq Mean Sq F value Pr(>F) \ndrug 2 293.6 146.8 9.1486 0.0009812 ***\npre 1 577.9 577.9 36.0145 2.454e-06 ***\n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n$`Type II`\n Df Sum Sq Mean Sq F value Pr(>F) \ndrug 2 68.55 34.28 2.1361 0.1384 \npre 1 577.90 577.90 36.0145 2.454e-06 ***\n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n$`Type III`\n Df Sum Sq Mean Sq F value Pr(>F) \ndrug 2 68.55 34.28 2.1361 0.1384 \npre 1 577.90 577.90 36.0145 2.454e-06 ***\n---\nSignif. 
codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n$Parameter\n Estimate Estimable Std. Error Df t value Pr(>|t|) \n(Intercept) -0.4347 0 2.4714 26 -0.1759 0.86175 \ndrugA -3.4461 0 1.8868 26 -1.8265 0.07928 . \ndrugD -3.3372 0 1.8539 26 -1.8001 0.08346 . \ndrugF 0.0000 0 0.0000 26 \npre 0.9872 1 0.1645 26 6.0012 2.454e-06 ***\n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n$`Expected Mean`\n LSmean LowerCL UpperCL SE Df\n(Intercept) 7.900000 6.396685 9.403315 0.7313516 26\ndrugA 6.714963 4.066426 9.363501 1.2884943 26\ndrugD 6.823935 4.208337 9.439532 1.2724690 26\ndrugF 10.161102 7.456182 12.866021 1.3159234 26\npre 7.900000 6.396685 9.403315 0.7313516 26\n```\n\n\n:::\n:::\n\n\nNote that the LSMEANS statistics are produced using the `EMEAN = TRUE` option. The `BETA = TRUE` option is equivalent to the `SOLUTION` option in SAS. See the **sasLM** documentation for additional information.\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os Ubuntu 24.04.4 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-26\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P broom * 1.0.12 2026-01-27 [?] RSPM (R 4.5.0)\n P car 3.1-5 2026-02-03 [?] RSPM (R 4.5.0)\n P dplyr * 1.2.0 2026-02-03 [?] RSPM (R 4.5.0)\n P emmeans * 2.0.1 2025-12-16 [?] RSPM (R 4.5.0)\n P gt * 1.3.0 2026-01-22 [?] RSPM (R 4.5.0)\n P rstatix * 0.7.3 2025-10-18 [?] RSPM (R 4.5.0)\n P sasLM * 0.10.7 2025-09-28 [?] RSPM (R 4.5.0)\n P tibble * 3.3.1 2026-01-11 [?] 
RSPM (R 4.5.0)\n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.2/lib/R/library\n\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n:::", "supporting": [ "ancova_files" ], diff --git a/_freeze/R/ancova/figure-html/unnamed-chunk-14-1.png b/_freeze/R/ancova/figure-html/unnamed-chunk-14-1.png index b124310ca..9099e36fa 100644 Binary files a/_freeze/R/ancova/figure-html/unnamed-chunk-14-1.png and b/_freeze/R/ancova/figure-html/unnamed-chunk-14-1.png differ diff --git a/_freeze/R/anova/execute-results/html.json b/_freeze/R/anova/execute-results/html.json index 893546791..7632b1237 100644 --- a/_freeze/R/anova/execute-results/html.json +++ b/_freeze/R/anova/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "7cc09c6ef70c7df740ec8e88116f7081", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"ANOVA\"\n---\n\n## Introduction\n\nANOVA (Analysis of Variance) is a statistical method used to compare the means of three or more groups to determine if at least one group mean is significantly different from the others. 
It helps to test hypotheses about group differences based on sample data.\n\nThe key assumptions include:\n\n- Independence of observations\n- Normality of the data (the distribution should be approximately normal)\n- Homogeneity of variances (similar variances across groups)\n\nCommon types include one-way ANOVA (one independent variable) and two-way ANOVA (two independent variables).\n\nOne-way ANOVA tests the effect of a single independent variable on a dependent variable (the grouping factor).\n\nTwo-way ANOVA tests the effect of two independent variables on a dependent variable and also examines if there is an interaction between the two independent variables.\n\n### Getting Started\n\nTo demonstrate the various types of sums of squares, we'll create a data frame called `df_disease` taken from the SAS documentation. The corresponding data can be found [here](https://github.com/PSIAIMS/CAMIS/blob/main/data/sas_disease.csv).\n\n\n\n### The Model {.unnumbered}\n\nFor this example, we're testing for a significant difference in `stem_length` using ANOVA. Before getting the sums of squares and associated p-values from the ANOVA, we need to fit a linear model. In R, we're using `lm()` to fit the model, and then using `broom::glance()` and `broom::tidy()` to view the results in a table format.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlm_model <- lm(y ~ drug + disease + drug * disease, df_disease)\n```\n:::\n\n\nThe `glance` function gives us a summary of the model diagnostic values.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlm_model |>\n glance()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 12\n r.squared adj.r.squared sigma statistic p.value df logLik AIC BIC\n \n1 0.456 0.326 10.5 3.51 0.00130 11 -212. 450. 
477.\n# ℹ 3 more variables: deviance , df.residual , nobs \n```\n\n\n:::\n:::\n\n\nThe `tidy` function gives a summary of the model results.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlm_model |>\n tidy()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 12 × 5\n term estimate std.error statistic p.value\n \n 1 (Intercept) 29.3 4.29 6.84 0.0000000160\n 2 drug2 -1.33 6.36 -0.210 0.835 \n 3 drug3 -13.0 7.43 -1.75 0.0869 \n 4 drug4 -15.7 6.36 -2.47 0.0172 \n 5 disease2 -1.08 6.78 -0.160 0.874 \n 6 disease3 -8.93 6.36 -1.40 0.167 \n 7 drug2:disease2 6.58 9.78 0.673 0.504 \n 8 drug3:disease2 -10.8 10.2 -1.06 0.295 \n 9 drug4:disease2 0.317 9.30 0.0340 0.973 \n10 drug2:disease3 -0.900 9.00 -0.100 0.921 \n11 drug3:disease3 1.10 10.2 0.107 0.915 \n12 drug4:disease3 9.53 9.20 1.04 0.306 \n```\n\n\n:::\n:::\n\n\n\n### Sums of Squares Tables {.unnumbered}\n\n#### Type I \n\nType I sums of square, also known as sequential ANOVA, is a method of analysis of variance where model terms are assessed sequentially. In this approach, the contribution of each factor or variable to the model is evaluated in the order they are specified, with each factor being adjusted for the effects of those that precede it. This means that the significance of a factor can depend on the factors that have already been included in the model. Type I ANOVA is useful for hierarchical models, where the sequence of entering factors into the model is meaningful or based on theoretical considerations. While possible to use on unbalanced designs it is often not testing the hypothesis of interest. \n\nFor a model with two factors, A and B (in that order) the sums of squares will be tested like this: \n- SS(A) for factor A. \n- SS(B | A) for factor B. \n- SS(AB | B, A) for interaction AB. \n\nThis can be calculated using, the base R {stats} package or the {rstatix} package. Both give the same result. 
\n\n##### stats\n\n::: {.cell}\n\n```{.r .cell-code}\nstats::anova(lm_model)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nAnalysis of Variance Table\n\nResponse: y\n Df Sum Sq Mean Sq F value Pr(>F) \ndrug 3 3133.2 1044.41 9.4558 5.58e-05 ***\ndisease 2 418.8 209.42 1.8960 0.1617 \ndrug:disease 6 707.3 117.88 1.0672 0.3958 \nResiduals 46 5080.8 110.45 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n```\n\n\n:::\n:::\n\n\n##### rstatix \n\n::: {.cell}\n\n```{.r .cell-code}\ndf_disease |>\n rstatix::anova_test(\n y ~ drug + disease + drug * disease,\n type = 1,\n detailed = TRUE\n )\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWarning: NA detected in rows: 8,10,15,20,25,29,37,38,41,43,51,54,56,72.\nRemoving this rows before the analysis.\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stdout}\n\n```\nANOVA Table (type I tests)\n\n Effect DFn DFd SSn SSd F p p<.05 ges\n1 drug 3 46 3133.239 5080.817 9.456 5.58e-05 * 0.381\n2 disease 2 46 418.834 5080.817 1.896 1.62e-01 0.076\n3 drug:disease 6 46 707.266 5080.817 1.067 3.96e-01 0.122\n```\n\n\n:::\n:::\n\n\n\n#### Type II \n\nType II sum of squares also known as hierarchical or partially sequential sums of squares. Tests the effect of adding a factor to the model after all other factors have been added. This means that the significance of a factor is assessed while controlling for the effects of all other factors in the model, but not for interactions. Type II ANOVA is particularly useful when there are no interactions in the model or when the focus is on main effects only. It is often used in unbalanced designs, where the number of observations varies across groups.\n\nFor a model with two factors, A and B (in that order) the sums of squares will be tested like this: \n- SS(A | B) for factor A. \n- SS(B | A) for factor B. \n\nThis can be calculated using the {car} package or the {rstatix} package. 
Both give the same result.\n\n\n##### car\n\n::: {.cell}\n\n```{.r .cell-code}\ncar::Anova(lm_model, type = \"II\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nAnova Table (Type II tests)\n\nResponse: y\n Sum Sq Df F value Pr(>F) \ndrug 3063.4 3 9.2451 6.748e-05 ***\ndisease 418.8 2 1.8960 0.1617 \ndrug:disease 707.3 6 1.0672 0.3958 \nResiduals 5080.8 46 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n```\n\n\n:::\n:::\n\n\n##### rstatix \n\n::: {.cell}\n\n```{.r .cell-code}\ndf_disease |>\n rstatix::anova_test(\n y ~ drug + disease + drug * disease,\n type = 2,\n detailed = TRUE\n )\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWarning: NA detected in rows: 8,10,15,20,25,29,37,38,41,43,51,54,56,72.\nRemoving this rows before the analysis.\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stdout}\n\n```\nANOVA Table (type II tests)\n\n Effect SSn SSd DFn DFd F p p<.05 ges\n1 drug 3063.433 5080.817 3 46 9.245 6.75e-05 * 0.376\n2 disease 418.834 5080.817 2 46 1.896 1.62e-01 0.076\n3 drug:disease 707.266 5080.817 6 46 1.067 3.96e-01 0.122\n```\n\n\n:::\n:::\n\n\n\n#### Type III \n\nType III sum of squares is calculated such that every effect is adjusted for all other effect. This means testing for the presence of a main effect after adjusting for other main effects and interactions. \nFor a model with two factors, A and B (in that order) the sums of squares will be tested like this: \n- SS(A | B, AB) for factor A. \n- SS(B | A, AB) for factor B. \n\nThis can be calculated using the base R {stats} package, the {car} package or the {rstatix} package. All give the same result. \n\nNote: Calculating type III sums of squares in R is a bit tricky, because the multi-way ANOVA model is over-paramerterised. So when running the linear model we need to select a design matrix that sums to zero. 
In R those options will be either `\"contr.sum\"` or `\"contr.poly\"`\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Drug design matrix\ncontr.sum(4) # Using 4 here as we have 4 levels of drug\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n [,1] [,2] [,3]\n1 1 0 0\n2 0 1 0\n3 0 0 1\n4 -1 -1 -1\n```\n\n\n:::\n\n```{.r .cell-code}\n# Disease design matrix\ncontr.sum(3)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n [,1] [,2]\n1 1 0\n2 0 1\n3 -1 -1\n```\n\n\n:::\n:::\n\n\nWhile not relevant for this example as the disease variable isn't ordinal the polynomial design matrix would look like \n\n::: {.cell}\n\n```{.r .cell-code}\ncontr.poly(3)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n .L .Q\n[1,] -7.071068e-01 0.4082483\n[2,] -9.681035e-17 -0.8164966\n[3,] 7.071068e-01 0.4082483\n```\n\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlm_model <- lm(\n y ~ drug + disease + drug * disease,\n df_disease,\n contrasts = list(drug = \"contr.sum\", disease = \"contr.sum\")\n)\n```\n:::\n\n\n##### stats\n\nUsing the base stats package, you can use the `drop1()` function which drops all possible single terms in a model. The scope term specifies how things can be dropped.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nstats::drop1(lm_model, scope = . ~ ., test = \"F\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nSingle term deletions\n\nModel:\ny ~ drug + disease + drug * disease\n Df Sum of Sq RSS AIC F value Pr(>F) \n 5080.8 283.42 \ndrug 3 2997.47 8078.3 304.32 9.0460 8.086e-05 ***\ndisease 2 415.87 5496.7 283.99 1.8826 0.1637 \ndrug:disease 6 707.27 5788.1 278.98 1.0672 0.3958 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n```\n\n\n:::\n:::\n\n\n##### car\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncar::Anova(lm_model, type = \"III\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nAnova Table (Type III tests)\n\nResponse: y\n Sum Sq Df F value Pr(>F) \n(Intercept) 20037.6 1 181.4138 < 2.2e-16 ***\ndrug 2997.5 3 9.0460 8.086e-05 ***\ndisease 415.9 2 1.8826 0.1637 \ndrug:disease 707.3 6 1.0672 0.3958 \nResiduals 5080.8 46 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n```\n\n\n:::\n:::\n\n\n##### rstatix \nThe `rstatix` package uses the `car` package to do the anova calculation, but can be nicer to use as it handles the contrasts for you and is more \"pipe-able\".\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndf_disease |>\n rstatix::anova_test(\n y ~ drug + disease + drug * disease,\n type = 3,\n detailed = TRUE\n )\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWarning: NA detected in rows: 8,10,15,20,25,29,37,38,41,43,51,54,56,72.\nRemoving this rows before the analysis.\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stdout}\n\n```\nANOVA Table (type III tests)\n\n Effect SSn SSd DFn DFd F p p<.05 ges\n1 (Intercept) 20037.613 5080.817 1 46 181.414 1.42e-17 * 0.798\n2 drug 2997.472 5080.817 3 46 9.046 8.09e-05 * 0.371\n3 disease 415.873 5080.817 2 46 1.883 1.64e-01 0.076\n4 drug:disease 707.266 5080.817 6 46 1.067 3.96e-01 0.122\n```\n\n\n:::\n:::\n\n\n\n#### Type IV {.unnumbered}\n\nIn R there is no equivalent operation to the `Type IV` sums of squares calculation in SAS.\n\n\n### Contrasts {.unnumbered}\n\nThe easiest way to get contrasts in R is by using `emmeans`. For looking at contrast we are going to fit a different model on new data, that doesn't include an interaction term as it is easier to calculate contrasts without an interaction term. 
For this dataset we have three different drugs A, C, and E.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndf_trial <- read.csv(\"../data/drug_trial.csv\")\n\nlm(formula = post ~ pre + drug, data = df_trial) |>\n emmeans(\"drug\") |>\n contrast(\n method = list(\n \"C vs A\" = c(-1, 1, 0),\n \"E vs CA\" = c(-1, -1, 2)\n )\n )\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n contrast estimate SE df t.ratio p.value\n C vs A 0.109 1.80 26 0.061 0.9521\n E vs CA 6.783 3.28 26 2.067 0.0488\n```\n\n\n:::\n:::\n\n\n## References\nGöttingen University. (n.d.). Type II and III SS using the car package. Retrieved 19 August 2025, from https://md.psych.bio.uni-goettingen.de/mv/unit/lm_cat/lm_cat_unbal_ss_explained.html#type-ii-and-iii-ss-using-the-car-package\n", + "markdown": "---\ntitle: \"ANOVA\"\n---\n\n## Introduction\n\nANOVA (Analysis of Variance) is a statistical method used to compare the means of three or more groups to determine if at least one group mean is significantly different from the others. It helps to test hypotheses about group differences based on sample data.\n\nThe key assumptions include:\n\n- Independence of observations\n- Normality of the data (the distribution should be approximately normal)\n- Homogeneity of variances (similar variances across groups)\n\nCommon types include one-way ANOVA (one independent variable) and two-way ANOVA (two independent variables).\n\nOne-way ANOVA tests the effect of a single independent variable on a dependent variable (the grouping factor).\n\nTwo-way ANOVA tests the effect of two independent variables on a dependent variable and also examines if there is an interaction between the two independent variables.\n\n### Getting Started\n\nTo demonstrate the various types of sums of squares, we'll create a data frame called `df_disease` taken from the SAS documentation. 
The corresponding data can be found [here](https://github.com/PSIAIMS/CAMIS/blob/main/data/sas_disease.csv).\n\n\n\n### The Model {.unnumbered}\n\nFor this example, we're testing for a significant difference in `stem_length` using ANOVA. Before getting the sums of squares and associated p-values from the ANOVA, we need to fit a linear model. In R, we're using `lm()` to fit the model, and then using `broom::glance()` and `broom::tidy()` to view the results in a table format.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlm_model <- lm(y ~ drug + disease + drug * disease, df_disease)\n```\n:::\n\n\nThe `glance` function gives us a summary of the model diagnostic values.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlm_model |>\n glance()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 12\n r.squared adj.r.squared sigma statistic p.value df logLik AIC BIC\n \n1 0.456 0.326 10.5 3.51 0.00130 11 -212. 450. 477.\n# ℹ 3 more variables: deviance , df.residual , nobs \n```\n\n\n:::\n:::\n\n\nThe `tidy` function gives a summary of the model results.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlm_model |>\n tidy()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 12 × 5\n term estimate std.error statistic p.value\n \n 1 (Intercept) 29.3 4.29 6.84 0.0000000160\n 2 drug2 -1.33 6.36 -0.210 0.835 \n 3 drug3 -13 7.43 -1.75 0.0869 \n 4 drug4 -15.7 6.36 -2.47 0.0172 \n 5 disease2 -1.08 6.78 -0.160 0.874 \n 6 disease3 -8.93 6.36 -1.40 0.167 \n 7 drug2:disease2 6.58 9.78 0.673 0.504 \n 8 drug3:disease2 -10.9 10.2 -1.06 0.295 \n 9 drug4:disease2 0.317 9.30 0.0340 0.973 \n10 drug2:disease3 -0.900 9.00 -0.100 0.921 \n11 drug3:disease3 1.10 10.2 0.107 0.915 \n12 drug4:disease3 9.53 9.20 1.04 0.306 \n```\n\n\n:::\n:::\n\n\n\n### Sums of Squares Tables {.unnumbered}\n\n#### Type I \n\nType I sums of square, also known as sequential ANOVA, is a method of analysis of variance where model terms are assessed sequentially. 
In this approach, the contribution of each factor or variable to the model is evaluated in the order they are specified, with each factor being adjusted for the effects of those that precede it. This means that the significance of a factor can depend on the factors that have already been included in the model. Type I ANOVA is useful for hierarchical models, where the sequence of entering factors into the model is meaningful or based on theoretical considerations. While possible to use on unbalanced designs it is often not testing the hypothesis of interest. \n\nFor a model with two factors, A and B (in that order) the sums of squares will be tested like this: \n- SS(A) for factor A. \n- SS(B | A) for factor B. \n- SS(AB | B, A) for interaction AB. \n\nThis can be calculated using, the base R {stats} package or the {rstatix} package. Both give the same result. \n\n##### stats\n\n::: {.cell}\n\n```{.r .cell-code}\nstats::anova(lm_model)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nAnalysis of Variance Table\n\nResponse: y\n Df Sum Sq Mean Sq F value Pr(>F) \ndrug 3 3133.2 1044.41 9.4558 5.58e-05 ***\ndisease 2 418.8 209.42 1.8960 0.1617 \ndrug:disease 6 707.3 117.88 1.0672 0.3958 \nResiduals 46 5080.8 110.45 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n```\n\n\n:::\n:::\n\n\n##### rstatix \n\n::: {.cell}\n\n```{.r .cell-code}\ndf_disease |>\n rstatix::anova_test(\n y ~ drug + disease + drug * disease,\n type = 1,\n detailed = TRUE\n )\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWarning: NA detected in rows: 8,10,15,20,25,29,37,38,41,43,51,54,56,72.\nRemoving this rows before the analysis.\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stdout}\n\n```\nANOVA Table (type I tests)\n\n Effect DFn DFd SSn SSd F p p<.05 ges\n1 drug 3 46 3133.239 5080.817 9.456 5.58e-05 * 0.381\n2 disease 2 46 418.834 5080.817 1.896 1.62e-01 0.076\n3 drug:disease 6 46 707.266 5080.817 1.067 3.96e-01 0.122\n```\n\n\n:::\n:::\n\n\n\n#### Type II \n\nType II sum of squares also known as hierarchical or partially sequential sums of squares. Tests the effect of adding a factor to the model after all other factors have been added. This means that the significance of a factor is assessed while controlling for the effects of all other factors in the model, but not for interactions. Type II ANOVA is particularly useful when there are no interactions in the model or when the focus is on main effects only. It is often used in unbalanced designs, where the number of observations varies across groups.\n\nFor a model with two factors, A and B (in that order) the sums of squares will be tested like this: \n- SS(A | B) for factor A. \n- SS(B | A) for factor B. \n\nThis can be calculated using the {car} package or the {rstatix} package. Both give the same result.\n\n\n##### car\n\n::: {.cell}\n\n```{.r .cell-code}\ncar::Anova(lm_model, type = \"II\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nAnova Table (Type II tests)\n\nResponse: y\n Sum Sq Df F value Pr(>F) \ndrug 3063.4 3 9.2451 6.748e-05 ***\ndisease 418.8 2 1.8960 0.1617 \ndrug:disease 707.3 6 1.0672 0.3958 \nResiduals 5080.8 46 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n```\n\n\n:::\n:::\n\n\n##### rstatix \n\n::: {.cell}\n\n```{.r .cell-code}\ndf_disease |>\n rstatix::anova_test(\n y ~ drug + disease + drug * disease,\n type = 2,\n detailed = TRUE\n )\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWarning: NA detected in rows: 8,10,15,20,25,29,37,38,41,43,51,54,56,72.\nRemoving this rows before the analysis.\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stdout}\n\n```\nANOVA Table (type II tests)\n\n Effect SSn SSd DFn DFd F p p<.05 ges\n1 drug 3063.433 5080.817 3 46 9.245 6.75e-05 * 0.376\n2 disease 418.834 5080.817 2 46 1.896 1.62e-01 0.076\n3 drug:disease 707.266 5080.817 6 46 1.067 3.96e-01 0.122\n```\n\n\n:::\n:::\n\n\n\n#### Type III \n\nType III sum of squares is calculated such that every effect is adjusted for all other effect. This means testing for the presence of a main effect after adjusting for other main effects and interactions. \nFor a model with two factors, A and B (in that order) the sums of squares will be tested like this: \n- SS(A | B, AB) for factor A. \n- SS(B | A, AB) for factor B. \n\nThis can be calculated using the base R {stats} package, the {car} package or the {rstatix} package. All give the same result. \n\nNote: Calculating type III sums of squares in R is a bit tricky, because the multi-way ANOVA model is over-paramerterised. So when running the linear model we need to select a design matrix that sums to zero. 
In R those options will be either `\"contr.sum\"` or `\"contr.poly\"`\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Drug design matrix\ncontr.sum(4) # Using 4 here as we have 4 levels of drug\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n [,1] [,2] [,3]\n1 1 0 0\n2 0 1 0\n3 0 0 1\n4 -1 -1 -1\n```\n\n\n:::\n\n```{.r .cell-code}\n# Disease design matrix\ncontr.sum(3)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n [,1] [,2]\n1 1 0\n2 0 1\n3 -1 -1\n```\n\n\n:::\n:::\n\n\nWhile not relevant for this example as the disease variable isn't ordinal the polynomial design matrix would look like \n\n::: {.cell}\n\n```{.r .cell-code}\ncontr.poly(3)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n .L .Q\n[1,] -7.071068e-01 0.4082483\n[2,] -7.850462e-17 -0.8164966\n[3,] 7.071068e-01 0.4082483\n```\n\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlm_model <- lm(\n y ~ drug + disease + drug * disease,\n df_disease,\n contrasts = list(drug = \"contr.sum\", disease = \"contr.sum\")\n)\n```\n:::\n\n\n##### stats\n\nUsing the base stats package, you can use the `drop1()` function which drops all possible single terms in a model. The scope term specifies how things can be dropped.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nstats::drop1(lm_model, scope = . ~ ., test = \"F\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nSingle term deletions\n\nModel:\ny ~ drug + disease + drug * disease\n Df Sum of Sq RSS AIC F value Pr(>F) \n 5080.8 283.42 \ndrug 3 2997.47 8078.3 304.32 9.0460 8.086e-05 ***\ndisease 2 415.87 5496.7 283.99 1.8826 0.1637 \ndrug:disease 6 707.27 5788.1 278.98 1.0672 0.3958 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n```\n\n\n:::\n:::\n\n\n##### car\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncar::Anova(lm_model, type = \"III\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nAnova Table (Type III tests)\n\nResponse: y\n Sum Sq Df F value Pr(>F) \n(Intercept) 20037.6 1 181.4138 < 2.2e-16 ***\ndrug 2997.5 3 9.0460 8.086e-05 ***\ndisease 415.9 2 1.8826 0.1637 \ndrug:disease 707.3 6 1.0672 0.3958 \nResiduals 5080.8 46 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n```\n\n\n:::\n:::\n\n\n##### rstatix \nThe `rstatix` package uses the `car` package to do the anova calculation, but can be nicer to use as it handles the contrasts for you and is more \"pipe-able\".\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndf_disease |>\n rstatix::anova_test(\n y ~ drug + disease + drug * disease,\n type = 3,\n detailed = TRUE\n )\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWarning: NA detected in rows: 8,10,15,20,25,29,37,38,41,43,51,54,56,72.\nRemoving this rows before the analysis.\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stdout}\n\n```\nANOVA Table (type III tests)\n\n Effect SSn SSd DFn DFd F p p<.05 ges\n1 (Intercept) 20037.613 5080.817 1 46 181.414 1.42e-17 * 0.798\n2 drug 2997.472 5080.817 3 46 9.046 8.09e-05 * 0.371\n3 disease 415.873 5080.817 2 46 1.883 1.64e-01 0.076\n4 drug:disease 707.266 5080.817 6 46 1.067 3.96e-01 0.122\n```\n\n\n:::\n:::\n\n\n\n#### Type IV {.unnumbered}\n\nIn R there is no equivalent operation to the `Type IV` sums of squares calculation in SAS.\n\n\n### Contrasts {.unnumbered}\n\nThe easiest way to get contrasts in R is by using `emmeans`. For looking at contrast we are going to fit a different model on new data, that doesn't include an interaction term as it is easier to calculate contrasts without an interaction term. 
For this dataset we have three different drugs A, C, and E.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndf_trial <- read.csv(\"../data/drug_trial.csv\")\n\nlm(formula = post ~ pre + drug, data = df_trial) |>\n emmeans(\"drug\") |>\n contrast(\n method = list(\n \"C vs A\" = c(-1, 1, 0),\n \"E vs CA\" = c(-1, -1, 2)\n )\n )\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n contrast estimate SE df t.ratio p.value\n C vs A 0.109 1.80 26 0.061 0.9521\n E vs CA 6.783 3.28 26 2.067 0.0488\n```\n\n\n:::\n:::\n\n\n## References\nGöttingen University. (n.d.). Type II and III SS using the car package. Retrieved 19 August 2025, from https://md.psych.bio.uni-goettingen.de/mv/unit/lm_cat/lm_cat_unbal_ss_explained.html#type-ii-and-iii-ss-using-the-car-package\n", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/R/binomial_test/execute-results/html.json b/_freeze/R/binomial_test/execute-results/html.json index 89a428d76..a45fa01a8 100644 --- a/_freeze/R/binomial_test/execute-results/html.json +++ b/_freeze/R/binomial_test/execute-results/html.json @@ -1,11 +1,9 @@ { - "hash": "b914f85924354e0a2ac767d92e457d92", + "hash": "183c195a38350468d916f616726dce7f", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Binomial Test\"\n---\n\nThe statistical test used to determine whether the proportion in a binary outcome experiment is equal to a specific value. 
It is appropriate when we have a small sample size and want to test the success probability $p$ against a hypothesized value $p_0$.\n\n## Creating a sample dataset\n\n- We will generate a dataset where we record the outcomes of 1000 coin flips.\n\n- We will use the `binom.test` function to test if the proportion of heads is significantly different from 0.5.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nset.seed(19)\ncoin_flips <- sample(c(\"H\", \"T\"), size = 1000, replace = T, prob = c(0.5, 0.5))\n```\n:::\n\n\nNow, we will count the heads and tails and summarize the data.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# heads\nheads_count <- sum(coin_flips == \"H\")\nheads_count\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 513\n```\n\n\n:::\n\n```{.r .cell-code}\n# tails\ntails_count <- sum(coin_flips == \"T\")\ntails_count\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 487\n```\n\n\n:::\n\n```{.r .cell-code}\n# total\ntotal_flips <- length(coin_flips)\ntotal_flips\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 1000\n```\n\n\n:::\n:::\n\n\n## Conducting Binomial Test\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbinom_test_result <- stats::binom.test(heads_count, total_flips, p = 0.5)\nbinom_test_result\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tExact binomial test\n\ndata: heads_count and total_flips\nnumber of successes = 513, number of trials = 1000, p-value = 0.4292\nalternative hypothesis: true probability of success is not equal to 0.5\n95 percent confidence interval:\n 0.4815213 0.5444020\nsample estimates:\nprobability of success \n 0.513 \n```\n\n\n:::\n:::\n\n\n### Results:\n\nThe output has a p-value 0.4292098 $> 0.05$ (chosen level of significance). Hence, we fail to reject the null hypothesis and conclude that the **coin is fair**.\n\n# Example of Clinical Trial Data\n\nWe load the `lung` dataset from `survival` package. 
We want to test if the proportion of patients with survival status 1 (dead) is significantly different from a hypothesized proportion (e.g. 50%)\n\nWe will calculate number of deaths and total number of patients.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(survival)\nattach(lung)\n\n# deaths\nnum_deaths <- sum(lung$status == 1)\nnum_deaths\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 63\n```\n\n\n:::\n\n```{.r .cell-code}\n# total patients\ntotal_pat <- nrow(lung)\ntotal_pat\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 228\n```\n\n\n:::\n:::\n\n\n## Conduct the Binomial Test\n\nWe will conduct the Binomial test and hypothesize that the proportion of death should be 19%.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbinom_test <- stats::binom.test(num_deaths, total_pat, p = 0.19)\nbinom_test\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tExact binomial test\n\ndata: num_deaths and total_pat\nnumber of successes = 63, number of trials = 228, p-value = 0.001683\nalternative hypothesis: true probability of success is not equal to 0.19\n95 percent confidence interval:\n 0.2193322 0.3392187\nsample estimates:\nprobability of success \n 0.2763158 \n```\n\n\n:::\n:::\n\n\n## Results:\n\nThe output has a p-value 0.0016829 $< 0.05$ (chosen level of significance). Hence, we reject the null hypothesis and conclude that **the proportion of death is significantly different from 19%**.\n", - "supporting": [ - "binomial_test_files" - ], + "markdown": "---\ntitle: \"Binomial Test\"\n---\n\nThe statistical test used to determine whether the proportion in a binary outcome experiment is equal to a specific value. It is appropriate when we have a small sample size and want to test the success probability $p$ against a hypothesized value $p_0$.\n\n# Coin flips dataset.\n\n- We will use coin flips dataset generated from SAS simulation to carry out four binomial tests (Exact test, Wald test, Mid-p adjusted test and Wilson score test). 
This is to ensure that the proportion value obtained from coin flips dataset is similar for both software rather than simulating in both instances, which leads to different proportion value.\n\n- We will use the various functions for each test to investigate if the proportion of heads is significantly different from 0.5. Therefore:\n\n$H_0 : p = 0.5$\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# heads\nheads_count <- 520\nheads_count\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 520\n```\n\n\n:::\n\n```{.r .cell-code}\n# tails\ntails_count <- 480\ntails_count\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 480\n```\n\n\n:::\n\n```{.r .cell-code}\n# total\ntotal_flips <- 1000\ntotal_flips\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 1000\n```\n\n\n:::\n:::\n\n\n## 1. Exact Binomial Test.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbinom.test(heads_count, total_flips, p = 0.5, conf.level = 0.95)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tExact binomial test\n\ndata: heads_count and total_flips\nnumber of successes = 520, number of trials = 1000, p-value = 0.2174\nalternative hypothesis: true probability of success is not equal to 0.5\n95 percent confidence interval:\n 0.4885149 0.5513671\nsample estimates:\nprobability of success \n 0.52 \n```\n\n\n:::\n:::\n\n\n### Results:\n\nThe output has a p-value $> 0.05$ (chosen level of significance). Hence, we fail to reject the null hypothesis and conclude that the **coin is fair**.\n\n## 2. 
Wald(Asymptotic) Test.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(DescTools)\np=0.5\nphat<-heads_count/total_flips\nphat\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 0.52\n```\n\n\n:::\n\n```{.r .cell-code}\nz <- (phat - p) / sqrt(p * (1 - p) / total_flips)\n2 * (1 - pnorm(abs(z)))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 0.2059032\n```\n\n\n:::\n\n```{.r .cell-code}\nBinomCI(heads_count, total_flips, method = \"wald\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n est lwr.ci upr.ci\n[1,] 0.52 0.4890351 0.5509649\n```\n\n\n:::\n:::\n\n\n## Results:\n\nThe output has a p-value $> 0.05$ (chosen level of significance). Hence, we fail to reject the null hypothesis and conclude that the **coin is fair**.\n\n## 3. Mid-P adjusted Exact Binomial Test.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(exactci)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nLoading required package: ssanv\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nLoading required package: testthat\n```\n\n\n:::\n\n```{.r .cell-code}\nbinom.exact(heads_count,total_flips, p = 0.5,alternative = \"greater\", midp = TRUE,tsmethod = \"central\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tExact one-sided binomial test, mid-p version\n\ndata: heads_count and total_flips\nnumber of successes = 520, number of trials = 1000, p-value = 0.1031\nalternative hypothesis: true probability of success is greater than 0.5\n95 percent confidence interval:\n 0.4939862 1.0000000\nsample estimates:\nprobability of success \n 0.52 \n```\n\n\n:::\n\n```{.r .cell-code}\nbinom.exact(heads_count,total_flips, p = 0.5,alternative = \"less\", midp = TRUE,tsmethod = \"central\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tExact one-sided binomial test, mid-p version\n\ndata: heads_count and total_flips\nnumber of successes = 520, number of trials = 1000, p-value = 0.8969\nalternative hypothesis: true probability of success is less than 
0.5\n95 percent confidence interval:\n 0.0000000 0.5459277\nsample estimates:\nprobability of success \n 0.52 \n```\n\n\n:::\n\n```{.r .cell-code}\nbinom.exact(heads_count,total_flips, p = 0.5, midp = TRUE,tsmethod = \"central\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tExact two-sided binomial test (central method), mid-p version\n\ndata: heads_count and total_flips\nnumber of successes = 520, number of trials = 1000, p-value = 0.2061\nalternative hypothesis: true probability of success is not equal to 0.5\n95 percent confidence interval:\n 0.4890192 0.5508727\nsample estimates:\nprobability of success \n 0.52 \n```\n\n\n:::\n:::\n\n\n## Results:\n\nThe output has a p-value $> 0.05$ (chosen level of significance). Hence, we fail to reject the null hypothesis and conclude that the **coin is fair**.\n\n## 3. Wilson Score Test.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nprop.test(heads_count, total_flips, p = 0.5, correct = FALSE)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\t1-sample proportions test without continuity correction\n\ndata: heads_count out of total_flips, null probability 0.5\nX-squared = 1.6, df = 1, p-value = 0.2059\nalternative hypothesis: true p is not equal to 0.5\n95 percent confidence interval:\n 0.4890177 0.5508292\nsample estimates:\n p \n0.52 \n```\n\n\n:::\n:::\n\n\n## Results:\n\nThe output has a p-value $> 0.05$ (chosen level of significance). Hence, we fail to reject the null hypothesis and conclude that the **coin is fair**.\n\n# Example of Clinical Trial Data.\n\nWe load the `lung` dataset from `survival` package. We want to test if the proportion of patients with survival status 1 (dead) is significantly different from a hypothesized proportion (e.g. 
50%)\n\n$H_0 : p = 0.19$\n\nWe will calculate number of deaths and total number of patients.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(survival)\nattach(lung)\n\n# deaths\nnum_deaths <- sum(lung$status == 1)\nnum_deaths\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 63\n```\n\n\n:::\n\n```{.r .cell-code}\n# total patients\ntotal_pat <- nrow(lung)\ntotal_pat\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 228\n```\n\n\n:::\n:::\n\n\n## 1. Exact Binomial Test.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Exact (Clopper–Pearson)\nbinom.test(num_deaths, total_pat, p = 0.19, conf.level = 0.95)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tExact binomial test\n\ndata: num_deaths and total_pat\nnumber of successes = 63, number of trials = 228, p-value = 0.001683\nalternative hypothesis: true probability of success is not equal to 0.19\n95 percent confidence interval:\n 0.2193322 0.3392187\nsample estimates:\nprobability of success \n 0.2763158 \n```\n\n\n:::\n:::\n\n\n## Results:\n\nThe output has a p-value $< 0.05$ (chosen level of significance). Hence, we reject the null hypothesis and conclude that **the proportion of death is significantly different from 19%**.\n\n## 2. Wald(Asymptotic) Test\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(DescTools)\np=0.19\nphat<-num_deaths/total_pat\nz <- (phat - p) / sqrt(p * (1 - p) / total_pat)\n 2 * (1 - pnorm(abs(z)))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 0.0008927984\n```\n\n\n:::\n\n```{.r .cell-code}\nBinomCI(num_deaths, total_pat, method = \"wald\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n est lwr.ci upr.ci\n[1,] 0.2763158 0.2182717 0.3343599\n```\n\n\n:::\n:::\n\n\n## Results:\n\nThe output has a p-value $< 0.05$ (chosen level of significance). Hence, we reject the null hypothesis and conclude that **the proportion of death is significantly different from 19%**.\n\n## 3. 
Mid-P adjusted Exact Binomial Test.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(exactci)\nbinom.exact(num_deaths, total_pat, p = 0.19, midp = TRUE,tsmethod = \"central\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tExact two-sided binomial test (central method), mid-p version\n\ndata: num_deaths and total_pat\nnumber of successes = 63, number of trials = 228, p-value = 0.001528\nalternative hypothesis: true probability of success is not equal to 0.19\n95 percent confidence interval:\n 0.2212055 0.3370776\nsample estimates:\nprobability of success \n 0.2763158 \n```\n\n\n:::\n\n```{.r .cell-code}\nbinom.exact(num_deaths, total_pat, p = 0.19,alternative = \"less\", midp = TRUE,tsmethod = \"central\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tExact one-sided binomial test, mid-p version\n\ndata: num_deaths and total_pat\nnumber of successes = 63, number of trials = 228, p-value = 0.9992\nalternative hypothesis: true probability of success is less than 0.19\n95 percent confidence interval:\n 0.00000 0.32708\nsample estimates:\nprobability of success \n 0.2763158 \n```\n\n\n:::\n\n```{.r .cell-code}\nbinom.exact(num_deaths, total_pat, p = 0.19,alternative = \"greater\", midp = TRUE,tsmethod = \"central\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tExact one-sided binomial test, mid-p version\n\ndata: num_deaths and total_pat\nnumber of successes = 63, number of trials = 228, p-value = 0.000764\nalternative hypothesis: true probability of success is greater than 0.19\n95 percent confidence interval:\n 0.2297195 1.0000000\nsample estimates:\nprobability of success \n 0.2763158 \n```\n\n\n:::\n:::\n\n\n## Results:\n\nThe output for right sided and two sided test has a p-value $< 0.05$ (chosen level of significance). Hence, we reject the null hypothesis and conclude that **the proportion of death is significantly different from 19%**.\n\n## 4. 
Wilson Score Test\n\n\n::: {.cell}\n\n```{.r .cell-code}\nprop.test(num_deaths, total_pat, p = 0.19, correct = FALSE)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\t1-sample proportions test without continuity correction\n\ndata: num_deaths out of total_pat, null probability 0.19\nX-squared = 11.038, df = 1, p-value = 0.0008928\nalternative hypothesis: true p is not equal to 0.19\n95 percent confidence interval:\n 0.2223417 0.3377025\nsample estimates:\n p \n0.2763158 \n```\n\n\n:::\n:::\n\n\n## Results:\n\nThe output has a p-value $< 0.05$ (chosen level of significance). Hence, we reject the null hypothesis and conclude that **the proportion of death is significantly different from 19%**.\n\n## Summary:\n\n| Data | Test | P_Value |\n|----------------|---------------------------|---------|\n| Coin Flips | Exact Test | 0.2174 |\n| | Wald Test | 0.2059 |\n| | Mid-p adjusted Exact Test | 0.2061 |\n| | Wilson score Test | 0.2059 |\n| Clinical Trial | Exact Test | 0.0017 |\n| | Wald Test | 0.0009 |\n| | Mid-p adjusted Exact Test | 0.0015 |\n| | Wilson Score Test | 0.0009 |\n\nFor the two datasets, the results for Wald and Wilson Score test match. This implies that the sample data are adequate because Wald and Wilson differ mainly when sample size is small or probability of success is close to \\[0,1\\]. 
In that case Wilson score test will have better coverage.\n\nMore detailed information around CIs for proportions can be found [here](https://psiaims.github.io/CAMIS/R/ci_for_prop.html)\n", + "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" ], diff --git a/_freeze/R/causal_ps_matching/figure-html/unnamed-chunk-11-1.png b/_freeze/R/causal_ps_matching/figure-html/unnamed-chunk-11-1.png index 6f7794020..12130a233 100644 Binary files a/_freeze/R/causal_ps_matching/figure-html/unnamed-chunk-11-1.png and b/_freeze/R/causal_ps_matching/figure-html/unnamed-chunk-11-1.png differ diff --git a/_freeze/R/causal_ps_matching/figure-html/unnamed-chunk-9-1.png b/_freeze/R/causal_ps_matching/figure-html/unnamed-chunk-9-1.png index 819701cfe..dae03d7e6 100644 Binary files a/_freeze/R/causal_ps_matching/figure-html/unnamed-chunk-9-1.png and b/_freeze/R/causal_ps_matching/figure-html/unnamed-chunk-9-1.png differ diff --git a/_freeze/R/ci_for_2indep_prop/execute-results/html.json b/_freeze/R/ci_for_2indep_prop/execute-results/html.json index 06663a314..0f6217cfe 100644 --- a/_freeze/R/ci_for_2indep_prop/execute-results/html.json +++ b/_freeze/R/ci_for_2indep_prop/execute-results/html.json @@ -1,11 +1,9 @@ { - "hash": "38d5546f6ff5734c08ab1e4a4bfe7104", + "hash": "201686e174ccac08f30faece13d4a2ce", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Confidence Intervals for Independent Proportions in R\"\nbibliography: references.bib\ncsl: nature.csl \n---\n\n# Introduction\n\nThis page covers confidence intervals for comparisons of two independent proportions in R, including the contrast parameters for risk difference (RD) $\\theta_{RD} = p_1 - p_2$, relative risk (RR) $\\theta_{RR} = p_1 / p_2$, and odds ratio (OR) $\\theta_{OR} = p_1(1-p_2) / (p_2(1-p_1))$.\n\nSee the [summary page](../method_summary/ci_for_prop_intro.html) for general introductory information on confidence intervals for proportions, including the principles underlying the most common methods.\n\nNote 
that because the Asymptotic Score methods rely on an iterative root-finding subroutine to identify the confidence limits, the precision of results (and therefore agreement between packages) depends on the tolerance parameter used.\n\n# Data used\n\nThe adcibc data stored [here](../data/adcibc.csv) was used in this example, creating a binary treatment variable `trt` taking the values of `ACT` or `PBO` and a binary response variable `resp` taking the values of `Yes` or `No`. For this example, a response is defined as a score greater than 4.\n\n\n\nThe below shows that for the Active Treatment, there are 36 responders out of 154 subjects, $p_1$ = 0.2338 (23.38% responders), while for the placebo treatment $p_2$ = 12/77 = 0.1558, giving a risk difference of 0.0779, relative risk 1.50, and odds ratio 1.6525.\n\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 4 × 3\n# Groups: trt [2]\n trt resp n\n \n1 ACT No 118\n2 ACT Yes 36\n3 PBO No 65\n4 PBO Yes 12\n```\n\n\n:::\n:::\n\n\n# Packages\n\nThe table below indicates which methods can be produced using each package, for each contrast. Methods are grouped by those that aim to achieve the nominal confidence interval on average, then the 'exact' and continuity adjusted methods that aim to achieve the nominal confidence level as a minimum. 
{ExactCIdiff} appears to be the only package offering an 'exact' method for RD, but run times can be prohibitively long, and it is not clear which of the SAS 'exact' methods it matches with, if any.\n\n| | ratesci | contingencytables | DescTools | cicalc | gsDesign | PropCIs | cardx |\n|---------------|--------|--------|--------|--------|--------|--------|--------|\n| For proximate coverage: | | | | | | | |\n| **Wald/log** | RD,RR,OR | RD,RR,OR | RD,RR,OR | RD | \\- | \\- | RD |\n| **Agresti-Caffo** | RD | RD | RD | \\- | \\- | \\- | \\- |\n| **MOVER-Wilson (Newcombe)** | RD,RR,OR | RD,RR,OR | RD | RD | \\- | \\- | \\- |\n| **MOVER-Jeffreys** | RD,RR,OR | \\- | \\- | \\- | \\- | \\- | \\- |\n| Asymptotic Score methods: | | | | | | | |\n| **Miettinen-Nurminen** | RD,RR,OR | RD,RR,OR | RD | RD | RD,RR,OR | RD | \\- |\n| **Mee/ Koopman** | RD,RR,OR | RD,RR,OR | RD,RR | RD | RD,RR,OR | RR | \\- |\n| **SCAS** | RD,RR,OR | \\- | \\- | \\- | \\- | \\- | \\- |\n| For conservative coverage: | | | | | | | |\n| **Wald-cc** | RD | RD | RD | RD | \\- | \\- | RD |\n| **MOVER-Wilson-cc** | RD,RR,OR | \\- | RD | RD | \\- | \\- | \\- |\n| **MOVER-Jeffreys-cc** | RD,RR,OR | \\- | \\- | \\- | \\- | \\- | \\- |\n| **MN-cc** | RD,RR,OR | \\- | \\- | \\- | \\- | \\- | \\- |\n| **SCAS-cc** | RD,RR,OR | \\- | \\- | \\- | \\- | \\- | \\- |\n| **'Exact' SS/CZ/AM?** | \\- | OR | \\- | \\- | \\- | \\- | \\- |\n\n**The {DescTools} package** has a function `BinomDiffCI()` which produces CIs for RD using numerous different methods including those indicated above plus Brown-Li(Jeffreys), Hauck-Anderson, and Haldane. See [here](https://search.r-project.org/CRAN/refmans/DescTools/html/BinomDiffCI.html) for more detail. The `BinomRatioCI()` function offers several methods for RR CIs, including the approximate (log)normal and Asymptotic Score (Koopman) methods. 
The methods available for OR are more limited: `OddsRatio()` only has Wald, MLE and mid-P CI methods.\n\n**The {ratesci} package** has the most extensive coverage of the best-performing methods (Asymptotic Score and MOVER) for all contrasts, including several features not available elsewhere, including the skewness correction for improved symmetrical one-sided coverage (SCAS), corresponding hypothesis tests, an option to apply the MOVER approach with Jeffreys intervals (MOVER-J), and optional 'sliding scale' continuity adjustments across all methods.\n\n**The {PropCIs} package** has functions `diffscoreci()` for the Miettinen-Nurminen (MN) CI for RD, `riskscoreci()` for the Koopman interval for RR, and `orscoreci()` for the MN CI for OR. It also has functions providing Bayesian tail intervals (with user-specified prior).\n\n**The {contingencytables} package** also provides a good selection of different methods.\n\n**The {cicalc} package** in general replicates the methods available in SAS, but only for the RD contrast.\n\n**The {gsDesign} package** gives Asymptotic Score intervals (with or without 'N-1' correction) for all contrasts with the `ciBinomial()` function. The package also has functions for the corresponding hypothesis tests, and sample size calculations.\n\n**The {cardx} package** has very limited options for comparing proportions - the `cardx::ard_stats_prop_test` function only provides for estimation of RD with the Wald approximate normal method (which is not recommended).\n\n```{=html}\n\n```\n\n# Methods for Calculating Confidence Intervals for Proportion Difference from 2 independent samples\n\nThis [paper](https://www.lexjansen.com/wuss/2016/127_Final_Paper_PDF.pdf) describes many methods for the calculation of confidence intervals for 2 independent proportions. The 2-sided and 1-sided performance of many of the same methods have been compared graphically[@laud2014]. 
For more technical information regarding the methods below see the corresponding [SAS page](https://psiaims.github.io/CAMIS/SAS/ci_for_2indep_prop.html).\n\n````{=html}\n\n````\n\n### Example code for {DescTools}\n\n\n::: {.cell}\n\n```{.r .cell-code}\nindat1 <- adcibc2 |>\n select(AVAL, TRTP) |>\n mutate(\n resp = if_else(AVAL > 4, \"Yes\", \"No\"),\n respn = if_else(AVAL > 4, 1, 0),\n trt = if_else(TRTP == \"Placebo\", \"PBO\", \"ACT\"),\n trtn = if_else(TRTP == \"Placebo\", 1, 0)\n ) |>\n select(trt, trtn, resp, respn)\n\n# cardx package required a vector with 0 and 1s for a single proportion CI\n# To get the comparison the correct way around Placebo must be 1, and Active 0\n\nindat <- select(indat1, trtn, respn)\n\n# BinomDiffCI requires\n# x1 = successes in active, n1 = total subjects in active,\n# x2 = successes in placebo, n2 = total subjects in placebo\n\nx <- indat |>\n filter(respn == 1) |>\n count(trtn, respn) |>\n pull(n)\nx\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 36 12\n```\n\n\n:::\n\n```{.r .cell-code}\nn <- indat |>\n count(trtn) |>\n pull(n)\nn\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 154 77\n```\n\n\n:::\n\n```{.r .cell-code}\nDescTools::BinomDiffCI(\n x1 = x[1],\n n1 = n[1],\n x2 = x[2],\n n2 = n[2],\n conf.level = 0.95,\n sides = c(\"two.sided\"),\n method = c(\n \"wald\",\n \"waldcc\",\n \"score\",\n \"scorecc\",\n \"ac\",\n \"mn\",\n \"mee\",\n \"blj\",\n \"ha\",\n \"hal\",\n \"jp\"\n )\n) |>\n as_tibble(rownames = \"Method\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 11 × 4\n Method est lwr.ci upr.ci\n \n 1 wald 0.0779 -0.0271 0.183\n 2 waldcc 0.0779 -0.0368 0.193\n 3 score 0.0779 -0.0361 0.175\n 4 scorecc 0.0779 -0.0440 0.181\n 5 ac 0.0779 -0.0329 0.178\n 6 mn 0.0779 -0.0361 0.177\n 7 mee 0.0779 -0.0358 0.177\n 8 blj 0.0779 -0.0306 0.181\n 9 ha 0.0779 -0.0342 0.190\n10 hal 0.0779 -0.0314 0.177\n11 jp 0.0779 -0.0321 0.178\n```\n\n\n:::\n:::\n\n\n### Example code for 
{ratesci}\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Miettinen-Nurminen Asymptotic Score method\nratesci::scoreci(\n x1 = x[1],\n n1 = n[1],\n x2 = x[2],\n n2 = n[2],\n contrast = 'RD',\n bcf = TRUE,\n skew = FALSE\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n$estimates\n lower est upper level x1 n1 x2 n2 p1hat p2hat\n[1,] -0.0360653 0.0779221 0.177495 0.95 36 154 12 77 0.233766 0.155844\n p1mle p2mle\n[1,] 0.233766 0.155844\n\n$pval\n chisq pval2sided theta0 scorenull pval_left pval_right\n[1,] 1.88525 0.169739 0 1.37304 0.91513 0.0848696\n\n$call\n distrib contrast level bcf skew cc \n \"bin\" \"RD\" \"0.95\" \"TRUE\" \"FALSE\" \"FALSE\" \n```\n\n\n:::\n\n```{.r .cell-code}\n# Mee Asymptotic Score method\nratesci::scoreci(\n x1 = x[1],\n n1 = n[1],\n x2 = x[2],\n n2 = n[2],\n contrast = 'RD',\n bcf = FALSE,\n skew = FALSE\n)$estimates[, 1:3]\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n lower est upper \n-0.0357985 0.0779221 0.1772878 \n```\n\n\n:::\n\n```{.r .cell-code}\n# Skewness-Corrected Asymptotic Score method\nratesci::scoreci(\n x1 = x[1],\n n1 = n[1],\n x2 = x[2],\n n2 = n[2],\n contrast = 'RD',\n bcf = TRUE,\n skew = TRUE\n)$estimates[, 1:3]\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n lower est upper \n-0.0342388 0.0772717 0.1790810 \n```\n\n\n:::\n:::\n\n\n### Example code for {contingencytables}\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Tricky to get this the right way round\ntab2x2 <- table(indat1$trt, indat1$resp)[, c(2,1)]\n\ncontingencytables::the_2x2_table_CIs_difference(n = tab2x2)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nEstimate of pi_1: 36 / 154 = 0.234\nEstimate of pi_2: 12 / 77 = 0.156\nEstimate of delta = pi_1 - pi_2: 0.078\n\nInterval method 95% CI width\n--------------------------------------------------------------\nWald -0.0271 to 0.1830 0.210\nWald with continuity correction -0.0368 to 0.1927 0.230\nAgresti-Caffo -0.0329 to 0.1782 0.211\nNewcombe hybrid score -0.0361 to 0.1751 0.211\nMee 
asymptotic score -0.0358 to 0.1773 0.213\nMiettinen-Nurminen asymptotic score -0.0361 to 0.1775 0.214\n--------------------------------------------------------------\n```\n\n\n:::\n:::\n\n\n### Example code for {PropCIs}\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Miettinen-Nurminen Asymptotic Score method\nPropCIs::diffscoreci( \n x1 = 36,\n n1 = 154,\n x2 = 12,\n n2 = 77,\n conf.level = 0.95\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\n\ndata: \n\n95 percent confidence interval:\n -0.0360653 0.1774952\n```\n\n\n:::\n:::\n\n\n# Methods for Calculating Confidence Intervals for Relative Risk from 2 independent samples\n\nThe 1-sided performance of selected methods have been compared graphically[@laud2017], with the observation that optimum 2-sided coverage follows directly from optimum 1-sided coverage (while the reverse is not true). It has been noted previously that the ratio contrasts suffer a greater imbalance in 1-sided coverage than RD[@gart1990]. Therefore, skewness correction is particularly valuable here.\n\n### Example code for {DescTools}\n\n\n::: {.cell}\n\n```{.r .cell-code}\nDescTools::BinomRatioCI(\n x1 = x[1],\n n1 = n[1],\n x2 = x[2],\n n2 = n[2],\n conf.level = 0.95,\n sides = c(\"two.sided\"),\n method = c(\"katz.log\", \"adj.log\", \"bailey\", \"koopman\", \"noether\", \n \"sinh-1\", \"boot\")\n) |>\n as_tibble(rownames = \"Method\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 7 × 4\n Method est lwr.ci upr.ci\n \n1 katz.log 1.5 0.829 2.71\n2 adj.log 1.5 0.819 2.62\n3 bailey 1.5 0.851 2.82\n4 koopman 1.5 0.849 2.73\n5 noether 1.5 0.610 2.39\n6 sinh-1 1.5 0.836 2.69\n7 boot 1.5 0.842 3.07\n```\n\n\n:::\n:::\n\n\n### Example code for {ratesci}\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Miettinen-Nurminen Asymptotic Score method\nratesci::scoreci(\n x1 = x[1],\n n1 = n[1],\n x2 = x[2],\n n2 = n[2], \n contrast='RR', \n bcf = TRUE,\n skew = FALSE)$estimates\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n 
lower est upper level x1 n1 x2 n2 p1hat p2hat p1mle\n[1,] 0.848245 1.5 2.73225 0.95 36 154 12 77 0.233766 0.155844 0.233766\n p2mle\n[1,] 0.155844\n```\n\n\n:::\n\n```{.r .cell-code}\n# Koopman Asymptotic Score method\nratesci::scoreci(\n x1 = x[1],\n n1 = n[1],\n x2 = x[2],\n n2 = n[2], \n contrast='RR', \n bcf = FALSE,\n skew = FALSE)$estimates\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n lower est upper level x1 n1 x2 n2 p1hat p2hat p1mle\n[1,] 0.849237 1.5 2.72874 0.95 36 154 12 77 0.233766 0.155844 0.233766\n p2mle\n[1,] 0.155844\n```\n\n\n:::\n\n```{.r .cell-code}\n# Gart-Nam Skewness-Corrected Asymptotic Score method\nratesci::scoreci(\n x1 = x[1],\n n1 = n[1],\n x2 = x[2],\n n2 = n[2], \n contrast='RR', \n bcf = FALSE,\n skew = TRUE)$estimates\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n lower est upper level x1 n1 x2 n2 p1hat p2hat p1mle\n[1,] 0.854006 1.48998 2.83174 0.95 36 154 12 77 0.233766 0.155844 0.233401\n p2mle\n[1,] 0.156648\n```\n\n\n:::\n\n```{.r .cell-code}\n# Skewness-Corrected Asymptotic Score method\nratesci::scoreci(\n x1 = x[1],\n n1 = n[1],\n x2 = x[2],\n n2 = n[2], \n contrast='RR', \n bcf = TRUE,\n skew = TRUE)$estimates\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n lower est upper level x1 n1 x2 n2 p1hat p2hat p1mle\n[1,] 0.852966 1.49002 2.83502 0.95 36 154 12 77 0.233766 0.155844 0.233403\n p2mle\n[1,] 0.156644\n```\n\n\n:::\n:::\n\n\n### Example code for {gsDesign}\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Miettinen-Nurminen\ngsDesign::ciBinomial(\n x1 = x[1],\n n1 = n[1],\n x2 = x[2],\n n2 = n[2],\n scale = 'rr', \n adj = 1\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n lower upper\n1 0.848245 2.73227\n```\n\n\n:::\n:::\n\n\n### Example code for {contingencytables}\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncontingencytables::the_2x2_table_CIs_ratio(n = tab2x2)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nEstimate of pi_1: 36 / 154 = 0.234\nEstimate of pi_2: 12 / 77 = 
0.156\nEstimate of phi = pi_1 / pi_2: 1.500\n\nInterval method 95% CI Log width\n----------------------------------------------------------------\nKatz log 0.829 to 2.715 1.187\nAdjusted log 0.819 to 2.620 1.163\nPrice-Bonett approximate Bayes 0.831 to 2.695 1.177\nInverse sinh 0.836 to 2.692 1.170\nAdjusted inverse sinh 0.835 to 2.694 1.171\nMOVER-R Wilson 0.847 to 2.715 1.165\nMiettinen-Nurminen asymptotic score 0.848 to 2.732 1.170\nKoopman asymptotic score 0.849 to 2.729 1.167\n----------------------------------------------------------------\n```\n\n\n:::\n:::\n\n\n# Methods for Calculating Confidence Intervals for Odds Ratio from 2 independent samples\n\n### Example code for {ratesci}\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Miettinen-Nurminen Asymptotic Score method\nratesci::scoreci(\n x1 = x[1],\n n1 = n[1],\n x2 = x[2],\n n2 = n[2], \n contrast='OR', \n bcf = TRUE,\n or_bias = FALSE,\n skew = FALSE)$estimates\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n lower est upper level x1 n1 x2 n2 p1hat p2hat p1mle\n[1,] 0.810258 1.65254 3.36344 0.95 36 154 12 77 0.233766 0.155844 0.233766\n p2mle\n[1,] 0.155844\n```\n\n\n:::\n\n```{.r .cell-code}\n# Asymptotic Score method without 'N-1' adjustment\nratesci::scoreci(\n x1 = x[1],\n n1 = n[1],\n x2 = x[2],\n n2 = n[2], \n contrast='OR', \n bcf = FALSE,\n or_bias = FALSE,\n skew = FALSE)$estimates\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n lower est upper level x1 n1 x2 n2 p1hat p2hat p1mle\n[1,] 0.811482 1.65254 3.35842 0.95 36 154 12 77 0.233766 0.155844 0.233766\n p2mle\n[1,] 0.155844\n```\n\n\n:::\n\n```{.r .cell-code}\n# Skewness-Corrected Asymptotic Score method - includes additional OR bias correction\nratesci::scoreci(\n x1 = x[1],\n n1 = n[1],\n x2 = x[2],\n n2 = n[2], \n contrast='OR', \n bcf = TRUE,\n or_bias = TRUE,\n skew = TRUE)$estimates\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n lower est upper level x1 n1 x2 n2 p1hat p2hat p1mle\n[1,] 0.817594 1.63716 3.48002 0.95 
36 154 12 77 0.233766 0.155844 0.233316\n p2mle\n[1,] 0.156745\n```\n\n\n:::\n\n```{.r .cell-code}\n# Gart(1985) Skewness-Corrected Asymptotic Score method - omits the 'N-1' adjustment\nratesci::scoreci(\n x1 = x[1],\n n1 = n[1],\n x2 = x[2],\n n2 = n[2], \n contrast='OR', \n bcf = FALSE,\n or_bias = TRUE,\n skew = TRUE)$estimates\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n lower est upper level x1 n1 x2 n2 p1hat p2hat p1mle\n[1,] 0.818872 1.63711 3.4753 0.95 36 154 12 77 0.233766 0.155844 0.233314\n p2mle\n[1,] 0.156748\n```\n\n\n:::\n:::\n\n\n### Example code for {gsDesign}\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Miettinen-Nurminen\ngsDesign::ciBinomial(\n x1 = x[1],\n n1 = n[1],\n x2 = x[2],\n n2 = n[2],\n scale = 'or', \n adj = 1\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n lower upper\n1 0.810255 3.36353\n```\n\n\n:::\n:::\n\n\n### Example code for {contingencytables}\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncontingencytables::the_2x2_table_CIs_OR(n = tab2x2)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nEstimate of pi_1: 36 / 154 = 0.234\nEstimate of pi_2: 12 / 77 = 0.156\nEstimate of theta = (pi_1 / (1-pi_1)) / (pi_2 / (1-pi_2)): 1.653\n\nInterval method 95% CI Log width\n----------------------------------------------------------------\nWoolf logit 0.804 to 3.395 1.440\nGart adjusted logit 0.794 to 3.282 1.419\nIndependence-smoothed logit 0.804 to 3.367 1.433\nInverse sinh 0.816 to 3.346 1.411\nAdjusted inverse sinh (0.45, 0.25) 0.803 to 3.259 1.401\nAdjusted inverse sinh (0.6, 0.4) 0.800 to 3.227 1.395\nMOVER-R Wilson 0.872 to 3.336 1.342\nMiettinen-Nurminen asymptotic score 0.810 to 3.363 1.423\nUncorrected asymptotic score 0.811 to 3.358 1.420\nCornfield exact conditional 0.813 to 3.503 1.461\nBaptista-Pike exact conditional 0.796 to 3.326 1.430\nCornfield mid-P 0.813 to 3.503 1.461\nBaptista-Pike mid-P 0.796 to 3.326 1.430\n----------------------------------------------------------------\n```\n\n\n:::\n:::\n\n\n# 
Continuity Adjusted Methods\n\nThere are relatively few methods widely available for aligning the minimum coverage with the nominal confidence level. The most versatile option is to use functions from the {ratesci} package, which provides optional continuity adjustments, on a sliding scale from 0 to $0.5/N$, for any of the Asymptotic Score or MOVER methods for any contrast.\n\n# Consistency with hypothesis tests\n\n## Test for association\n\nThe Asymptotic Score methods for all contrasts are inherently consistent with $\\chi^2$ tests. What may be less widely known is that there is more than one version of the $\\chi^2$ test. The Mee and Koopman methods (without the 'N-1' variance correction) are consistent with the **Karl** Pearson $\\chi^2$ (as produced by `stats::chisq.test()`) , while the Miettinen-Nurminen method agrees with the **Egon** Pearson 'N-1' test. (The stratified MN interval agrees with the standard CMH test, which also incorporates the 'N-1' adjustment.) Note that the SCAS (with or without 'N-1' adjustment) also agrees with the same tests for association, because the skewness correction term is zero when $\\theta_{RD}=0$ or when $\\theta_{RR}$ or $\\theta_{OR}=1$. The 'N-1' adjusted $\\chi^2$ test is available in the {ratesci} and {gsDesign} packages.\n\n## Non-inferiority test\n\nOne important use for CIs for independent proportions is in the analysis of clinical trials aiming to demonstrate non-inferiority for an outcome such as cure rate or relapse rate. The Asymptotic Score methods are naturally suited for this purpose, as they are derived by inverting a score test statistic. Probably the most well-known named test for such analysis is the Farrington-Manning (FM) test, but it is important to note that the FM formula omits the 'N-1' correction factor, so is consistent with the Mee CI, not the Miettinen-Nurminen CI. 
Non-inferiority tests including the 'N-1' adjustment can be obtained from the {ratesci} and {gsDesign} functions.\n\n# References\n", - "supporting": [ - "ci_for_2indep_prop_files" - ], + "markdown": "---\ntitle: \"Confidence Intervals for Independent Proportions in R\"\nbibliography: references.bib\ncsl: nature.csl \n---\n\n# Introduction\n\nThis page covers confidence intervals for comparisons of two independent proportions in R, including the contrast parameters for risk difference (RD) $\\theta_{RD} = p_1 - p_2$, relative risk (RR) $\\theta_{RR} = p_1 / p_2$, and odds ratio (OR) $\\theta_{OR} = p_1(1-p_2) / (p_2(1-p_1))$.\n\nSee the [summary page](../method_summary/ci_for_prop_intro.html) for general introductory information on confidence intervals for proportions, including the principles underlying the most common methods.\n\nNote that because the Asymptotic Score methods rely on an iterative root-finding subroutine to identify the confidence limits, the precision of results (and therefore agreement between packages) depends on the tolerance parameter used.\n\n# Data used\n\nThe adcibc data stored [here](../data/adcibc.csv) was used in this example, creating a binary treatment variable `trt` taking the values of `ACT` or `PBO` and a binary response variable `resp` taking the values of `Yes` or `No`. For this example, a response is defined as a score greater than 4.\n\n\n\nThe below shows that for the Active Treatment, there are 36 responders out of 154 subjects, $p_1$ = 0.2338 (23.38% responders), while for the placebo treatment $p_2$ = 12/77 = 0.1558, giving a risk difference of 0.0779, relative risk 1.50, and odds ratio 1.6525.\n\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 4 × 3\n# Groups: trt [2]\n trt resp n\n \n1 ACT No 118\n2 ACT Yes 36\n3 PBO No 65\n4 PBO Yes 12\n```\n\n\n:::\n:::\n\n\n# Packages\n\nThe table below indicates which methods can be produced using each package, for each contrast. 
Methods are grouped by those that aim to achieve the nominal confidence interval on average, then the 'exact' and continuity adjusted methods that aim to achieve the nominal confidence level as a minimum. {ExactCIdiff} appears to be the only package offering an 'exact' method for RD, but run times can be prohibitively long, and it is not clear which of the SAS 'exact' methods it matches with, if any.\n\n| | ratesci | contingencytables | DescTools | cicalc | gsDesign | PropCIs | cardx |\n|---------|---------|---------|---------|---------|---------|---------|---------|\n| For proximate coverage: | | | | | | | |\n| **Wald/log** | RD,RR,OR | RD,RR,OR | RD,RR,OR | RD | \\- | \\- | RD |\n| **Agresti-Caffo** | RD | RD | RD | \\- | \\- | \\- | \\- |\n| **MOVER-Wilson (Newcombe)** | RD,RR,OR | RD,RR,OR | RD | RD | \\- | \\- | \\- |\n| **MOVER-Jeffreys** | RD,RR,OR | \\- | \\- | \\- | \\- | \\- | \\- |\n| Asymptotic Score methods: | | | | | | | |\n| **Miettinen-Nurminen** | RD,RR,OR | RD,RR,OR | RD | RD | RD,RR,OR | RD | \\- |\n| **Mee/ Koopman** | RD,RR,OR | RD,RR,OR | RD,RR | RD | RD,RR,OR | RR | \\- |\n| **SCAS** | RD,RR,OR | \\- | \\- | \\- | \\- | \\- | \\- |\n| For conservative coverage: | | | | | | | |\n| **Wald-cc** | RD | RD | RD | RD | \\- | \\- | RD |\n| **MOVER-Wilson-cc** | RD,RR,OR | \\- | RD | RD | \\- | \\- | \\- |\n| **MOVER-Jeffreys-cc** | RD,RR,OR | \\- | \\- | \\- | \\- | \\- | \\- |\n| **MN-cc** | RD,RR,OR | \\- | \\- | \\- | \\- | \\- | \\- |\n| **SCAS-cc** | RD,RR,OR | \\- | \\- | \\- | \\- | \\- | \\- |\n\n**The {ratesci} package** has the most extensive coverage of the best-performing methods (Asymptotic Score and MOVER) for all contrasts, including several features not available elsewhere, including the skewness correction for improved symmetrical one-sided coverage (SCAS), corresponding hypothesis tests, an option to apply the MOVER approach with Jeffreys intervals (MOVER-J), and optional 'sliding scale' continuity adjustments across all methods. 
It also produces a selection of other methods for reference. Note, the current development version at has added more functionality (including the convenience functions `rdci()` etc) compared to the CRAN release (v1.0.0). A CRAN update will be released in due course.\n\n**The {DescTools} package** has a function `BinomDiffCI()` which produces CIs for RD using numerous different methods including those indicated above plus Brown-Li(Jeffreys), Hauck-Anderson, and Haldane. See [here](https://search.r-project.org/CRAN/refmans/DescTools/html/BinomDiffCI.html) for more detail. The `BinomRatioCI()` function offers several methods for RR CIs, including the approximate (log)normal and Asymptotic Score (Koopman) methods. The methods available for OR are more limited: `OddsRatio()` only has Wald, MLE and mid-P CI methods.\n\n**The {PropCIs} package** has functions `diffscoreci()` for the Miettinen-Nurminen (MN) CI for RD, `riskscoreci()` for the Koopman interval for RR, and `orscoreci()` for the MN CI for OR. It also has functions providing Bayesian tail intervals (with user-specified prior).\n\n**The {contingencytables} package** also provides a good selection of different methods.\n\n**The {cicalc} package** in general replicates the methods available in SAS, but only for the RD contrast.\n\n**The {gsDesign} package** gives Asymptotic Score intervals (with or without 'N-1' correction) for all contrasts with the `ciBinomial()` function. The package also has functions for the corresponding hypothesis tests, and sample size calculations.\n\n**The {cardx} package** has very limited options for comparing proportions - the `cardx::ard_stats_prop_test` function only provides for estimation of RD with the Wald approximate normal method (which is not recommended).\n\n```{=html}\n\n```\n\n# Proportion Difference\n\nThis [paper](https://www.lexjansen.com/wuss/2016/127_Final_Paper_PDF.pdf) describes many methods for the calculation of confidence intervals for 2 independent proportions. 
The 2-sided and 1-sided performance of many of the same methods have been compared graphically[@laud2014]. For more technical information regarding the methods below see the corresponding [SAS page](https://psiaims.github.io/CAMIS/SAS/ci_for_2indep_prop.html).\n\n````{=html}\n\n````\n\n### Example code for {DescTools}\n\n\n::: {.cell}\n\n```{.r .cell-code}\nindat1 <- adcibc2 |>\n select(AVAL, TRTP) |>\n mutate(\n resp = if_else(AVAL > 4, \"Yes\", \"No\"),\n respn = if_else(AVAL > 4, 1, 0),\n trt = if_else(TRTP == \"Placebo\", \"PBO\", \"ACT\"),\n trtn = if_else(TRTP == \"Placebo\", 1, 0)\n ) |>\n select(trt, trtn, resp, respn)\n\n# cardx package required a vector with 0 and 1s for a single proportion CI\n# To get the comparison the correct way around Placebo must be 1, and Active 0\n\nindat <- select(indat1, trtn, respn)\n\n# BinomDiffCI requires\n# x1 = successes in active, n1 = total subjects in active,\n# x2 = successes in placebo, n2 = total subjects in placebo\n\nx <- indat |>\n filter(respn == 1) |>\n count(trtn, respn) |>\n pull(n)\nx\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 36 12\n```\n\n\n:::\n\n```{.r .cell-code}\nn <- indat |>\n count(trtn) |>\n pull(n)\nn\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 154 77\n```\n\n\n:::\n\n```{.r .cell-code}\nDescTools::BinomDiffCI(\n x1 = x[1],\n n1 = n[1],\n x2 = x[2],\n n2 = n[2],\n conf.level = 0.95,\n sides = c(\"two.sided\"),\n method = c(\n \"wald\",\n \"waldcc\",\n \"score\",\n \"scorecc\",\n \"ac\",\n \"mn\",\n \"mee\",\n \"blj\",\n \"ha\",\n \"hal\",\n \"jp\"\n )\n) |>\n as_tibble(rownames = \"Method\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 11 × 4\n Method est lwr.ci upr.ci\n \n 1 wald 0.0779 -0.0271 0.183\n 2 waldcc 0.0779 -0.0368 0.193\n 3 score 0.0779 -0.0361 0.175\n 4 scorecc 0.0779 -0.0440 0.181\n 5 ac 0.0779 -0.0329 0.178\n 6 mn 0.0779 -0.0361 0.177\n 7 mee 0.0779 -0.0358 0.177\n 8 blj 0.0779 -0.0306 0.181\n 9 ha 0.0779 -0.0342 0.190\n10 hal 
0.0779 -0.0314 0.177\n11 jp 0.0779 -0.0321 0.178\n```\n\n\n:::\n:::\n\n\n### Example code for {ratesci}\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Selected methods for proximate coverage\nratesci::rdci(\n x1 = x[1],\n n1 = n[1],\n x2 = x[2],\n n2 = n[2],\n level = 0.95,\n precis = 6\n)$estimates \n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n, , 36/154 vs 12/77\n\n lower est upper\nSCAS -0.034239 0.077922 0.179081\nGart-Nam -0.033963 0.077922 0.178881\nMiettinen-Nurminen -0.036065 0.077922 0.177495\nMee -0.035798 0.077922 0.177288\nMOVER Wilson -0.036142 0.077922 0.175125\nMOVER Jeffreys -0.033570 0.077922 0.176023\nWald -0.027108 0.077922 0.182952\nAgresti-Caffo -0.032925 0.077922 0.178170\n```\n\n\n:::\n:::\n\n\n### Example code for {contingencytables}\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Tricky to get this the right way round\ntab2x2 <- table(indat1$trt, indat1$resp)[, c(2,1)]\n\ncontingencytables::the_2x2_table_CIs_difference(n = tab2x2)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nEstimate of pi_1: 36 / 154 = 0.234\nEstimate of pi_2: 12 / 77 = 0.156\nEstimate of delta = pi_1 - pi_2: 0.078\n\nInterval method 95% CI width\n--------------------------------------------------------------\nWald -0.0271 to 0.1830 0.210\nWald with continuity correction -0.0368 to 0.1927 0.230\nAgresti-Caffo -0.0329 to 0.1782 0.211\nNewcombe hybrid score -0.0361 to 0.1751 0.211\nMee asymptotic score -0.0358 to 0.1773 0.213\nMiettinen-Nurminen asymptotic score -0.0361 to 0.1775 0.214\n--------------------------------------------------------------\n```\n\n\n:::\n:::\n\n\n# Relative Risk\n\nThe 1-sided performance of selected methods have been compared graphically[@laud2017], with the observation that optimum 2-sided coverage follows directly from optimum 1-sided coverage (while the reverse is not true). It has been noted previously that the ratio contrasts suffer a greater imbalance in 1-sided coverage than RD[@gart1990]. 
Therefore, skewness correction is particularly valuable here.\n\nAnother relatively recent method, not provided by SAS, is the MOVER-R interval, which uses a formula adapted from the MOVER (Newcombe Hybrid Score) method for application to ratio contrasts.\n\n### Example code for {DescTools}\n\n\n::: {.cell}\n\n```{.r .cell-code}\nDescTools::BinomRatioCI(\n x1 = x[1],\n n1 = n[1],\n x2 = x[2],\n n2 = n[2],\n conf.level = 0.95,\n sides = c(\"two.sided\"),\n method = c(\"katz.log\", \"adj.log\", \"bailey\", \"koopman\", \"noether\", \n \"sinh-1\", \"boot\")\n) |>\n as_tibble(rownames = \"Method\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 7 × 4\n Method est lwr.ci upr.ci\n \n1 katz.log 1.5 0.829 2.71\n2 adj.log 1.5 0.819 2.62\n3 bailey 1.5 0.851 2.82\n4 koopman 1.5 0.849 2.73\n5 noether 1.5 0.610 2.39\n6 sinh-1 1.5 0.836 2.69\n7 boot 1.5 0.882 2.93\n```\n\n\n:::\n:::\n\n\n### Example code for {ratesci}\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Selected methods for proximate coverage\nratesci::rrci(\n x1 = x[1],\n n1 = n[1],\n x2 = x[2],\n n2 = n[2],\n level = 0.95,\n precis = 6\n)$estimates \n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n, , 36/154 vs 12/77\n\n lower est upper\nSCAS 0.852966 1.5 2.83502\nGart-Nam 0.854006 1.5 2.83174\nMiettinen-Nurminen 0.848245 1.5 2.73225\nKoopman 0.849237 1.5 2.72874\nMOVER-R Wilson 0.847205 1.5 2.71518\nMOVER-R Jeffreys 0.854776 1.5 2.80156\nKatz log 0.828758 1.5 2.71490\nAdjusted log 0.818876 1.5 2.61996\n```\n\n\n:::\n:::\n\n\n### Example code for {gsDesign}\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Miettinen-Nurminen\ngsDesign::ciBinomial(\n x1 = x[1],\n n1 = n[1],\n x2 = x[2],\n n2 = n[2],\n scale = 'rr', \n adj = 1\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n lower upper\n1 0.848245 2.73227\n```\n\n\n:::\n:::\n\n\n### Example code for {contingencytables}\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncontingencytables::the_2x2_table_CIs_ratio(n = tab2x2)\n```\n\n::: {.cell-output 
.cell-output-stdout}\n\n```\nEstimate of pi_1: 36 / 154 = 0.234\nEstimate of pi_2: 12 / 77 = 0.156\nEstimate of phi = pi_1 / pi_2: 1.500\n\nInterval method 95% CI Log width\n----------------------------------------------------------------\nKatz log 0.829 to 2.715 1.187\nAdjusted log 0.819 to 2.620 1.163\nPrice-Bonett approximate Bayes 0.831 to 2.695 1.177\nInverse sinh 0.836 to 2.692 1.170\nAdjusted inverse sinh 0.835 to 2.694 1.171\nMOVER-R Wilson 0.847 to 2.715 1.165\nMiettinen-Nurminen asymptotic score 0.848 to 2.732 1.170\nKoopman asymptotic score 0.849 to 2.729 1.167\n----------------------------------------------------------------\n```\n\n\n:::\n:::\n\n\n# Odds Ratio\n\n### Example code for {ratesci}\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Selected methods for proximate coverage\nratesci::orci(\n x1 = x[1],\n n1 = n[1],\n x2 = x[2],\n n2 = n[2],\n level = 0.95,\n precis = 6\n)$estimates \n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n, , 36/154 vs 12/77\n\n lower est upper\nSCAS 0.817594 1.65254 3.48002\nGart Asymptotic Score 0.818872 1.65254 3.47530\nMiettinen-Nurminen 0.810258 1.65254 3.36344\nUncorrected Asymptotic Score 0.811482 1.65254 3.35842\nMOVER-R Wilson 0.806857 1.65254 3.34520\nMOVER-R Jeffreys 0.817280 1.65254 3.44985\nWoolf logit 0.804332 1.65254 3.39523\nGart adjusted logit 0.793782 1.65254 3.28179\n```\n\n\n:::\n:::\n\n\n### Example code for {gsDesign}\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Miettinen-Nurminen\ngsDesign::ciBinomial(\n x1 = x[1],\n n1 = n[1],\n x2 = x[2],\n n2 = n[2],\n scale = 'or', \n adj = 1\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n lower upper\n1 0.810255 3.36353\n```\n\n\n:::\n:::\n\n\n### Example code for {contingencytables}\n\nNote there is an error in the {contingencytables} package v3.1.0 for the MOVER-R Wilson method.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncontingencytables::the_2x2_table_CIs_OR(n = tab2x2)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nEstimate of pi_1: 36 / 154 
= 0.234\nEstimate of pi_2: 12 / 77 = 0.156\nEstimate of theta = (pi_1 / (1-pi_1)) / (pi_2 / (1-pi_2)): 1.653\n\nInterval method 95% CI Log width\n----------------------------------------------------------------\nWoolf logit 0.804 to 3.395 1.440\nGart adjusted logit 0.794 to 3.282 1.419\nIndependence-smoothed logit 0.804 to 3.367 1.433\nInverse sinh 0.816 to 3.346 1.411\nAdjusted inverse sinh (0.45, 0.25) 0.803 to 3.259 1.401\nAdjusted inverse sinh (0.6, 0.4) 0.800 to 3.227 1.395\nMOVER-R Wilson 0.872 to 3.336 1.342\nMiettinen-Nurminen asymptotic score 0.810 to 3.363 1.423\nUncorrected asymptotic score 0.811 to 3.358 1.420\nCornfield exact conditional 0.813 to 3.503 1.461\nBaptista-Pike exact conditional 0.796 to 3.326 1.430\nCornfield mid-P 0.813 to 3.503 1.461\nBaptista-Pike mid-P 0.796 to 3.326 1.430\n----------------------------------------------------------------\n```\n\n\n:::\n:::\n\n\n# Continuity Adjusted Methods\n\nThere are relatively few methods widely available for aligning the minimum coverage with the nominal confidence level. 
The most versatile option is to use functions from the {ratesci} package, which provides optional continuity adjustments, on a sliding scale from 0 to $0.5/N$, for any of the Asymptotic Score or MOVER methods for any contrast.\n\n### Example code for {ratesci}\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Selected methods for conservative coverage\n# Using the conventional 0.5\nratesci::rdci(\n x1 = x[1],\n n1 = n[1],\n x2 = x[2],\n n2 = n[2],\n level = 0.95,\n cc = TRUE,\n precis = 6\n)$estimates \n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n, , 36/154 vs 12/77\n\n lower est upper\nContinuity adjusted SCAS -0.041280 0.077922 0.185316\nContinuity adjusted Gart-Nam -0.041003 0.077922 0.185116\nContinuity adjusted Miettinen-Nurminen -0.043077 0.077922 0.183742\nContinuity adjusted Mee -0.042809 0.077922 0.183535\nContinuity adjusted MOVER Wilson -0.043962 0.077922 0.180990\nContinuity adjusted MOVER Jeffreys -0.041446 0.077922 0.181965\nContinuity adjusted Wald -0.036848 0.077922 0.192692\nContinuity adjusted Hauck-Anderson -0.034150 0.077922 0.189994\n```\n\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Using an intermediate adjustment of magnitude 0.25\nratesci::rdci(\n x1 = x[1],\n n1 = n[1],\n x2 = x[2],\n n2 = n[2],\n level = 0.95,\n cc = 0.25,\n precis = 6\n)$estimates \n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n, , 36/154 vs 12/77\n\n lower est upper\nContinuity adjusted SCAS -0.037760 0.077922 0.182197\nContinuity adjusted Gart-Nam -0.037484 0.077922 0.181997\nContinuity adjusted Miettinen-Nurminen -0.039572 0.077922 0.180617\nContinuity adjusted Mee -0.039305 0.077922 0.180410\nContinuity adjusted MOVER Wilson -0.040052 0.077922 0.178057\nContinuity adjusted MOVER Jeffreys -0.037510 0.077922 0.178998\nContinuity adjusted Wald -0.031978 0.077922 0.187822\n```\n\n\n:::\n:::\n\n\n# Consistency with hypothesis tests\n\n## Test for association\n\nThe Asymptotic Score methods for all contrasts are inherently consistent with $\\chi^2$ 
tests. What may be less widely known is that there is more than one version of the $\\chi^2$ test. The Mee and Koopman methods (without the 'N-1' variance correction) are consistent with the **Karl** Pearson $\\chi^2$ (as produced by `stats::chisq.test()`) , while the Miettinen-Nurminen method agrees with the **Egon** Pearson 'N-1' test. (The stratified MN interval agrees with the standard CMH test, which also incorporates the 'N-1' adjustment.) Note that the SCAS (with or without 'N-1' adjustment) also agrees with the same tests for association, because the skewness correction term is zero when $\\theta_{RD}=0$ or when $\\theta_{RR}$ or $\\theta_{OR}=1$. The 'N-1' adjusted $\\chi^2$ test is available in the {ratesci} and {gsDesign} packages.\n\n## Non-inferiority test\n\nOne important use for CIs for independent proportions is in the analysis of clinical trials aiming to demonstrate non-inferiority for an outcome such as cure rate or relapse rate. The Asymptotic Score methods are naturally suited for this purpose, as they are derived by inverting a score test statistic. Probably the most well-known named test for such analysis is the Farrington-Manning (FM) test, but it is important to note that the FM formula omits the 'N-1' correction factor, so is consistent with the Mee CI, not the Miettinen-Nurminen CI. 
Non-inferiority tests including the 'N-1' adjustment can be obtained from the {ratesci} and {gsDesign} functions.\n\n# References\n", + "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" ], diff --git a/_freeze/R/ci_for_paired_prop/execute-results/html.json b/_freeze/R/ci_for_paired_prop/execute-results/html.json index 6053a8993..c47821772 100644 --- a/_freeze/R/ci_for_paired_prop/execute-results/html.json +++ b/_freeze/R/ci_for_paired_prop/execute-results/html.json @@ -3,7 +3,9 @@ "result": { "engine": "knitr", "markdown": "---\ntitle: \"Confidence Intervals for Paired Proportions in R\"\n---\n\n## Introduction\n\n\\[See separate page for general introductory information on confidence intervals for proportions.\\]\n\n\\[Note: information about cicalc package will be added to this page soon.\\]\n\n## Data used\n\nThe adcibc data stored [here](../data/adcibc.csv) was used in this example, creating a binary treatment variable `trt` taking the values of `ACT` or `PBO` and a binary response variable `resp` taking the values of `Yes` or `No`. For this example, a response is defined as a score greater than 4.\n\n\n\nThe below shows that for the Active Treatment, there are 36 responders out of 154 subjects = 0.234 (23.4% responders).\n\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 4 × 3\n# Groups: trt [2]\n trt resp n\n \n1 ACT No 118\n2 ACT Yes 36\n3 PBO No 65\n4 PBO Yes 12\n```\n\n\n:::\n:::\n\n\n## Packages\n\n**The {ratesci} package** is ... \\[TBC\\]\n\n**The {ExactCIdiff} package** produces exact CIs for two dependent proportions (matched pairs).\n\n## Methods for Calculating Confidence Intervals for Proportion Difference from matched pairs using {ExactCIdiff} and {ratesci}\n\nFor more information about the detailed methods for calculating confidence intervals for a matched pair proportion see [here](https://psiaims.github.io/CAMIS/SAS/ci_for_prop.html#methods-for-calculating-confidence-intervals-for-a-matched-pair-proportion). 
When you have 2 measurements on the same subject, the 2 sets of measures are not independent and you have matched pair of responses.\n\nTo date we have not found an R package which calculates a CI for matched pair proportions using the normal approximation or Wilson methods although they can be done by hand using the equations provided on the SAS page link above.\n\n**The {ExactCIdiff} package** produces exact CIs for two dependent proportions (matched pairs), claiming to be the first package in R to do this method. However, it should only be used when the sample size is not too large as it can be computationally intensive.\\\nNOTE that the {ExactNumCI} package should not be used for this task. More detail on these two packages can be found [here](RJ-2013-026.pdf).\n\nUsing a cross over study as our example, a 2 x 2 table can be formed as follows:\n\n+-----------------------+---------------+---------------+-------------+\n| | Placebo\\ | Placebo\\ | Total |\n| | Response= Yes | Response = No | |\n+=======================+===============+===============+=============+\n| Active Response = Yes | r | s | r+s |\n+-----------------------+---------------+---------------+-------------+\n| Active Response = No | t | u | t+u |\n+-----------------------+---------------+---------------+-------------+\n| Total | r+t | s+u | N = r+s+t+u |\n+-----------------------+---------------+---------------+-------------+\n\n: The proportions of subjects responding on each treatment are:\n\nActive: $\\hat p_1 = (r+s)/n$ and Placebo: $\\hat p_2= (r+t)/n$\n\nDifference between the proportions for each treatment are: $D=p1-p2=(s-t)/n$\n\nSuppose :\n\n+-----------------------+---------------+---------------+------------------+\n| | Placebo\\ | Placebo\\ | Total |\n| | Response= Yes | Response = No | |\n+=======================+===============+===============+==================+\n| Active Response = Yes | r = 20 | s = 15 | r+s = 35 
|\n+-----------------------+---------------+---------------+------------------+\n| Active Response = No | t = 6 | u = 5 | t+u = 11 |\n+-----------------------+---------------+---------------+------------------+\n| Total | r+t = 26 | s+u = 20 | N = r+s+t+u = 46 |\n+-----------------------+---------------+---------------+------------------+\n\nActive: $\\hat p_1 = (r+s)/n$ =35/46 =0.761 and Placebo: $\\hat p_2= (r+t)/n$ = 26/46 =0.565\n\nDifference = 0.761-0.565 = 0.196, then PairedCI() function can provide an exact confidence interval as shown below\n\n-0.00339 to 0.38065\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# ExactCIdiff::PairedCI(s, r+u, t, conf.level = 0.95)\n\nCI <- ExactCIdiff::PairedCI(15, 25, 6, conf.level = 0.95)$ExactCI\nCI\n```\n:::\n\n\n## Methods for Calculating Confidence Intervals for Relative Risk from matched pairs using {ratesci}\n\n\\[TBC\\]\n\n## Methods for Calculating Confidence Intervals for Conditional Odds Ratio from matched pairs using {ratesci}\n\n\\[TBC\\]\n\n## Continuity Adjusted Methods\n\n\\[TBC\\]\n\n## Consistency with Hypothesis Tests\n\n\\[TBC\\] - cf. McNemar test\n\n## \n\n## \n\n## References\n\n1. [pharmaverse cardx package](https://insightsengineering.github.io/cardx/main/#:~:text=The%20%7Bcardx%7D%20package%20is%20an%20extension%20of%20the,Data%20Objects%20%28ARDs%29%20using%20the%20R%20programming%20language.)\n2. [PropCIs package](https://cran.r-project.org/web//packages/PropCIs/PropCIs.pdf)\n3. D. Altman, D. Machin, T. Bryant, M. Gardner (eds). Statistics with Confidence: Confidence Intervals and Statistical Guidelines, 2nd edition. John Wiley and Sons 2000.\n4. 
\n", - "supporting": [], + "supporting": [ + "ci_for_paired_prop_files" + ], "filters": [ "rmarkdown/pagebreak.lua" ], diff --git a/_freeze/R/ci_for_prop/execute-results/html.json b/_freeze/R/ci_for_prop/execute-results/html.json index bbf7b1947..f699f62ed 100644 --- a/_freeze/R/ci_for_prop/execute-results/html.json +++ b/_freeze/R/ci_for_prop/execute-results/html.json @@ -1,11 +1,9 @@ { - "hash": "8c489588a01445fa9b0ed18024927c02", + "hash": "bedefe1a89fdc3b8e9579bbc0542b3eb", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Confidence Intervals for a Proportion in R\"\nbibliography: references.bib\ncsl: nature.csl \n---\n\n# Introduction\n\nSee the [summary page](../method_summary/ci_for_prop_intro.html) for general introductory information on confidence intervals for proportions, including the principles underlying the most common methods, and a summary of R packages available.\n\n\\[Note: information about cicalc package will be added to this page soon.\\]\n\n# Data used\n\nThe adcibc data stored [here](../data/adcibc.csv) was used in this example, creating a binary treatment variable `trt` taking the values of `ACT` or `PBO` and a binary response variable `resp` taking the values of `Yes` or `No`. For this example, a response is defined as a score greater than 4.\n\n\n\nThe below shows that for the Active Treatment, there are 36 responders out of 154 subjects = 0.234 (23.4% responders).\n\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 4 × 3\n# Groups: trt [2]\n trt resp n\n \n1 ACT No 118\n2 ACT Yes 36\n3 PBO No 65\n4 PBO Yes 12\n```\n\n\n:::\n:::\n\n\n# Packages\n\nThe table below indicates which methods can be produced using each package. 
Methods are grouped by those that aim to achieve the nominal confidence interval on average, then the 'exact' and continuity adjusted methods that aim to achieve the nominal confidence level as a minimum.\n\n| | cardx | PropCIs | ratesci | DescTools | cicalc | Hmisc | RBesT |\n|---------|---------|---------|---------|---------|---------|---------|---------|\n| For proximate coverage: | | | | | | | |\n| **Wald** | Y | Y | Y | Y | Y | Y | \\- |\n| **Agresti-Coull** | Y | Y | Y | Y | Y | \\- | \\- |\n| **Jeffreys** | Y | \\- | Y | Y | Y | \\- | \\- |\n| **Mid-P** | \\- | Y | Y | Y | Y | \\- | \\- |\n| **Wilson** | Y | Y | Y | Y | Y | Y | \\- |\n| **SCAS** | \\- | \\- | Y | \\- | \\- | \\- | \\- |\n| For conservative coverage: | | | | | | | |\n| **Wald-cc** | Y | \\- | Y | Y | Y | \\- | \\- |\n| **Jeffreys-cc** | \\- | \\- | Y | \\- | \\- | \\- | \\- |\n| **Clopper-Pearson** | Y | Y | Y | Y | Y | Y | Y |\n| **Wilson-cc** | Y | \\- | Y | \\- | Y | \\- | \\- |\n| **SCAS-cc** | \\- | \\- | Y | \\- | \\- | \\- | \\- |\n| **Blaker** | \\- | Y | Y | Y | \\- | \\- | \\- |\n\nMost of these packages require just the number of events (numerator number of successes) & total number of subjects (denominator) as an input dataset. The exception is {cardx}, which takes input data as one row per individual.\n\n**The {cardx} package** includes a separate function for each method. Alternatively, you can use `cardx::ard_categorical_ci(data = act, variables = respn, method ='wilson')` for example. This invokes the code for the selected method but returns an analysis results dataset (ARD) format as the output.\n\n**The {PropCIs} package** contains separate functions to produce CIs for various methods, including Blaker's 'exact' method and Mid-P which aren't available in {cardx} but are produced by SAS PROC FREQ. We found results agreed with SAS to the 5th decimal place. 
For methods given by both {PropCIs} and {cardx}, the results generally aligned to at least the 7th decimal place, but some methods (Wilson & mid-P) are only output with 4 dps.\n\n**The {ratesci} package** (`rateci()` function) produces a variety of CIs for a proportion - note, the current development version at has added more methods compared to the CRAN release (v1.0.0). A CRAN update will be released in due course.\n\n**The {DescTools} package** has a function `BinomCI()` that produces a wide selection of methods. It has\n\n**The {cicalc} package** ... \\[TBC\\]\n\n**In the {Hmisc} package,** `binconf()` produces CIs using either the Clopper-Pearson, Wilson or Wald method. In this example (x=36 and n=154), the results match the cardx package.\n\n```{=html}\n\n```\n\n**The {RBesT} package** produces CIs using the Clopper-Pearson method with the `BinaryExactCI()` function. (Prior to version 1.8-0, the function produced erroneous results for boundary cases where x = 0 or x = N).\n\nAnother package producing many of the above intervals is **{contingencytables}**, but the format of its output objects is not very easy to work with.\n\n# Methods for Calculating Confidence Intervals for a single proportion using R\n\nFor more technical derivation and characteristics of each of the methods listed below, see the corresponding [SAS page](https://psiaims.github.io/CAMIS/SAS/ci_for_prop.html).\n\nMost of the methods are demonstrated below using the {cardx} package.\n\n## Clopper-Pearson (Exact or binomial CI) Method\n\nThe Clopper-Pearson 'Exact' CI is one of the most popular methods - it is often a preferred 'safe option' to avoid the known deficiencies of the Wald method and guarantee coverage of at least the nominal confidence level, but it can be excessively conservative (too wide an interval compared to the interval containing the true population proportion 95% of the time).\n\nThe {cardx} package calculates the Clopper-Pearson interval by calling the 
`stats::binom.test()` function.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncardx::proportion_ci_clopper_pearson(act2, conf.level = 0.95) |>\n as_tibble() |>\n select(-(statistic:parameter))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 8\n N n conf.level estimate conf.low conf.high method alternative\n \n1 154 36 0.95 0.234 0.169 0.309 Clopper-Pearso… two.sided \n```\n\n\n:::\n:::\n\n\n## Normal Approximation Method (Also known as the Wald Method)\n\nThe following code calculates a confidence interval for a binomial proportion using the normal approximation equation manually. This is replicated exactly using the `cardx::proportion_ci_wald function` which also allows the continuity correction to be applied.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# sample proportion by trt\nsummary <- adcibc |>\n filter(trt == \"ACT\") |>\n group_by(resp) |>\n tally() |>\n ungroup() |>\n mutate(\n total = sum(n),\n p = n / total\n )\n\n# Calculate standard error and 95% wald confidence intervals for population proportion\nwaldci <- summary |>\n filter(resp == \"Yes\") |>\n mutate(\n se = sqrt(p * (1 - p) / total),\n lower_ci = (p - qnorm(1 - 0.05 / 2) * se),\n upper_ci = (p + qnorm(1 - 0.05 / 2) * se)\n )\nwaldci\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 7\n resp n total p se lower_ci upper_ci\n \n1 Yes 36 154 0.234 0.0341 0.167 0.301\n```\n\n\n:::\n\n```{.r .cell-code}\n# cardx package Wald method without continuity correction\ncardx::proportion_ci_wald(act2, conf.level = 0.95, correct = FALSE) |>\n as_tibble()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 7\n N n estimate conf.low conf.high conf.level method \n \n1 154 36 0.234 0.167 0.301 0.95 Wald Confidence Interval w…\n```\n\n\n:::\n:::\n\n\n## Wilson Method (Also known as the (Asymptotic) Score method)\n\nThe cardx package calculates the Wilson (score) method by calling the `stats::prop.test()` function. 
This method is often used as a compromise between the Clopper-Pearson and the Wald given it was found to be accurate for most parameter values (even those close to 0 and 1), and it does not suffer from being over-conservative. However, it over-corrects the asymmetric one-sided coverage of Wald - the location of the Wilson interval is shifted too far towards 0.5[@tonycai2005]. For more technical information see the corresponding [SAS page](https://psiaims.github.io/CAMIS/SAS/ci_for_prop.html).\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# cardx package Wilson method without continuity correction\ncardx::proportion_ci_wilson(act2, conf.level = 0.95, correct = FALSE) |>\n as_tibble() |>\n select(-(statistic:parameter))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 8\n N n conf.level estimate conf.low conf.high method alternative\n \n1 154 36 0.95 0.234 0.174 0.307 Wilson Confide… two.sided \n```\n\n\n:::\n:::\n\n\nA recent improvement to the Wilson Score method introduces a skewness correction in order to address the asymmetric one-sided coverage probabilities[@laud2017]. The resulting Skewness Corrected Asymptotic Score (SCAS) method can be obtained from the {ratesci} package. Note in this example the interval is shifted downwards by about 0.0015.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nratesci::rateci(x = 36, n = 154)$scas |>\n as_tibble()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 5\n lower est upper x n\n \n1 0.172 0.234 0.305 36 154\n```\n\n\n:::\n:::\n\n\n## Agresti-Coull Method\n\nThe {cardx} package calculates the Agresti-Coull method using the equation from the published method by Alan Agresti & Brent Coull based on adding 2 successes and 2 failures before computing the wald CI. 
The CI is truncated, when it overshoots the boundary (\\<0 or \\>1).\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# cardx package agresti_coull method\ncardx::proportion_ci_agresti_coull(act2, conf.level = 0.95) |>\n as_tibble()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 7\n N n estimate conf.low conf.high conf.level method \n \n1 154 36 0.234 0.174 0.307 0.95 Agresti-Coull Confidence I…\n```\n\n\n:::\n:::\n\n\n## Jeffreys Method\n\nThe Jeffreys 'equal-tailed' method is a particular type of Bayesian method, which optimises central location instead of Highest Probability Density (HPD)[@brown2001]. For proportions, the beta distribution is generally used for the prior, which consists of two parameters alpha and beta. Setting alpha=beta=0.5 is called the Jeffreys prior. NOTE: if you want to use any other priors you can use the `ratesci:jeffreysci()` function which allows a different beta prior to be specified using the `ai` and `bi` arguments. Alternatively (and if you prefer to opt for an HPD approach not recommended by Brown et al.), you can use `binom::binom.bayes`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# cardx package jeffreys method\ncardx::proportion_ci_jeffreys(act2, conf.level = 0.95) |>\n as_tibble()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 7\n N n estimate conf.low conf.high conf.level method \n \n1 154 36 0.234 0.172 0.305 0.95 Jeffreys Interval\n```\n\n\n:::\n:::\n\n\n## Binomial based mid-P Method\n\nThe table below (from {ratesci}) includes two versions of the mid-P interval, one derived iteratively from binomial tail proportions and another from quantiles of a beta distribution. These have been claimed to be equivalent[@brown2001], but it seems they are not quite identical - the beta version is very close to the Jeffreys interval. 
The first version closely matches with the result (0.17200131, 0.30545883) from SAS PROC FREQ:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nratesci::rateci(x = 36, n = 154, precis = 10)$ciarray[,,1] |>\n as_tibble(rownames = \"Method\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 7 × 6\n Method lower est upper x n\n \n1 SCAS 0.172 0.234 0.305 36 154\n2 Jeffreys 0.172 0.234 0.305 36 154\n3 mid-p 0.172 0.234 0.305 36 154\n4 mid-p(beta) 0.172 0.234 0.305 36 154\n5 Wilson 0.174 0.234 0.307 36 154\n6 Wald 0.167 0.234 0.301 36 154\n7 Agresti-Coull 0.174 0.234 0.307 36 154\n```\n\n\n:::\n:::\n\n\nThe mid-P result from {DescTools} doesn't quite match with either of the above results, beyond the 4th decimal place, even with increased precision using the `tol` argument (which according to the documentation only affects the Blaker method). The documentation indicates that the mid-P interval is derived from the F-distribution, which suggests there are actually 3 slightly different versions of 'mid-P'.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nDescTools::BinomCI(x = 36, n = 154, \n tol = 1e-10,\n method = c(\"jeffreys\", \"wilson\", \"midp\", \"clopper-pearson\")) |> \n as_tibble(rownames = \"Method\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 4 × 4\n Method est lwr.ci upr.ci\n \n1 jeffreys 0.234 0.172 0.305\n2 wilson 0.234 0.174 0.307\n3 midp 0.234 0.172 0.305\n4 clopper-pearson 0.234 0.169 0.309\n```\n\n\n:::\n:::\n\n\nThe PropCIs version only displays 4dps, because the calculations are performed on sequential values of p at intervals of 0.0005. Therefore the 4th decimal place shown is only ever 1 or 6, and the PropCIs output for this method is only reliable up to the 3rd decimal place. 
(A similar precision issue is also noted for the `exactci::binom.exact(..., midp = TRUE)` function, which is probably due to reliance on `uniroot()` with its default value for the `tol` argument.)\n\n\n::: {.cell}\n\n```{.r .cell-code}\nPropCIs::midPci(x = 36, n = 154, conf.level = 0.95) \n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\n\ndata: \n\n95 percent confidence interval:\n 0.1721 0.3051\n```\n\n\n:::\n:::\n\n\n## Blaker Method\n\nThe Blaker 'exact' CI matching SAS output (to at least 7 dps) may be obtained as follows from {ratesci}, or from the dedicated {BlakerCI} package. Output from `DescTools::BinomCI` and `PropCIs::blakerci()` is less precise, matching to 4 decimal places.\n\n\n::: {.cell}\n\n```{.r .cell-code}\noptions(digits=8)\n# ratesci package Blaker method \nratesci::rateci(36, 154, cc = TRUE)$blaker |>\n as_tibble()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 5\n lower est upper x n\n \n1 0.170 0.234 0.307 36 154\n```\n\n\n:::\n:::\n\n\n# Continuity Adjusted Methods\n\nContinuity adjustments (at a magnitude of $\\gamma / N$ where conventionally $\\gamma = 0.5$ but smaller values may be used) can be applied to the Wald, Wilson and SCAS formulae.\n\nHere is an example of the adjusted Wilson interval from {cardx}:\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# cardx package Wilson method with continuity correction\n\ncardx::proportion_ci_wilson(act2, conf.level = 0.95, correct = TRUE) |> \n as_tibble()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 11\n N n conf.level estimate statistic p.value parameter conf.low\n \n1 154 36 0.95 0.234 42.6 6.70e-11 1 0.171\n# ℹ 3 more variables: conf.high , method , alternative \n```\n\n\n:::\n:::\n\n\nIn principle, the concept can also be applied to the Jeffreys CI, although with $\\gamma=0.5$ this turns out to be identical to the Clopper-Pearson interval.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nratesci::rateci(x = 36, n = 154, cc = 0.5)$ciarray |>\n 
drop() |>\n as_tibble(rownames=\"Method\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 7 × 6\n Method lower est upper x n\n \n1 SCAS_cc 0.169 0.234 0.309 36 154\n2 Jeffreys_cc 0.169 0.234 0.309 36 154\n3 Clopper-Pearson 0.169 0.234 0.309 36 154\n4 CP(beta) 0.169 0.234 0.309 36 154\n5 Wilson_cc 0.171 0.234 0.310 36 154\n6 Wald_cc 0.164 0.234 0.304 36 154\n7 Blaker 0.170 0.234 0.307 36 154\n```\n\n\n:::\n:::\n\n\n# Consistency with hypothesis tests\n\nThe consistency of CIs for a proportion with a hypothesis test depends on what test is specified. Some confidence interval methods are derived by inverting a test, and therefore guarantee consistency. This includes Clopper Pearson (exact binomial test from `prop.test()`), Wilson Score (asymptotic binomial test from `binom.test()` using the standard error estimated under the null hypothesis), and mid-P (exact test with mid-P adjustment, e.g. from `exactci::binom.exact(..., midp = TRUE)`), although given the observations above, it would be important to select the right version of the mid-P interval. For the SCAS method, the CI is consistent with a skewness-corrected version of the asymptotic test (output by the `ratesci::scoreci()` function).\n\nThe output from some of the {cardx} functions (e.g. `proportion_ci_wilson()`) and `proportion_ci_clopperpearson()` includes a p-value, which is not mentioned in the package documentation. It is a test against a null hypothesis of p=0.5, which is the default value used by the underlying `binom.test` and `prop.test` functions. 
If a test against a specified value of p was required, then those underlying functions should be used to obtain the p-value, as there is no facility to select a different null value in the {cardx} functions.\n\n# Stratified Score methods\n\n\\[To be moved to separate page for stratified analysis?\\]\n\n{cardx} also contains a function `proportion_ci_strat_wilson()` which calculates stratified Wilson CIs for unequal proportions[@yan2010], with or without continuity adjustment, using a combination of the CIs from each stratum. The default weights (not clearly documented) are presumably those described in the publication, which aim to minimise the weighted sum of the squared interval lengths. The function has the facility to specify other weights (such as proportional to stratum size, for example, or adjusted for known population weights). Examples shown use the `adcibc2` dataset, stratified by age group.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nactstrat <- adcibc2 |>\n select(TRTP, AVAL, AGEGR1) |>\n filter(TRTP != \"Placebo\") |>\n mutate(\n respn = if_else(AVAL > 4, 1, 0),\n agegrp = factor(AGEGR1)\n ) |>\n select(respn, agegrp)\nactage <- actstrat$agegrp\n\ntable(actage, act2) |>\n as_tibble()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 6 × 3\n actage act2 n\n \n1 <65 0 13\n2 >80 0 32\n3 65-80 0 73\n4 <65 1 4\n5 >80 1 11\n6 65-80 1 21\n```\n\n\n:::\n\n```{.r .cell-code}\ntable(actage) |>\n as_tibble()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 3 × 2\n actage n\n \n1 <65 17\n2 >80 43\n3 65-80 94\n```\n\n\n:::\n\n```{.r .cell-code}\ncardx::proportion_ci_strat_wilson(x = act2, strata = actage) |>\n as_tibble()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 3 × 8\n N n estimate conf.low conf.high conf.level weights method \n \n1 154 36 0.234 0.174 0.307 0.95 0.113 Stratified Wilson …\n2 154 36 0.234 0.174 0.307 0.95 0.263 Stratified Wilson …\n3 154 36 0.234 0.174 0.307 0.95 0.623 Stratified Wilson 
…\n```\n\n\n:::\n\n```{.r .cell-code}\ncardx::proportion_ci_strat_wilson(x = act2, strata = actage, weights = c(1/3, 1/3, 1/3)) |>\n as_tibble()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 7\n N n estimate conf.low conf.high conf.level method \n \n1 154 36 0.234 0.166 0.332 0.95 Stratified Wilson Confiden…\n```\n\n\n:::\n:::\n\n\nThe {ratesci} package can also produce stratified Wilson or SCAS intervals (with or without continuity adjustment), though the underlying methodology is different - this version uses a stratified score function[@laud2017] (analogous to the Miettinen-Nurminen formula for stratified risk difference) instead of constructing the interval from a CI calculated separately for each stratum. For stratified analysis, the `ratesci::scoreci()` function takes inputs in the form of vectors of the numerators and denominators per stratum. By default, weights are proportional to stratum size, but custom weights are also catered for using the `wt` argument. 
Arbitrary fixed weights are shown here to allow comparison with the {cardx} version.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nratesci::scoreci(x1 = c(4, 21, 11), \n n1 = c(17, 94, 43), \n contrast = 'p', \n skew = FALSE, \n stratified = TRUE)$estimates |>\n as_tibble()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 6\n lower est upper level p1hat p1mle\n \n1 0.174 0.234 0.307 0.95 0.234 0.234\n```\n\n\n:::\n\n```{.r .cell-code}\nratesci::scoreci(x1 = c(4, 21, 11), \n n1 = c(17, 94, 43), \n wt = c(1,1,1), \n contrast = 'p', \n skew = FALSE, \n stratified = TRUE)$estimates |>\n as_tibble()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 6\n lower est upper level p1hat p1mle\n \n1 0.164 0.238 0.332 0.95 0.238 0.238\n```\n\n\n:::\n:::\n\n\n# References\n", - "supporting": [ - "ci_for_prop_files" - ], + "markdown": "---\ntitle: \"Confidence Intervals for a Proportion in R\"\nbibliography: references.bib\ncsl: nature.csl \n---\n\n# Introduction\n\nSee the [summary page](../method_summary/ci_for_prop_intro.html) for general introductory information on confidence intervals for proportions, including the principles underlying the most common methods, and a summary of R packages available.\n\n\\[Note: information about cicalc package will be added to this page soon.\\]\n\n# Data used\n\nThe adcibc data stored [here](../data/adcibc.csv) was used in this example, creating a binary treatment variable `trt` taking the values of `ACT` or `PBO` and a binary response variable `resp` taking the values of `Yes` or `No`. 
For this example, a response is defined as a score greater than 4.\n\n\n\nThe below shows that for the Active Treatment, there are 36 responders out of 154 subjects = 0.234 (23.4% responders).\n\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 4 × 3\n# Groups: trt [2]\n trt resp n\n \n1 ACT No 118\n2 ACT Yes 36\n3 PBO No 65\n4 PBO Yes 12\n```\n\n\n:::\n:::\n\n\n# Packages\n\nThe table below indicates which methods can be produced using each package. Methods are grouped by those that aim to achieve the nominal confidence interval on average, then the 'exact' and continuity adjusted methods that aim to achieve the nominal confidence level as a minimum.\n\n| | cardx | PropCIs | ratesci | DescTools | cicalc | Hmisc | RBesT |\n|----------------------------|-------|---------|---------|-----------|--------|-------|-------|\n| For proximate coverage: | | | | | | | |\n| **Wald** | Y | Y | Y | Y | Y | Y | \\- |\n| **Agresti-Coull** | Y | Y | Y | Y | Y | \\- | \\- |\n| **Jeffreys** | Y | \\- | Y | Y | Y | \\- | \\- |\n| **Mid-P** | \\- | Y | Y | Y | Y | \\- | \\- |\n| **Wilson** | Y | Y | Y | Y | Y | Y | \\- |\n| **SCAS** | \\- | \\- | Y | \\- | \\- | \\- | \\- |\n| For conservative coverage: | | | | | | | |\n| **Wald-cc** | Y | \\- | Y | Y | Y | \\- | \\- |\n| **Jeffreys-cc** | \\- | \\- | Y | \\- | \\- | \\- | \\- |\n| **Clopper-Pearson** | Y | Y | Y | Y | Y | Y | Y |\n| **Wilson-cc** | Y | \\- | Y | \\- | Y | \\- | \\- |\n| **SCAS-cc** | \\- | \\- | Y | \\- | \\- | \\- | \\- |\n| **Blaker** | \\- | Y | Y | Y | \\- | \\- | \\- |\n\nMost of these packages require just the number of events (numerator number of successes) & total number of subjects (denominator) as an input dataset. The exception is {cardx}, which takes input data as one row per individual.\n\n**The {cardx} package** includes a separate function for each method. Alternatively, you can use `cardx::ard_categorical_ci(data = act, variables = respn, method ='wilson')` for example. 
This invokes the code for the selected method but returns an analysis results dataset (ARD) format as the output.\n\n**The {PropCIs} package** contains separate functions to produce CIs for various methods, including Blaker's 'exact' method and Mid-P which aren't available in {cardx} but are produced by SAS PROC FREQ. We found results agreed with SAS to the 5th decimal place. For methods given by both {PropCIs} and {cardx}, the results generally aligned to at least the 7th decimal place, but some methods (Wilson & mid-P) are only output with 4 dps.\n\n**The {ratesci} package** (`rateci()` function) produces a variety of CIs for a proportion - note, the current development version at has added more methods compared to the CRAN release (v1.0.0). A CRAN update will be released in due course.\n\n**The {DescTools} package** has a function `BinomCI()` that produces a wide selection of methods. It has\n\n**The {cicalc} package** ... \\[TBC\\]\n\n**In the {Hmisc} package,** `binconf()` produces CIs using either the Clopper-Pearson, Wilson or Wald method. In this example (x=36 and n=154), the results match the cardx package.\n\n```{=html}\n\n```\n\n**The {RBesT} package** produces CIs using the Clopper-Pearson method with the `BinaryExactCI()` function. 
(Prior to version 1.8-0, the function produced erroneous results for boundary cases where x = 0 or x = N).\n\nAnother package producing many of the above intervals is **{contingencytables}**, but the format of its output objects is not very easy to work with.\n\n# Methods for Calculating Confidence Intervals for a single proportion using R\n\nFor more technical derivation and characteristics of each of the methods listed below, see the corresponding [SAS page](https://psiaims.github.io/CAMIS/SAS/ci_for_prop.html).\n\nMost of the methods are demonstrated below using the {cardx} package.\n\n## Clopper-Pearson (Exact or binomial CI) Method\n\nThe Clopper-Pearson 'Exact' CI is one of the most popular methods - it is often a preferred 'safe option' to avoid the known deficiencies of the Wald method and guarantee coverage of at least the nominal confidence level, but it can be excessively conservative (too wide an interval compared to the interval containing the true population proportion 95% of the time).\n\nThe {cardx} package calculates the Clopper-Pearson interval by calling the `stats::binom.test()` function.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncardx::proportion_ci_clopper_pearson(act2, conf.level = 0.95) |>\n as_tibble() |>\n select(-(statistic:parameter))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 8\n N n conf.level estimate conf.low conf.high method alternative\n \n1 154 36 0.95 0.234 0.169 0.309 Clopper-Pearso… two.sided \n```\n\n\n:::\n:::\n\n\n## Normal Approximation Method (Also known as the Wald Method)\n\nThe following code calculates a confidence interval for a binomial proportion using the normal approximation equation manually. 
This is replicated exactly using the `cardx::proportion_ci_wald function` which also allows the continuity correction to be applied.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# sample proportion by trt\nsummary <- adcibc |>\n filter(trt == \"ACT\") |>\n group_by(resp) |>\n tally() |>\n ungroup() |>\n mutate(\n total = sum(n),\n p = n / total\n )\n\n# Calculate standard error and 95% wald confidence intervals for population proportion\nwaldci <- summary |>\n filter(resp == \"Yes\") |>\n mutate(\n se = sqrt(p * (1 - p) / total),\n lower_ci = (p - qnorm(1 - 0.05 / 2) * se),\n upper_ci = (p + qnorm(1 - 0.05 / 2) * se)\n )\nwaldci\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 7\n resp n total p se lower_ci upper_ci\n \n1 Yes 36 154 0.234 0.0341 0.167 0.301\n```\n\n\n:::\n\n```{.r .cell-code}\n# cardx package Wald method without continuity correction\ncardx::proportion_ci_wald(act2, conf.level = 0.95, correct = FALSE) |>\n as_tibble()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 7\n N n estimate conf.low conf.high conf.level method \n \n1 154 36 0.234 0.167 0.301 0.95 Wald Confidence Interval w…\n```\n\n\n:::\n:::\n\n\n## Wilson Method (Also known as the (Asymptotic) Score method)\n\nThe cardx package calculates the Wilson (score) method by calling the `stats::prop.test()` function. This method is often used as a compromise between the Clopper-Pearson and the Wald given it was found to be accurate for most parameter values (even those close to 0 and 1), and it does not suffer from being over-conservative. However, it over-corrects the asymmetric one-sided coverage of Wald - the location of the Wilson interval is shifted too far towards 0.5[@tonycai2005]. 
For more technical information see the corresponding [SAS page](https://psiaims.github.io/CAMIS/SAS/ci_for_prop.html).\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# cardx package Wilson method without continuity correction\ncardx::proportion_ci_wilson(act2, conf.level = 0.95, correct = FALSE) |>\n as_tibble() |>\n select(-(statistic:parameter))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 8\n N n conf.level estimate conf.low conf.high method alternative\n \n1 154 36 0.95 0.234 0.174 0.307 Wilson Confide… two.sided \n```\n\n\n:::\n:::\n\n\nA recent improvement to the Wilson Score method introduces a skewness correction in order to address the asymmetric one-sided coverage probabilities[@laud2017]. The resulting Skewness Corrected Asymptotic Score (SCAS) method can be obtained from the {ratesci} package. Note in this example the interval is shifted downwards by about 0.0015.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nratesci::rateci(x = 36, n = 154)$scas |>\n as_tibble()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 5\n lower est upper x n\n \n1 0.172 0.234 0.305 36 154\n```\n\n\n:::\n:::\n\n\n## Agresti-Coull Method\n\nThe {cardx} package calculates the Agresti-Coull method using the equation from the published method by Alan Agresti & Brent Coull based on adding 2 successes and 2 failures before computing the wald CI. 
The CI is truncated, when it overshoots the boundary (\\<0 or \\>1).\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# cardx package agresti_coull method\ncardx::proportion_ci_agresti_coull(act2, conf.level = 0.95) |>\n as_tibble()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 7\n N n estimate conf.low conf.high conf.level method \n \n1 154 36 0.234 0.174 0.307 0.95 Agresti-Coull Confidence I…\n```\n\n\n:::\n:::\n\n\n## Jeffreys Method\n\nThe Jeffreys 'equal-tailed' method is a particular type of Bayesian method, which optimises central location instead of Highest Probability Density (HPD)[@brown2001]. For proportions, the beta distribution is generally used for the prior, which consists of two parameters alpha and beta. Setting alpha=beta=0.5 is called the Jeffreys prior. NOTE: if you want to use any other priors you can use the `ratesci:jeffreysci()` function which allows a different beta prior to be specified using the `ai` and `bi` arguments. Alternatively (and if you prefer to opt for an HPD approach not recommended by Brown et al.), you can use `binom::binom.bayes`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# cardx package jeffreys method\ncardx::proportion_ci_jeffreys(act2, conf.level = 0.95) |>\n as_tibble()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 7\n N n estimate conf.low conf.high conf.level method \n \n1 154 36 0.234 0.172 0.305 0.95 Jeffreys Interval\n```\n\n\n:::\n:::\n\n\n## Binomial based mid-P Method\n\nThe table below (from {ratesci}) includes two versions of the mid-P interval, one derived iteratively from binomial tail proportions and another from quantiles of a beta distribution. These have been claimed to be equivalent[@brown2001], but it seems they are not quite identical - the beta version is very close to the Jeffreys interval. 
The first version closely matches with the result (0.17200131, 0.30545883) from SAS PROC FREQ:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nratesci::rateci(x = 36, n = 154, precis = 10)$ciarray[,,1] |>\n as_tibble(rownames = \"Method\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 0 × 0\n```\n\n\n:::\n:::\n\n\nThe mid-P result from {DescTools} doesn't quite match with either of the above results, beyond the 4th decimal place, even with increased precision using the `tol` argument (which according to the documentation only affects the Blaker method). The documentation indicates that the mid-P interval is derived from the F-distribution, which suggests there are actually 3 slightly different versions of 'mid-P'.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nDescTools::BinomCI(x = 36, n = 154, \n tol = 1e-10,\n method = c(\"jeffreys\", \"wilson\", \"midp\", \"clopper-pearson\")) |> \n as_tibble(rownames = \"Method\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 4 × 4\n Method est lwr.ci upr.ci\n \n1 jeffreys 0.234 0.172 0.305\n2 wilson 0.234 0.174 0.307\n3 midp 0.234 0.172 0.305\n4 clopper-pearson 0.234 0.169 0.309\n```\n\n\n:::\n:::\n\n\nThe PropCIs version only displays 4dps, because the calculations are performed on sequential values of p at intervals of 0.0005. Therefore the 4th decimal place shown is only ever 1 or 6, and the PropCIs output for this method is only reliable up to the 3rd decimal place. 
(A similar precision issue is also noted for the `exactci::binom.exact(..., midp = TRUE)` function, which is probably due to reliance on `uniroot()` with its default value for the `tol` argument.)\n\n\n::: {.cell}\n\n```{.r .cell-code}\nPropCIs::midPci(x = 36, n = 154, conf.level = 0.95) \n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\n\ndata: \n\n95 percent confidence interval:\n 0.1721 0.3051\n```\n\n\n:::\n:::\n\n\n## Blaker Method\n\nThe Blaker 'exact' CI matching SAS output (to at least 7 dps) may be obtained as follows from {ratesci}, or from the dedicated {BlakerCI} package. Output from `DescTools::BinomCI` and `PropCIs::blakerci()` is less precise, matching to 4 decimal places.\n\n\n::: {.cell}\n\n```{.r .cell-code}\noptions(digits=8)\n# ratesci package Blaker method \nratesci::rateci(36, 154, cc = TRUE)$blaker |>\n as_tibble()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 5\n lower est upper x n\n \n1 0.170 0.234 0.307 36 154\n```\n\n\n:::\n:::\n\n\n# Continuity Adjusted Methods\n\nContinuity adjustments (at a magnitude of $\\gamma / N$ where conventionally $\\gamma = 0.5$ but smaller values may be used) can be applied to the Wald, Wilson and SCAS formulae.\n\nHere is an example of the adjusted Wilson interval from {cardx}:\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# cardx package Wilson method with continuity correction\n\ncardx::proportion_ci_wilson(act2, conf.level = 0.95, correct = TRUE) |> \n as_tibble()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 11\n N n conf.level estimate statistic p.value parameter conf.low\n \n1 154 36 0.95 0.234 42.6 6.70e-11 1 0.171\n# ℹ 3 more variables: conf.high , method , alternative \n```\n\n\n:::\n:::\n\n\nIn principle, the concept can also be applied to the Jeffreys CI, although with $\\gamma=0.5$ this turns out to be identical to the Clopper-Pearson interval.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nratesci::rateci(x = 36, n = 154, cc = 0.5)$ciarray |>\n 
drop() |>\n as_tibble(rownames=\"Method\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 0 × 0\n```\n\n\n:::\n:::\n\n\n# Consistency with hypothesis tests\n\nThe consistency of CIs for a proportion with a hypothesis test depends on what test is specified. Some confidence interval methods are derived by inverting a test, and therefore guarantee consistency. This includes Clopper Pearson (exact binomial test from `prop.test()`), Wilson Score (asymptotic binomial test from `binom.test()` using the standard error estimated under the null hypothesis), and mid-P (exact test with mid-P adjustment, e.g. from `exactci::binom.exact(..., midp = TRUE)`), although given the observations above, it would be important to select the right version of the mid-P interval. For the SCAS method, the CI is consistent with a skewness-corrected version of the asymptotic test (output by the `ratesci::scoreci()` function).\n\nThe output from some of the {cardx} functions (e.g. `proportion_ci_wilson()`) and `proportion_ci_clopperpearson()` includes a p-value, which is not mentioned in the package documentation. It is a test against a null hypothesis of p=0.5, which is the default value used by the underlying `binom.test` and `prop.test` functions. If a test against a specified value of p was required, then those underlying functions should be used to obtain the p-value, as there is no facility to select a different null value in the {cardx} functions.\n\n# Stratified Score methods\n\n\\[To be moved to separate page for stratified analysis?\\]\n\n{cardx} also contains a function `proportion_ci_strat_wilson()` which calculates stratified Wilson CIs for unequal proportions[@yan2010], with or without continuity adjustment, using a combination of the CIs from each stratum. The default weights (not clearly documented) are presumably those described in the publication, which aim to minimise the weighted sum of the squared interval lengths. 
The function has the facility to specify other weights (such as proportional to stratum size, for example, or adjusted for known population weights). Examples shown use the `adcibc2` dataset, stratified by age group.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nactstrat <- adcibc2 |>\n select(TRTP, AVAL, AGEGR1) |>\n filter(TRTP != \"Placebo\") |>\n mutate(\n respn = if_else(AVAL > 4, 1, 0),\n agegrp = factor(AGEGR1)\n ) |>\n select(respn, agegrp)\nactage <- actstrat$agegrp\n\ntable(actage, act2) |>\n as_tibble()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 6 × 3\n actage act2 n\n \n1 <65 0 13\n2 >80 0 32\n3 65-80 0 73\n4 <65 1 4\n5 >80 1 11\n6 65-80 1 21\n```\n\n\n:::\n\n```{.r .cell-code}\ntable(actage) |>\n as_tibble()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 3 × 2\n actage n\n \n1 <65 17\n2 >80 43\n3 65-80 94\n```\n\n\n:::\n\n```{.r .cell-code}\ncardx::proportion_ci_strat_wilson(x = act2, strata = actage) |>\n as_tibble()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 3 × 8\n N n estimate conf.low conf.high conf.level weights method \n \n1 154 36 0.234 0.174 0.307 0.95 0.113 Stratified Wilson …\n2 154 36 0.234 0.174 0.307 0.95 0.263 Stratified Wilson …\n3 154 36 0.234 0.174 0.307 0.95 0.623 Stratified Wilson …\n```\n\n\n:::\n\n```{.r .cell-code}\ncardx::proportion_ci_strat_wilson(x = act2, strata = actage, weights = c(1/3, 1/3, 1/3)) |>\n as_tibble()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 7\n N n estimate conf.low conf.high conf.level method \n \n1 154 36 0.234 0.166 0.332 0.95 Stratified Wilson Confiden…\n```\n\n\n:::\n:::\n\n\nThe {ratesci} package can also produce stratified Wilson or SCAS intervals (with or without continuity adjustment), though the underlying methodology is different - this version uses a stratified score function[@laud2017] (analogous to the Miettinen-Nurminen formula for stratified risk difference) instead of constructing the interval from a CI 
calculated separately for each stratum. For stratified analysis, the `ratesci::scoreci()` function takes inputs in the form of vectors of the numerators and denominators per stratum. By default, weights are proportional to stratum size, but custom weights are also catered for using the `wt` argument. Arbitrary fixed weights are shown here to allow comparison with the {cardx} version.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nratesci::scoreci(x1 = c(4, 21, 11), \n n1 = c(17, 94, 43), \n contrast = 'p', \n skew = FALSE, \n stratified = TRUE)$estimates |>\n as_tibble()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 6\n lower est upper level p1hat p1mle\n \n1 0.174 0.234 0.307 0.95 0.234 0.234\n```\n\n\n:::\n\n```{.r .cell-code}\nratesci::scoreci(x1 = c(4, 21, 11), \n n1 = c(17, 94, 43), \n wt = c(1,1,1), \n contrast = 'p', \n skew = FALSE, \n stratified = TRUE)$estimates |>\n as_tibble()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 6\n lower est upper level p1hat p1mle\n \n1 0.164 0.238 0.332 0.95 0.238 0.238\n```\n\n\n:::\n:::\n\n\n# References\n", + "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" ], diff --git a/_freeze/R/cmh/execute-results/html.json b/_freeze/R/cmh/execute-results/html.json index b4e603e3a..5f1989f67 100644 --- a/_freeze/R/cmh/execute-results/html.json +++ b/_freeze/R/cmh/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "4f45d4656eb8f12516ae57f6c0f1716a", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"CMH Test\"\n---\n\n# Cochran-Mantel-Haenszel Test\n\nThe classical CMH procedure tests for conditional independence in partial contingency tables for a 2 x 2 x K design. However, it can be generalized to tables of X x Y x K dimensions.\n\n## Available R packages\n\nWe did not find any R package that delivers all the same measures as SAS at once. Therefore, we tried out multiple packages:\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n \n
PackageGeneral AssociationRow Means DifferNonzero CorrelationM-H Odds RatioHomogeneity TestNote
stats::mantelhaen.test()Only provides common OR with confidence interval for 2x2xK. For the generalized case, the p-value is provided only
vcdExtra::CMHtest()Problems with sparsity in case of many strata; Do not use the types argument
\n
\n```\n\n:::\n:::\n\n\n## Data used\n\nWe will use the CDISC Pilot data set, which is publicly available on the PHUSE Test Data Factory repository. This data set served as the basis of the examples to follow.\n\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n X STUDYID SITEID SITEGR1 USUBJID TRTSDT TRTEDT\n1 1 CDISCPILOT01 701 701 01-701-1015 2014-01-02 2014-07-02\n2 2 CDISCPILOT01 701 701 01-701-1023 2012-08-05 2012-09-01\n3 3 CDISCPILOT01 701 701 01-701-1028 2013-07-19 2014-01-14\n4 4 CDISCPILOT01 701 701 01-701-1033 2014-03-18 2014-03-31\n5 5 CDISCPILOT01 701 701 01-701-1034 2014-07-01 2014-12-30\n6 6 CDISCPILOT01 701 701 01-701-1047 2013-02-12 2013-03-09\n TRTP TRTPN AGE AGEGR1 AGEGR1N RACE RACEN SEX ITTFL EFFFL\n1 Placebo 0 63 <65 1 WHITE 1 F Y Y\n2 Placebo 0 64 <65 1 WHITE 1 M Y Y\n3 Xanomeline High Dose 81 71 65-80 2 WHITE 1 M Y Y\n4 Xanomeline Low Dose 54 74 65-80 2 WHITE 1 M Y Y\n5 Xanomeline High Dose 81 77 65-80 2 WHITE 1 F Y Y\n6 Placebo 0 85 >80 3 WHITE 1 F Y Y\n COMP24FL AVISIT AVISITN VISIT VISITNUM ADY ADT PARAMCD\n1 Y Week 8 8 WEEK 8 8 63 2014-03-05 CIBICVAL\n2 N Week 8 8 WEEK 4 5 29 2012-09-02 CIBICVAL\n3 Y Week 8 8 WEEK 8 8 54 2013-09-10 CIBICVAL\n4 N Week 8 8 WEEK 4 5 28 2014-04-14 CIBICVAL\n5 Y Week 8 8 WEEK 8 8 57 2014-08-26 CIBICVAL\n6 N Week 8 8 AMBUL ECG REMOVAL 6 46 2013-03-29 CIBICVAL\n PARAM PARAMN AVAL ANL01FL DTYPE AWRANGE AWTARGET AWTDIFF AWLO AWHI AWU\n1 CIBIC Score 1 4 Y NA 2-84 56 7 2 84 DAYS\n2 CIBIC Score 1 3 Y NA 2-84 56 27 2 84 DAYS\n3 CIBIC Score 1 4 Y NA 2-84 56 2 2 84 DAYS\n4 CIBIC Score 1 4 Y NA 2-84 56 28 2 84 DAYS\n5 CIBIC Score 1 4 Y NA 2-84 56 1 2 84 DAYS\n6 CIBIC Score 1 4 Y NA 2-84 56 10 2 84 DAYS\n QSSEQ\n1 6001\n2 6001\n3 6001\n4 6001\n5 6001\n6 6001\n```\n\n\n:::\n:::\n\n\n## Example Code\n\n### Base R\n\nThis is included in a base installation of R, as part of the default `stats` package. Requires inputting data as a *table* or as *vectors*. 
Two examples 2x2x2 examples: X=2 treatments, Y=Sex (M/F), controlling for 2 age groups and 3x2x3 example X=3 treatments, Y=Sex (M/F), controlling for 3 age groups.\n\nBy default, a continuity correction is applied when computing the test statistic for the 2 x 2 x K cases. This can be turned off by `correct=FALSE`\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# 2x2x2 example\ndata2 <- data |>\n dplyr::filter(TRTPN != \"54\" & AGEGR1 != \">80\")\n\nstats::mantelhaen.test(x = data2$TRTP, y = data2$SEX, z = data2$AGEGR1)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tMantel-Haenszel chi-squared test with continuity correction\n\ndata: data2$TRTP and data2$SEX and data2$AGEGR1\nMantel-Haenszel X-squared = 0.076264, df = 1, p-value = 0.7824\nalternative hypothesis: true common odds ratio is not equal to 1\n95 percent confidence interval:\n 0.5671347 2.5129874\nsample estimates:\ncommon odds ratio \n 1.193818 \n```\n\n\n:::\n\n```{.r .cell-code}\nstats::mantelhaen.test(x = data2$TRTP, y = data2$SEX, z = data2$AGEGR1, correct=FALSE)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tMantel-Haenszel chi-squared test without continuity correction\n\ndata: data2$TRTP and data2$SEX and data2$AGEGR1\nMantel-Haenszel X-squared = 0.21655, df = 1, p-value = 0.6417\nalternative hypothesis: true common odds ratio is not equal to 1\n95 percent confidence interval:\n 0.5671347 2.5129874\nsample estimates:\ncommon odds ratio \n 1.193818 \n```\n\n\n:::\n\n```{.r .cell-code}\n# 3x2x3 example\nstats::mantelhaen.test(x = data$TRTP, y = data$SEX, z = data$AGEGR1)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tCochran-Mantel-Haenszel test\n\ndata: data$TRTP and data$SEX and data$AGEGR1\nCochran-Mantel-Haenszel M^2 = 2.482, df = 2, p-value = 0.2891\n```\n\n\n:::\n:::\n\n\n### vcdExtra\n\nThe `vcdExtra` package provides results for the generalized CMH test, for each of the three model it outputs the Chi-square value and the respective p-values. 
Flexible data input methods available: *table* or *formula* (aggregated level data in a data frame).\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(vcdExtra)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nLoading required package: vcd\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nLoading required package: grid\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nLoading required package: gnm\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nRegistered S3 method overwritten by 'vcdExtra':\n method from\n print.Kappa vcd \n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\n\nAttaching package: 'vcdExtra'\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nThe following object is masked from 'package:vcd':\n\n woolf_test\n```\n\n\n:::\n\n```{.r .cell-code}\n# Formula: Freq ~ X + Y | K\n\n# case 2 x 2 x 2 (no continuity correction is applied)\nvcdExtra::CMHtest(Freq ~ TRTP + SEX | AGEGR1, data = data2, overall = TRUE)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n$`AGEGR1:<65`\nCochran-Mantel-Haenszel Statistics for TRTP by SEX \n\tin stratum AGEGR1:<65 \n\n AltHypothesis Chisq Df Prob\ncor Nonzero correlation 1.4933 1 0.2217\nrmeans Row mean scores differ 1.4933 1 0.2217\ncmeans Col mean scores differ 1.4933 1 0.2217\ngeneral General association 1.4933 1 0.2217\n\n\n$`AGEGR1:65-80`\nCochran-Mantel-Haenszel Statistics for TRTP by SEX \n\tin stratum AGEGR1:65-80 \n\n AltHypothesis Chisq Df Prob\ncor Nonzero correlation 0.0090703 1 0.92413\nrmeans Row mean scores differ 0.0090703 1 0.92413\ncmeans Col mean scores differ 0.0090703 1 0.92413\ngeneral General association 0.0090703 1 0.92413\n\n\n$ALL\nCochran-Mantel-Haenszel Statistics for TRTP by SEX \n\tOverall tests, controlling for all strata \n\n AltHypothesis Chisq Df Prob\ncor Nonzero correlation 0.21655 1 0.64168\nrmeans Row mean scores differ 0.21655 1 0.64168\ncmeans Col mean scores differ 0.21655 1 0.64168\ngeneral General 
association 0.21655 1 0.64168\n```\n\n\n:::\n\n```{.r .cell-code}\n# case 3 x 2 x 3\nvcdExtra::CMHtest(Freq ~ TRTP + SEX | AGEGR1, data = data, overall = TRUE)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n$`AGEGR1:<65`\nCochran-Mantel-Haenszel Statistics for TRTP by SEX \n\tin stratum AGEGR1:<65 \n\n AltHypothesis Chisq Df Prob\ncor Nonzero correlation 0.33168 1 0.56467\nrmeans Row mean scores differ 1.52821 2 0.46575\ncmeans Col mean scores differ 0.33168 1 0.56467\ngeneral General association 1.52821 2 0.46575\n\n\n$`AGEGR1:>80`\nCochran-Mantel-Haenszel Statistics for TRTP by SEX \n\tin stratum AGEGR1:>80 \n\n AltHypothesis Chisq Df Prob\ncor Nonzero correlation 0.39433 1 0.53003\nrmeans Row mean scores differ 3.80104 2 0.14949\ncmeans Col mean scores differ 0.39433 1 0.53003\ngeneral General association 3.80104 2 0.14949\n\n\n$`AGEGR1:65-80`\nCochran-Mantel-Haenszel Statistics for TRTP by SEX \n\tin stratum AGEGR1:65-80 \n\n AltHypothesis Chisq Df Prob\ncor Nonzero correlation 0.52744 1 0.46768\nrmeans Row mean scores differ 0.62921 2 0.73008\ncmeans Col mean scores differ 0.52744 1 0.46768\ngeneral General association 0.62921 2 0.73008\n\n\n$ALL\nCochran-Mantel-Haenszel Statistics for TRTP by SEX \n\tOverall tests, controlling for all strata \n\n AltHypothesis Chisq Df Prob\ncor Nonzero correlation 0.00086897 1 0.97648\nrmeans Row mean scores differ 2.482 2 0.28909\ncmeans Col mean scores differ 0.00086897 1 0.97648\ngeneral General association 2.482 2 0.28909\n```\n\n\n:::\n:::\n\n\nUsing the `vcd` package, the odds ratio for each 2x2 table can be calculated and the Woolf-test on Homogeneity of Odds Ratios can be performed.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ntab0 = table(data2$TRTP, data2$SEX, data2$AGEGR1)\nvcd::oddsratio(tab0, log=FALSE)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n odds ratios for and by \n\n <65 65-80 \n 3.00 0.96 \n```\n\n\n:::\n\n```{.r .cell-code}\nvcd::woolf_test(tab0)\n```\n\n::: {.cell-output 
.cell-output-stdout}\n\n```\n\n\tWoolf-test on Homogeneity of Odds Ratios (no 3-Way assoc.)\n\ndata: tab0\nX-squared = 1.3339, df = 1, p-value = 0.2481\n```\n\n\n:::\n:::\n\n\nNote the Woolf test is available in other packages, for example `DescTools`.\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(DescTools)\nDescTools::WoolfTest(tab0)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tWoolf Test on Homogeneity of Odds Ratios (no 3-Way assoc.)\n\ndata: tab0\nX-squared = 1.3339, df = 1, p-value = 0.2481\n```\n\n\n:::\n:::\n\n\n\n### Forked Version of vcdExtra - Solution for sparse data\n\nTo tackle the [issue with sparse data](https://github.com/friendly/vcdExtra/issues/3) it was suggested that `solve()` is replaced with `MASS::ginv`. This was implemented in the forked version of vcdExtra (https://github.com/mstackhouse/vcdExtra). However this was never implemented in the vcdExtra package itself, and therefore never extensively tested.\n\n\n# References\n\nAccessible Summary: https://online.stat.psu.edu/stat504/lesson/4/4.4\n\nAn Introduction to Categorical Data Analysis 2nd Edition (Agresti): http://users.stat.ufl.edu/\\~aa/\n\nOriginal Paper 1: https://doi.org/10.2307%2F3001616\n\nOriginal Paper 2: https://doi.org/10.1093/jnci/22.4.719\n", + "markdown": "---\ntitle: \"CMH Test\"\n---\n\n# Cochran-Mantel-Haenszel Test\n\nThe classical CMH procedure tests for conditional independence in partial contingency tables for a 2 x 2 x K design. However, it can be generalized to tables of X x Y x K dimensions.\n\n## Available R packages\n\nWe did not find any R package that delivers all the same measures as SAS at once. Therefore, we tried out multiple packages:\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n \n
PackageGeneral AssociationRow Means DifferNonzero CorrelationM-H Odds RatioHomogeneity TestNote
stats::mantelhaen.test()Only provides common OR with confidence interval for 2x2xK. For the generalized case, the p-value is provided only
vcdExtra::CMHtest()Problems with sparsity in case of many strata; Do not use the types argument
\n
\n```\n\n:::\n:::\n\n\n## Data used\n\nWe will use the CDISC Pilot data set, which is publicly available on the PHUSE Test Data Factory repository. This data set served as the basis of the examples to follow.\n\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n X STUDYID SITEID SITEGR1 USUBJID TRTSDT TRTEDT\n1 1 CDISCPILOT01 701 701 01-701-1015 2014-01-02 2014-07-02\n2 2 CDISCPILOT01 701 701 01-701-1023 2012-08-05 2012-09-01\n3 3 CDISCPILOT01 701 701 01-701-1028 2013-07-19 2014-01-14\n4 4 CDISCPILOT01 701 701 01-701-1033 2014-03-18 2014-03-31\n5 5 CDISCPILOT01 701 701 01-701-1034 2014-07-01 2014-12-30\n6 6 CDISCPILOT01 701 701 01-701-1047 2013-02-12 2013-03-09\n TRTP TRTPN AGE AGEGR1 AGEGR1N RACE RACEN SEX ITTFL EFFFL\n1 Placebo 0 63 <65 1 WHITE 1 F Y Y\n2 Placebo 0 64 <65 1 WHITE 1 M Y Y\n3 Xanomeline High Dose 81 71 65-80 2 WHITE 1 M Y Y\n4 Xanomeline Low Dose 54 74 65-80 2 WHITE 1 M Y Y\n5 Xanomeline High Dose 81 77 65-80 2 WHITE 1 F Y Y\n6 Placebo 0 85 >80 3 WHITE 1 F Y Y\n COMP24FL AVISIT AVISITN VISIT VISITNUM ADY ADT PARAMCD\n1 Y Week 8 8 WEEK 8 8 63 2014-03-05 CIBICVAL\n2 N Week 8 8 WEEK 4 5 29 2012-09-02 CIBICVAL\n3 Y Week 8 8 WEEK 8 8 54 2013-09-10 CIBICVAL\n4 N Week 8 8 WEEK 4 5 28 2014-04-14 CIBICVAL\n5 Y Week 8 8 WEEK 8 8 57 2014-08-26 CIBICVAL\n6 N Week 8 8 AMBUL ECG REMOVAL 6 46 2013-03-29 CIBICVAL\n PARAM PARAMN AVAL ANL01FL DTYPE AWRANGE AWTARGET AWTDIFF AWLO AWHI AWU\n1 CIBIC Score 1 4 Y NA 2-84 56 7 2 84 DAYS\n2 CIBIC Score 1 3 Y NA 2-84 56 27 2 84 DAYS\n3 CIBIC Score 1 4 Y NA 2-84 56 2 2 84 DAYS\n4 CIBIC Score 1 4 Y NA 2-84 56 28 2 84 DAYS\n5 CIBIC Score 1 4 Y NA 2-84 56 1 2 84 DAYS\n6 CIBIC Score 1 4 Y NA 2-84 56 10 2 84 DAYS\n QSSEQ\n1 6001\n2 6001\n3 6001\n4 6001\n5 6001\n6 6001\n```\n\n\n:::\n:::\n\n\n## Example Code\n\n### Base R\n\nThis is included in a base installation of R, as part of the default `stats` package. Requires inputting data as a *table* or as *vectors*. 
Two examples 2x2x2 examples: X=2 treatments, Y=Sex (M/F), controlling for 2 age groups and 3x2x3 example X=3 treatments, Y=Sex (M/F), controlling for 3 age groups.\n\nBy default, a continuity correction is applied when computing the test statistic for the 2 x 2 x K cases. This can be turned off by `correct=FALSE`\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# 2x2x2 example\ndata2 <- data |>\n dplyr::filter(TRTPN != \"54\" & AGEGR1 != \">80\")\n\nstats::mantelhaen.test(x = data2$TRTP, y = data2$SEX, z = data2$AGEGR1)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tMantel-Haenszel chi-squared test with continuity correction\n\ndata: data2$TRTP and data2$SEX and data2$AGEGR1\nMantel-Haenszel X-squared = 0.076264, df = 1, p-value = 0.7824\nalternative hypothesis: true common odds ratio is not equal to 1\n95 percent confidence interval:\n 0.5671347 2.5129874\nsample estimates:\ncommon odds ratio \n 1.193818 \n```\n\n\n:::\n\n```{.r .cell-code}\nstats::mantelhaen.test(x = data2$TRTP, y = data2$SEX, z = data2$AGEGR1, correct=FALSE)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tMantel-Haenszel chi-squared test without continuity correction\n\ndata: data2$TRTP and data2$SEX and data2$AGEGR1\nMantel-Haenszel X-squared = 0.21655, df = 1, p-value = 0.6417\nalternative hypothesis: true common odds ratio is not equal to 1\n95 percent confidence interval:\n 0.5671347 2.5129874\nsample estimates:\ncommon odds ratio \n 1.193818 \n```\n\n\n:::\n\n```{.r .cell-code}\n# 3x2x3 example\nstats::mantelhaen.test(x = data$TRTP, y = data$SEX, z = data$AGEGR1)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tCochran-Mantel-Haenszel test\n\ndata: data$TRTP and data$SEX and data$AGEGR1\nCochran-Mantel-Haenszel M^2 = 2.482, df = 2, p-value = 0.2891\n```\n\n\n:::\n:::\n\n\n### vcdExtra\n\nThe `vcdExtra` package provides results for the generalized CMH test, for each of the three model it outputs the Chi-square value and the respective p-values. 
Flexible data input methods available: *table* or *formula* (aggregated level data in a data frame).\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(vcdExtra)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nLoading required package: vcd\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nLoading required package: grid\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nLoading required package: gnm\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nRegistered S3 method overwritten by 'vcdExtra':\n method from\n print.Kappa vcd \n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\n\nAttaching package: 'vcdExtra'\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nThe following object is masked from 'package:vcd':\n\n woolf_test\n```\n\n\n:::\n\n```{.r .cell-code}\n# Formula: Freq ~ X + Y | K\n\n# case 2 x 2 x 2 (no continuity correction is applied)\nvcdExtra::CMHtest(Freq ~ TRTP + SEX | AGEGR1, data = data2, overall = TRUE)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n$`AGEGR1:<65`\nCochran-Mantel-Haenszel Statistics for TRTP by SEX \n\tin stratum AGEGR1:<65 \n\n AltHypothesis Chisq Df Prob\ncor Nonzero correlation 1.4933 1 0.2217\nrmeans Row mean scores differ 1.4933 1 0.2217\ncmeans Col mean scores differ 1.4933 1 0.2217\ngeneral General association 1.4933 1 0.2217\n\n\n$`AGEGR1:65-80`\nCochran-Mantel-Haenszel Statistics for TRTP by SEX \n\tin stratum AGEGR1:65-80 \n\n AltHypothesis Chisq Df Prob\ncor Nonzero correlation 0.0090703 1 0.92413\nrmeans Row mean scores differ 0.0090703 1 0.92413\ncmeans Col mean scores differ 0.0090703 1 0.92413\ngeneral General association 0.0090703 1 0.92413\n\n\n$ALL\nCochran-Mantel-Haenszel Statistics for TRTP by SEX \n\tOverall tests, controlling for all strata \n\n AltHypothesis Chisq Df Prob\ncor Nonzero correlation 0.21655 1 0.64168\nrmeans Row mean scores differ 0.21655 1 0.64168\ncmeans Col mean scores differ 0.21655 1 0.64168\ngeneral General 
association 0.21655 1 0.64168\n```\n\n\n:::\n\n```{.r .cell-code}\n# case 3 x 2 x 3\nvcdExtra::CMHtest(Freq ~ TRTP + SEX | AGEGR1, data = data, overall = TRUE)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n$`AGEGR1:<65`\nCochran-Mantel-Haenszel Statistics for TRTP by SEX \n\tin stratum AGEGR1:<65 \n\n AltHypothesis Chisq Df Prob\ncor Nonzero correlation 0.33168 1 0.56467\nrmeans Row mean scores differ 1.52821 2 0.46575\ncmeans Col mean scores differ 0.33168 1 0.56467\ngeneral General association 1.52821 2 0.46575\n\n\n$`AGEGR1:>80`\nCochran-Mantel-Haenszel Statistics for TRTP by SEX \n\tin stratum AGEGR1:>80 \n\n AltHypothesis Chisq Df Prob\ncor Nonzero correlation 0.39433 1 0.53003\nrmeans Row mean scores differ 3.80104 2 0.14949\ncmeans Col mean scores differ 0.39433 1 0.53003\ngeneral General association 3.80104 2 0.14949\n\n\n$`AGEGR1:65-80`\nCochran-Mantel-Haenszel Statistics for TRTP by SEX \n\tin stratum AGEGR1:65-80 \n\n AltHypothesis Chisq Df Prob\ncor Nonzero correlation 0.52744 1 0.46768\nrmeans Row mean scores differ 0.62921 2 0.73008\ncmeans Col mean scores differ 0.52744 1 0.46768\ngeneral General association 0.62921 2 0.73008\n\n\n$ALL\nCochran-Mantel-Haenszel Statistics for TRTP by SEX \n\tOverall tests, controlling for all strata \n\n AltHypothesis Chisq Df Prob\ncor Nonzero correlation 0.00086897 1 0.97648\nrmeans Row mean scores differ 2.482 2 0.28909\ncmeans Col mean scores differ 0.00086897 1 0.97648\ngeneral General association 2.482 2 0.28909\n```\n\n\n:::\n:::\n\n\nUsing the `vcd` package, the odds ratio for each 2x2 table can be calculated and the Woolf-test on Homogeneity of Odds Ratios can be performed.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ntab0 = table(data2$TRTP, data2$SEX, data2$AGEGR1)\nvcd::oddsratio(tab0, log=FALSE)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n odds ratios for and by \n\n <65 65-80 \n 3.00 0.96 \n```\n\n\n:::\n\n```{.r .cell-code}\nvcd::woolf_test(tab0)\n```\n\n::: {.cell-output 
.cell-output-stdout}\n\n```\n\n\tWoolf-test on Homogeneity of Odds Ratios (no 3-Way assoc.)\n\ndata: tab0\nX-squared = 1.3339, df = 1, p-value = 0.2481\n```\n\n\n:::\n:::\n\n\nNote the Woolf test is available in other packages, for example `DescTools`.\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(DescTools)\nDescTools::WoolfTest(tab0)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tWoolf Test on Homogeneity of Odds Ratios (no 3-Way assoc.)\n\ndata: tab0\nX-squared = 1.3339, df = 1, p-value = 0.2481\n```\n\n\n:::\n:::\n\n\n\n### Forked Version of vcdExtra - Solution for sparse data\n\nTo tackle the [issue with sparse data](https://github.com/friendly/vcdExtra/issues/3) it was suggested that `solve()` is replaced with `MASS::ginv`. This was implemented in the forked version of vcdExtra (https://github.com/mstackhouse/vcdExtra). However this was never implemented in the vcdExtra package itself, and therefore never extensively tested.\n\n\n# References\n\nAccessible Summary: https://online.stat.psu.edu/stat504/lesson/4/4.4\n\nAn Introduction to Categorical Data Analysis 2nd Edition (Agresti): http://users.stat.ufl.edu/\\~aa/\n\nOriginal Paper 1: https://doi.org/10.2307%2F3001616\n\nOriginal Paper 2: https://doi.org/10.1093/jnci/22.4.719\n", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/R/count_data_regression/figure-html/unnamed-chunk-3-1.png b/_freeze/R/count_data_regression/figure-html/unnamed-chunk-3-1.png index 70bd5b8da..082f8c871 100644 Binary files a/_freeze/R/count_data_regression/figure-html/unnamed-chunk-3-1.png and b/_freeze/R/count_data_regression/figure-html/unnamed-chunk-3-1.png differ diff --git a/_freeze/R/friedman_test/execute-results/html.json b/_freeze/R/friedman_test/execute-results/html.json index a1629e8cf..165a5136a 100644 --- a/_freeze/R/friedman_test/execute-results/html.json +++ b/_freeze/R/friedman_test/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "d288eb607ad4bec254cf11e0bd79d26e", "result": { 
"engine": "knitr", - "markdown": "---\ntitle: \"Friedman Test Analysis\"\ndate: \"2025-04-09\"\n---\n\n## Friedman Test Analysis\n\nFriedman's test is a non-parametric statistical test used to detect differences in treatments across multiple test attempts. It is often used when the assumptions of ANOVA are not met, particularly the assumption of normality. The test is applicable for repeated measures, or matched groups, making it useful for situations where the same subjects are subjected to different treatments.\n\nFriedman’s test ranks the data points within each block (or subject) separately, and then analyzes these ranks to see if the mean ranks differ between the groups and conditions. If the test shows significant differences, this suggests that at least one of the treatments differs from the others. Because it is non-parametric, it does not assume the normal distribution of data, which makes it robust for skewed or ordinal data.\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Load required packages\nlibrary(tidyverse)\nlibrary(broom)\nlibrary(rstatix)\n```\n:::\n\n\n\n### Hypotheses\n\nH₀ (Null Hypothesis): There are no significant differences in weight outcomes between the three diets\n\nH₁ (Alternative Hypothesis): There are significant differences in weight outcomes between at least two diets \n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Create the dataset\nDiet_A = c(75, 68, 80, 72, 85, 70, 82, 78, 75, 83)\n\nDiet_B = c(82, 70, 85, 78, 88, 75, 85, 80, 79, 87)\n\nDiet_C = c(78, 65, 82, 75, 84, 72, 80, 76, 77, 84)\n\ndata <- tibble(\n subjid = rep(1:10, 3),\n diet = rep(c(\"A\", \"B\", \"C\"), each = 10),\n weight = c(Diet_A, Diet_B, Diet_C)\n)\n```\n:::\n\n\n### Base R {stats}\n\nTo run a Friedman's test in R you can use the {stats} package. This will return the chi-squared test statistic and p-value. 
\n\n::: {.cell}\n\n```{.r .cell-code}\n# Perform Friedman test\nfriedman_test <- stats::friedman.test(weight ~ diet | subjid, data = data)\nfriedman_test |>\n broom::tidy()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 4\n statistic p.value parameter method \n \n1 15.2 0.000500 2 Friedman rank sum test\n```\n\n\n:::\n:::\n\n\n### {rstatix}\n\nAlternatively, you can use the {rstatix} package. While these packages give the same results, the {rstatix} results come as a tibble we can easily use. \n\n::: {.cell}\n\n```{.r .cell-code}\ntest <- data |>\n rstatix::friedman_test(weight ~ diet | subjid)\ntest\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 6\n .y. n statistic df p method \n* \n1 weight 10 15.2 2 0.000500 Friedman test\n```\n\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Create boxplot\nggplot(data, aes(x = diet, y = weight, fill = diet)) +\n geom_boxplot() +\n theme_minimal() +\n labs(\n title = \"Weight Distribution Across Different Diets\",\n x = \"Diet Type\",\n y = \"Weight\"\n )\n```\n\n::: {.cell-output-display}\n![](friedman_test_files/figure-html/visualization-1.png){width=768}\n:::\n:::\n\n\n### Conclusion\n\nBased on the analysis:\n\n1. **Statistical Test Results:**\n\n- The Friedman test yielded a p-value of 5.0045143\\times 10^{-4}\n\n\\[If p \\< 0.05, we reject the null hypothesis\n\nIf p \\> 0.05, we fail to reject the null hypothesis\\]\n\n2. **Visual Analysis:**\n\n- From the boxplot, Diet B shows the highest median weight\n\n- Diet B also appears to have the highest overall weight distribution\n\n- Diet A and Diet C show similar distributions but lower than Diet B\n\n\n3. 
**Interpretation:**\n\n- If the goal is weight gain: Diet B appears most effective\n\n- If the goal is weight maintenance: Diet A or C might be more suitable\n\n- However, individual responses vary, as shown by the overlapping distributions\n\n## Reference\n\n*Cite all sources and references used in the analysis.*\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-23\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P abind 1.4-8 2024-09-12 [?] RSPM\n askpass 1.2.1 2024-10-04 [1] RSPM\n P backports 1.5.0 2024-05-23 [?] RSPM\n base64enc 0.1-6 2026-02-02 [1] RSPM\n bit 4.6.0 2025-03-06 [1] RSPM\n bit64 4.6.0-1 2025-01-16 [1] RSPM\n blob 1.3.0 2026-01-14 [1] RSPM\n boot 1.3-32 2025-08-29 [2] CRAN (R 4.5.2)\n P broom * 1.0.12 2026-01-27 [?] RSPM\n bslib 0.10.0 2026-01-26 [1] RSPM\n cachem 1.1.0 2024-05-16 [1] RSPM\n callr 3.7.6 2024-03-25 [1] RSPM\n P car 3.1-5 2026-02-03 [?] RSPM\n P carData 3.0-6 2026-01-30 [?] RSPM\n cellranger 1.1.0 2016-07-27 [1] RSPM\n P cli 3.6.5 2025-04-23 [?] 
RSPM\n clipr 0.8.0 2022-02-22 [1] RSPM\n colorspace 2.1-2 2025-09-22 [1] RSPM\n conflicted 1.2.0 2023-02-01 [1] RSPM\n corrplot 0.95 2024-10-14 [1] RSPM\n cowplot 1.2.0 2025-07-07 [1] RSPM\n cpp11 0.5.3 2026-01-20 [1] RSPM\n crayon 1.5.3 2024-06-20 [1] RSPM\n curl 7.0.0 2025-08-19 [1] RSPM\n data.table 1.18.2.1 2026-01-27 [1] RSPM\n DBI 1.2.3 2024-06-02 [1] RSPM\n dbplyr 2.5.2 2026-02-13 [1] RSPM\n Deriv 4.2.0 2025-06-20 [1] RSPM\n P digest 0.6.39 2025-11-19 [?] RSPM\n doBy 4.7.1 2025-12-02 [1] RSPM\n P dplyr * 1.2.0 2026-02-03 [?] RSPM\n dtplyr 1.3.3 2026-02-11 [1] RSPM\n P evaluate 1.0.5 2025-08-27 [?] RSPM\n P farver 2.1.2 2024-05-13 [?] RSPM\n P fastmap 1.2.0 2024-05-15 [?] RSPM\n fontawesome 0.5.3 2024-11-16 [1] RSPM\n P forcats * 1.0.1 2025-09-25 [?] RSPM\n forecast 9.0.1 2026-02-14 [1] RSPM\n P Formula 1.2-5 2023-02-24 [?] RSPM\n fracdiff 1.5-3 2024-02-01 [1] RSPM\n fs 1.6.6 2025-04-12 [1] RSPM\n gargle 1.6.1 2026-01-29 [1] RSPM\n P generics 0.1.4 2025-05-09 [?] RSPM\n P ggplot2 * 4.0.2 2026-02-03 [?] RSPM\n P glue 1.8.0 2024-09-30 [?] RSPM\n googledrive 2.1.2 2025-09-10 [1] RSPM\n googlesheets4 1.1.2 2025-09-03 [1] RSPM\n P gtable 0.3.6 2024-10-25 [?] RSPM\n haven 2.5.5 2025-05-30 [1] RSPM\n highr 0.11 2024-05-26 [1] RSPM\n P hms 1.1.4 2025-10-17 [?] RSPM\n P htmltools 0.5.9 2025-12-04 [?] RSPM\n httr 1.4.8 2026-02-13 [1] RSPM\n ids 1.0.1 2017-05-31 [1] RSPM\n isoband 0.3.0 2025-12-07 [1] RSPM\n jquerylib 0.1.4 2021-04-26 [1] RSPM\n P jsonlite 2.0.0 2025-03-27 [?] RSPM\n P knitr 1.51 2025-12-20 [?] RSPM\n P labeling 0.4.3 2023-08-29 [?] RSPM\n lattice 0.22-7 2025-04-02 [2] CRAN (R 4.5.2)\n P lifecycle 1.0.5 2026-01-08 [?] RSPM\n lme4 1.1-38 2025-12-02 [1] RSPM\n lmtest 0.9-40 2022-03-21 [1] RSPM\n P lubridate * 1.9.5 2026-02-04 [?] RSPM\n P magrittr 2.0.4 2025-09-12 [?] 
RSPM\n MASS 7.3-65 2025-02-28 [2] CRAN (R 4.5.2)\n Matrix 1.7-4 2025-08-28 [2] CRAN (R 4.5.2)\n MatrixModels 0.5-4 2025-03-26 [1] RSPM\n memoise 2.0.1 2021-11-26 [1] RSPM\n mgcv 1.9-3 2025-04-04 [2] CRAN (R 4.5.2)\n microbenchmark 1.5.0 2024-09-04 [1] RSPM\n mime 0.13 2025-03-17 [1] RSPM\n minqa 1.2.8 2024-08-17 [1] RSPM\n modelr 0.1.11 2023-03-22 [1] RSPM\n nlme 3.1-168 2025-03-31 [2] CRAN (R 4.5.2)\n nloptr 2.2.1 2025-03-17 [1] RSPM\n nnet 7.3-20 2025-01-01 [2] CRAN (R 4.5.2)\n numDeriv 2016.8-1.1 2019-06-06 [1] RSPM\n openssl 2.3.4 2025-09-30 [1] RSPM\n pbkrtest 0.5.5 2025-07-18 [1] RSPM\n P pillar 1.11.1 2025-09-17 [?] RSPM\n P pkgconfig 2.0.3 2019-09-22 [?] RSPM\n prettyunits 1.2.0 2023-09-24 [1] RSPM\n processx 3.8.6 2025-02-21 [1] RSPM\n progress 1.2.3 2023-12-06 [1] RSPM\n ps 1.9.1 2025-04-12 [1] RSPM\n P purrr * 1.2.1 2026-01-09 [?] RSPM\n quantreg 6.1 2025-03-10 [1] RSPM\n P R6 2.6.1 2025-02-15 [?] RSPM\n ragg 1.5.0 2025-09-02 [1] RSPM\n rappdirs 0.3.4 2026-01-17 [1] RSPM\n rbibutils 2.4.1 2026-01-21 [1] RSPM\n P RColorBrewer 1.1-3 2022-04-03 [?] RSPM\n Rcpp 1.1.1 2026-01-10 [1] RSPM\n RcppArmadillo 15.2.3-1 2025-12-17 [1] RSPM\n RcppEigen 0.3.4.0.2 2024-08-24 [1] RSPM\n Rdpack 2.6.6 2026-02-08 [1] RSPM\n P readr * 2.1.6 2025-11-14 [?] RSPM\n readxl 1.4.5 2025-03-07 [1] RSPM\n reformulas 0.4.4 2026-02-02 [1] RSPM\n rematch 2.0.0 2023-08-30 [1] RSPM\n rematch2 2.1.2 2020-05-01 [1] RSPM\n reprex 2.1.1 2024-07-06 [1] RSPM\n P rlang 1.1.7 2026-01-09 [?] RSPM\n P rmarkdown 2.30 2025-09-28 [?] RSPM\n P rstatix * 0.7.3 2025-10-18 [?] RSPM\n rstudioapi 0.18.0 2026-01-16 [1] RSPM\n rvest 1.0.5 2025-08-29 [1] RSPM\n P S7 0.2.1 2025-11-14 [?] RSPM\n sass 0.4.10 2025-04-11 [1] RSPM\n P scales 1.4.0 2025-04-24 [?] RSPM\n selectr 0.5-1 2025-12-17 [1] RSPM\n SparseM 1.84-2 2024-07-17 [1] RSPM\n P stringi 1.8.7 2025-03-27 [?] RSPM\n P stringr * 1.6.0 2025-11-04 [?] 
RSPM\n survival 3.8-3 2024-12-17 [2] CRAN (R 4.5.2)\n sys 3.4.3 2024-10-04 [1] RSPM\n systemfonts 1.3.1 2025-10-01 [1] RSPM\n textshaping 1.0.4 2025-10-10 [1] RSPM\n P tibble * 3.3.1 2026-01-11 [?] RSPM\n P tidyr * 1.3.2 2025-12-19 [?] RSPM\n P tidyselect 1.2.1 2024-03-11 [?] RSPM\n P tidyverse * 2.0.0 2023-02-22 [?] RSPM\n P timechange 0.4.0 2026-01-29 [?] RSPM\n timeDate 4052.112 2026-01-28 [1] RSPM\n tinytex 0.58 2025-11-19 [1] RSPM\n P tzdb 0.5.0 2025-03-15 [?] RSPM\n urca 1.3-4 2024-05-27 [1] RSPM\n P utf8 1.2.6 2025-06-08 [?] RSPM\n uuid 1.2-2 2026-01-23 [1] RSPM\n P vctrs 0.7.1 2026-01-23 [?] RSPM\n viridisLite 0.4.3 2026-02-04 [1] RSPM\n vroom 1.7.0 2026-01-27 [1] RSPM\n P withr 3.0.2 2024-10-28 [?] RSPM\n P xfun 0.56 2026-01-18 [?] RSPM\n xml2 1.5.2 2026-01-17 [1] RSPM\n P yaml 2.3.12 2025-12-10 [?] RSPM\n zoo 1.8-15 2025-12-15 [1] RSPM\n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n\n * ── Packages attached to the search path.\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n\n:::\n\n", + "markdown": "---\ntitle: \"Friedman Test Analysis\"\ndate: \"2025-04-09\"\n---\n\n## Friedman Test Analysis\n\nFriedman's test is a non-parametric statistical test used to detect differences in treatments across multiple test attempts. It is often used when the assumptions of ANOVA are not met, particularly the assumption of normality. The test is applicable for repeated measures, or matched groups, making it useful for situations where the same subjects are subjected to different treatments.\n\nFriedman’s test ranks the data points within each block (or subject) separately, and then analyzes these ranks to see if the mean ranks differ between the groups and conditions. 
If the test shows significant differences, this suggests that at least one of the treatments differs from the others. Because it is non-parametric, it does not assume the normal distribution of data, which makes it robust for skewed or ordinal data.\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Load required packages\nlibrary(tidyverse)\nlibrary(broom)\nlibrary(rstatix)\n```\n:::\n\n\n\n### Hypotheses\n\nH₀ (Null Hypothesis): There are no significant differences in weight outcomes between the three diets\n\nH₁ (Alternative Hypothesis): There are significant differences in weight outcomes between at least two diets \n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Create the dataset\nDiet_A = c(75, 68, 80, 72, 85, 70, 82, 78, 75, 83)\n\nDiet_B = c(82, 70, 85, 78, 88, 75, 85, 80, 79, 87)\n\nDiet_C = c(78, 65, 82, 75, 84, 72, 80, 76, 77, 84)\n\ndata <- tibble(\n subjid = rep(1:10, 3),\n diet = rep(c(\"A\", \"B\", \"C\"), each = 10),\n weight = c(Diet_A, Diet_B, Diet_C)\n)\n```\n:::\n\n\n### Base R {stats}\n\nTo run a Friedman's test in R you can use the {stats} package. This will return the chi-squared test statistic and p-value. \n\n::: {.cell}\n\n```{.r .cell-code}\n# Perform Friedman test\nfriedman_test <- stats::friedman.test(weight ~ diet | subjid, data = data)\nfriedman_test |>\n broom::tidy()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 4\n statistic p.value parameter method \n \n1 15.2 0.000500 2 Friedman rank sum test\n```\n\n\n:::\n:::\n\n\n### {rstatix}\n\nAlternatively, you can use the {rstatix} package. While these packages give the same results, the {rstatix} results come as a tibble we can easily use. \n\n::: {.cell}\n\n```{.r .cell-code}\ntest <- data |>\n rstatix::friedman_test(weight ~ diet | subjid)\ntest\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 6\n .y. 
n statistic df p method \n* \n1 weight 10 15.2 2 0.000500 Friedman test\n```\n\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Create boxplot\nggplot(data, aes(x = diet, y = weight, fill = diet)) +\n geom_boxplot() +\n theme_minimal() +\n labs(\n title = \"Weight Distribution Across Different Diets\",\n x = \"Diet Type\",\n y = \"Weight\"\n )\n```\n\n::: {.cell-output-display}\n![](friedman_test_files/figure-html/visualization-1.png){width=768}\n:::\n:::\n\n\n### Conclusion\n\nBased on the analysis:\n\n1. **Statistical Test Results:**\n\n- The Friedman test yielded a p-value of 5.0045143\\times 10^{-4}\n\n\\[If p \\< 0.05, we reject the null hypothesis\n\nIf p \\> 0.05, we fail to reject the null hypothesis\\]\n\n2. **Visual Analysis:**\n\n- From the boxplot, Diet B shows the highest median weight\n\n- Diet B also appears to have the highest overall weight distribution\n\n- Diet A and Diet C show similar distributions but lower than Diet B\n\n\n3. **Interpretation:**\n\n- If the goal is weight gain: Diet B appears most effective\n\n- If the goal is weight maintenance: Diet A or C might be more suitable\n\n- However, individual responses vary, as shown by the overlapping distributions\n\n## Reference\n\n*Cite all sources and references used in the analysis.*\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os Ubuntu 24.04.4 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-26\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P abind 1.4-8 2024-09-12 [?] 
RSPM (R 4.5.0)\n askpass 1.2.1 2024-10-04 [1] RSPM (R 4.5.0)\n P backports 1.5.0 2024-05-23 [?] RSPM (R 4.5.0)\n base64enc 0.1-6 2026-02-02 [1] RSPM (R 4.5.0)\n bit 4.6.0 2025-03-06 [1] RSPM (R 4.5.0)\n bit64 4.6.0-1 2025-01-16 [1] RSPM (R 4.5.0)\n blob 1.3.0 2026-01-14 [1] RSPM (R 4.5.0)\n boot 1.3-32 2025-08-29 [2] CRAN (R 4.5.2)\n P broom * 1.0.12 2026-01-27 [?] RSPM (R 4.5.0)\n bslib 0.10.0 2026-01-26 [1] RSPM (R 4.5.0)\n cachem 1.1.0 2024-05-16 [1] RSPM (R 4.5.0)\n callr 3.7.6 2024-03-25 [1] RSPM (R 4.5.0)\n P car 3.1-5 2026-02-03 [?] RSPM (R 4.5.0)\n P carData 3.0-6 2026-01-30 [?] RSPM (R 4.5.0)\n cellranger 1.1.0 2016-07-27 [1] RSPM (R 4.5.0)\n P cli 3.6.5 2025-04-23 [?] RSPM (R 4.5.0)\n clipr 0.8.0 2022-02-22 [1] RSPM (R 4.5.0)\n colorspace 2.1-2 2025-09-22 [1] RSPM (R 4.5.0)\n conflicted 1.2.0 2023-02-01 [1] RSPM (R 4.5.0)\n corrplot 0.95 2024-10-14 [1] RSPM (R 4.5.0)\n cowplot 1.2.0 2025-07-07 [1] RSPM (R 4.5.0)\n cpp11 0.5.3 2026-01-20 [1] RSPM (R 4.5.0)\n crayon 1.5.3 2024-06-20 [1] RSPM (R 4.5.0)\n curl 7.0.0 2025-08-19 [1] RSPM (R 4.5.0)\n data.table 1.18.2.1 2026-01-27 [1] RSPM (R 4.5.0)\n DBI 1.2.3 2024-06-02 [1] RSPM (R 4.5.0)\n dbplyr 2.5.2 2026-02-13 [1] RSPM (R 4.5.0)\n Deriv 4.2.0 2025-06-20 [1] RSPM (R 4.5.0)\n P digest 0.6.39 2025-11-19 [?] RSPM (R 4.5.0)\n doBy 4.7.1 2025-12-02 [1] RSPM (R 4.5.0)\n P dplyr * 1.2.0 2026-02-03 [?] RSPM (R 4.5.0)\n dtplyr 1.3.3 2026-02-11 [1] RSPM (R 4.5.0)\n P evaluate 1.0.5 2025-08-27 [?] RSPM (R 4.5.0)\n P farver 2.1.2 2024-05-13 [?] RSPM (R 4.5.0)\n P fastmap 1.2.0 2024-05-15 [?] RSPM (R 4.5.0)\n fontawesome 0.5.3 2024-11-16 [1] RSPM (R 4.5.0)\n P forcats * 1.0.1 2025-09-25 [?] RSPM (R 4.5.0)\n forecast 9.0.1 2026-02-14 [1] RSPM (R 4.5.0)\n P Formula 1.2-5 2023-02-24 [?] RSPM (R 4.5.0)\n fracdiff 1.5-3 2024-02-01 [1] RSPM (R 4.5.0)\n fs 1.6.6 2025-04-12 [1] RSPM (R 4.5.0)\n gargle 1.6.1 2026-01-29 [1] RSPM (R 4.5.0)\n P generics 0.1.4 2025-05-09 [?] RSPM (R 4.5.0)\n P ggplot2 * 4.0.2 2026-02-03 [?] 
RSPM (R 4.5.0)\n P glue 1.8.0 2024-09-30 [?] RSPM (R 4.5.0)\n googledrive 2.1.2 2025-09-10 [1] RSPM (R 4.5.0)\n googlesheets4 1.1.2 2025-09-03 [1] RSPM (R 4.5.0)\n P gtable 0.3.6 2024-10-25 [?] RSPM (R 4.5.0)\n haven 2.5.5 2025-05-30 [1] RSPM (R 4.5.0)\n highr 0.11 2024-05-26 [1] RSPM (R 4.5.0)\n P hms 1.1.4 2025-10-17 [?] RSPM (R 4.5.0)\n P htmltools 0.5.9 2025-12-04 [?] RSPM (R 4.5.0)\n httr 1.4.8 2026-02-13 [1] RSPM (R 4.5.0)\n ids 1.0.1 2017-05-31 [1] RSPM (R 4.5.0)\n isoband 0.3.0 2025-12-07 [1] RSPM (R 4.5.0)\n jquerylib 0.1.4 2021-04-26 [1] RSPM (R 4.5.0)\n P jsonlite 2.0.0 2025-03-27 [?] RSPM (R 4.5.0)\n P knitr 1.51 2025-12-20 [?] RSPM (R 4.5.0)\n P labeling 0.4.3 2023-08-29 [?] RSPM (R 4.5.0)\n lattice 0.22-7 2025-04-02 [1] RSPM (R 4.5.0)\n P lifecycle 1.0.5 2026-01-08 [?] RSPM (R 4.5.0)\n lme4 1.1-38 2025-12-02 [1] RSPM (R 4.5.0)\n lmtest 0.9-40 2022-03-21 [1] RSPM (R 4.5.0)\n P lubridate * 1.9.5 2026-02-04 [?] RSPM (R 4.5.0)\n P magrittr 2.0.4 2025-09-12 [?] RSPM (R 4.5.0)\n MASS 7.3-65 2025-02-28 [2] CRAN (R 4.5.2)\n Matrix 1.7-4 2025-08-28 [2] CRAN (R 4.5.2)\n MatrixModels 0.5-4 2025-03-26 [1] RSPM (R 4.5.0)\n memoise 2.0.1 2021-11-26 [1] RSPM (R 4.5.0)\n mgcv 1.9-3 2025-04-04 [1] RSPM (R 4.5.0)\n microbenchmark 1.5.0 2024-09-04 [1] RSPM (R 4.5.0)\n mime 0.13 2025-03-17 [1] RSPM (R 4.5.0)\n minqa 1.2.8 2024-08-17 [1] RSPM (R 4.5.0)\n modelr 0.1.11 2023-03-22 [1] RSPM (R 4.5.0)\n nlme 3.1-168 2025-03-31 [2] CRAN (R 4.5.2)\n nloptr 2.2.1 2025-03-17 [1] RSPM (R 4.5.0)\n nnet 7.3-20 2025-01-01 [2] CRAN (R 4.5.2)\n numDeriv 2016.8-1.1 2019-06-06 [1] RSPM (R 4.5.0)\n openssl 2.3.4 2025-09-30 [1] RSPM (R 4.5.0)\n pbkrtest 0.5.5 2025-07-18 [1] RSPM (R 4.5.0)\n P pillar 1.11.1 2025-09-17 [?] RSPM (R 4.5.0)\n P pkgconfig 2.0.3 2019-09-22 [?] 
RSPM (R 4.5.0)\n prettyunits 1.2.0 2023-09-24 [1] RSPM (R 4.5.0)\n processx 3.8.6 2025-02-21 [1] RSPM (R 4.5.0)\n progress 1.2.3 2023-12-06 [1] RSPM (R 4.5.0)\n ps 1.9.1 2025-04-12 [1] RSPM (R 4.5.0)\n P purrr * 1.2.1 2026-01-09 [?] RSPM (R 4.5.0)\n quantreg 6.1 2025-03-10 [1] RSPM (R 4.5.0)\n P R6 2.6.1 2025-02-15 [?] RSPM (R 4.5.0)\n ragg 1.5.0 2025-09-02 [1] RSPM (R 4.5.0)\n rappdirs 0.3.4 2026-01-17 [1] RSPM (R 4.5.0)\n rbibutils 2.4.1 2026-01-21 [1] RSPM (R 4.5.0)\n P RColorBrewer 1.1-3 2022-04-03 [?] RSPM (R 4.5.0)\n Rcpp 1.1.1 2026-01-10 [1] RSPM (R 4.5.0)\n RcppArmadillo 15.2.3-1 2025-12-17 [1] RSPM (R 4.5.0)\n RcppEigen 0.3.4.0.2 2024-08-24 [1] RSPM (R 4.5.0)\n Rdpack 2.6.6 2026-02-08 [1] RSPM (R 4.5.0)\n P readr * 2.1.6 2025-11-14 [?] RSPM (R 4.5.0)\n readxl 1.4.5 2025-03-07 [1] RSPM (R 4.5.0)\n reformulas 0.4.4 2026-02-02 [1] RSPM (R 4.5.0)\n rematch 2.0.0 2023-08-30 [1] RSPM (R 4.5.0)\n rematch2 2.1.2 2020-05-01 [1] RSPM (R 4.5.0)\n reprex 2.1.1 2024-07-06 [1] RSPM (R 4.5.0)\n P rlang 1.1.7 2026-01-09 [?] RSPM (R 4.5.0)\n P rmarkdown 2.30 2025-09-28 [?] RSPM (R 4.5.0)\n P rstatix * 0.7.3 2025-10-18 [?] RSPM (R 4.5.0)\n rstudioapi 0.18.0 2026-01-16 [1] RSPM (R 4.5.0)\n rvest 1.0.5 2025-08-29 [1] RSPM (R 4.5.0)\n P S7 0.2.1 2025-11-14 [?] RSPM (R 4.5.0)\n sass 0.4.10 2025-04-11 [1] RSPM (R 4.5.0)\n P scales 1.4.0 2025-04-24 [?] RSPM (R 4.5.0)\n selectr 0.5-1 2025-12-17 [1] RSPM (R 4.5.0)\n SparseM 1.84-2 2024-07-17 [1] RSPM (R 4.5.0)\n P stringi 1.8.7 2025-03-27 [?] RSPM (R 4.5.0)\n P stringr * 1.6.0 2025-11-04 [?] RSPM (R 4.5.0)\n survival 3.8-3 2024-12-17 [1] RSPM (R 4.5.0)\n sys 3.4.3 2024-10-04 [1] RSPM (R 4.5.0)\n systemfonts 1.3.1 2025-10-01 [1] RSPM (R 4.5.0)\n textshaping 1.0.4 2025-10-10 [1] RSPM (R 4.5.0)\n P tibble * 3.3.1 2026-01-11 [?] RSPM (R 4.5.0)\n P tidyr * 1.3.2 2025-12-19 [?] RSPM (R 4.5.0)\n P tidyselect 1.2.1 2024-03-11 [?] RSPM (R 4.5.0)\n P tidyverse * 2.0.0 2023-02-22 [?] RSPM (R 4.5.0)\n P timechange 0.4.0 2026-01-29 [?] 
RSPM (R 4.5.0)\n timeDate 4052.112 2026-01-28 [1] RSPM (R 4.5.0)\n tinytex 0.58 2025-11-19 [1] RSPM (R 4.5.0)\n P tzdb 0.5.0 2025-03-15 [?] RSPM (R 4.5.0)\n urca 1.3-4 2024-05-27 [1] RSPM (R 4.5.0)\n P utf8 1.2.6 2025-06-08 [?] RSPM (R 4.5.0)\n uuid 1.2-2 2026-01-23 [1] RSPM (R 4.5.0)\n P vctrs 0.7.1 2026-01-23 [?] RSPM (R 4.5.0)\n viridisLite 0.4.3 2026-02-04 [1] RSPM (R 4.5.0)\n vroom 1.7.0 2026-01-27 [1] RSPM (R 4.5.0)\n P withr 3.0.2 2024-10-28 [?] RSPM (R 4.5.0)\n P xfun 0.56 2026-01-18 [?] RSPM (R 4.5.0)\n xml2 1.5.2 2026-01-17 [1] RSPM (R 4.5.0)\n P yaml 2.3.12 2025-12-10 [?] RSPM (R 4.5.0)\n zoo 1.8-15 2025-12-15 [1] RSPM (R 4.5.0)\n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.2/lib/R/library\n\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n\n:::\n\n", "supporting": [ "friedman_test_files" ], diff --git a/_freeze/R/friedman_test/figure-html/visualization-1.png b/_freeze/R/friedman_test/figure-html/visualization-1.png index 7fe7a5006..583c5f86e 100644 Binary files a/_freeze/R/friedman_test/figure-html/visualization-1.png and b/_freeze/R/friedman_test/figure-html/visualization-1.png differ diff --git a/_freeze/R/gee/execute-results/html.json b/_freeze/R/gee/execute-results/html.json index 31f8954fd..554c52503 100644 --- a/_freeze/R/gee/execute-results/html.json +++ b/_freeze/R/gee/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "a305492e63ea10896ae2081b01359f66", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Generalized Estimating Equations (GEE) methods in R\"\n---\n\n# INTRODUCTION\n\nGeneralized Estimating Equations (GEE) methods extend the Generalized Linear Model (GLM) framework using link functions that relate the predictors to transformed outcome variable. 
For dichotomous response variables, the link functions is the probit (in case of rare events complementary log-log may be preferable). For outcomes with more than two categories, the cumulative link function is used in case of ordinal variables and generalized logit for nominal variables.\n\nGEE are marginal models and therefore estimate population-averaged effects and within-subject correlation is analysed by specifying a working correlation structure (as in MMRM). Estimators are obtained via quasi-likelihood via iterative solving of estimating equations.\n\n# EXAMPLE DATA\n\nA SAS data of clinical trial data comparing two treatments for a respiratory disorder available in [\"Gee Model for Binary Data\"](https://documentation.sas.com/doc/en/statug/15.2/statug_code_genmex5.htm) in the SAS/STAT Sample Program Library \\[1\\] is used to create these examples.\n\nTo uniquely identify subjects, a new variable USUBJID was created by concatenating SITE and ID. Variables TREATMENT, BASELINE, and VISIT were renamed to TRTP, BASE, and AVISITN.\n\nAdditionally, two variables were created using randomly generated values to simulate variables with more than two categories. One was an ordinal variable with values 1, 2, and 3; the other was a nominal variable with categories 'liver', 'lung', and 'bone'. Data were created in SAS (See SAS section), and imported to R.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n#install.packages(\"readxl\")\nlibrary(readxl)\nresp <- read_excel(\"../data/resp_gee.xlsx\")\n```\n:::\n\n\nVariables OUTCOME, TRTP and AVISITN were converted to factors. 
Since the modeling functions use the first (alphabetically) level as the reference category, TRTP levels are ordered as 'P' (placebo) and 'A' (active), to ensure that placebo is used as the reference category in the models:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nresp$trtp<-factor(resp$trtp, levels=c('P', 'A'))\nresp$avisitn<-factor(resp$avisitn)\nresp$outcome<-factor(resp$outcome)\nresp$usubjid<-factor(resp$usubjid)\nresp$respnom<-factor(resp$respnom)\n```\n:::\n\n\nGEE models are run in the next section, including treatment, visit and visit by treatment interaction as fixed effects.\n\n# PACKAGES\n\n\n\n# BINARY OUTCOME\n\nBinary outcomes can be analyzed with `geepack::geelm` \\[2\\] and and `gee::gee` \\[3\\] functions. Independence correlation matrix is used by default in both functions, and can be modified in the `corstr=` argument.\n\nBoth functions estimate robust \"Sandwich\" standard errors (SE) by default, and the gee::gee function also returns the naive SE (model-based SE).\n\n\n::: {.cell}\n\n```{.r .cell-code}\n#Create numeric outcome (values: 1/0).\nresp$outcome_num<-as.numeric(resp$outcome)-1\n\nmodel <- geepack::geeglm(outcome_num ~ trtp + avisitn +trtp*avisitn,\n id=usubjid,\n data=resp,\n family=binomial(link='logit'),\n corstr=\"independence\")\nsummary(model)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\nCall:\ngeepack::geeglm(formula = outcome_num ~ trtp + avisitn + trtp * \n avisitn, family = binomial(link = \"logit\"), data = resp, \n id = usubjid, corstr = \"independence\")\n\n Coefficients:\n Estimate Std.err Wald Pr(>|W|) \n(Intercept) -0.03509 0.26495 0.018 0.8946 \ntrtpA 0.81280 0.39503 4.234 0.0396 *\navisitn2 -0.42921 0.26357 2.652 0.1034 \navisitn3 -0.14080 0.29834 0.223 0.6370 \navisitn4 -0.21177 0.25344 0.698 0.4034 \ntrtpA:avisitn2 0.51651 0.41043 1.584 0.2082 \ntrtpA:avisitn3 0.31861 0.42839 0.553 0.4570 \ntrtpA:avisitn4 -0.11395 0.39485 0.083 0.7729 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n\nCorrelation structure = independence \nEstimated Scale Parameters:\n\n Estimate Std.err\n(Intercept) 1 0.05045\nNumber of clusters: 111 Maximum cluster size: 4 \n```\n\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel<- gee::gee(outcome ~ trtp + avisitn + trtp*avisitn,\n id=usubjid,\n data=resp,\n family=binomial(link='logit'),\n corstr = \"independence\")\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nBeginning Cgee S-function, @(#) geeformula.q 4.13 98/01/27\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nrunning glm to get initial regression estimate\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stdout}\n\n```\n (Intercept) trtpA avisitn2 avisitn3 avisitn4 \n -0.03509 0.81280 -0.42921 -0.14080 -0.21177 \ntrtpA:avisitn2 trtpA:avisitn3 trtpA:avisitn4 \n 0.51651 0.31861 -0.11395 \n```\n\n\n:::\n\n```{.r .cell-code}\nsummary(model)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n GEE: GENERALIZED LINEAR MODELS FOR DEPENDENT DATA\n gee S-function, version 4.13 modified 98/01/27 (1998) \n\nModel:\n Link: Logit \n Variance to Mean Relation: Binomial \n Correlation Structure: Independent \n\nCall:\ngee::gee(formula = outcome ~ trtp + avisitn + trtp * avisitn, \n id = usubjid, data = resp, family = binomial(link = \"logit\"), \n corstr = \"independence\")\n\nSummary of Residuals:\n Min 1Q Median 3Q Max \n-0.7222 -0.4561 0.2778 0.3889 0.6140 \n\n\nCoefficients:\n Estimate Naive S.E. Naive z Robust S.E. 
Robust z\n(Intercept) -0.03509 0.2674 -0.1312 0.2649 -0.1324\ntrtpA 0.81280 0.3986 2.0389 0.3950 2.0576\navisitn2 -0.42921 0.3832 -1.1200 0.2636 -1.6285\navisitn3 -0.14080 0.3788 -0.3717 0.2983 -0.4719\navisitn4 -0.21177 0.3795 -0.5580 0.2534 -0.8356\ntrtpA:avisitn2 0.51651 0.5699 0.9064 0.4104 1.2585\ntrtpA:avisitn3 0.31861 0.5700 0.5589 0.4284 0.7437\ntrtpA:avisitn4 -0.11395 0.5575 -0.2044 0.3948 -0.2886\n\nEstimated Scale Parameter: 1.018\nNumber of Iterations: 1\n\nWorking Correlation\n [,1] [,2] [,3] [,4]\n[1,] 1 0 0 0\n[2,] 0 1 0 0\n[3,] 0 0 1 0\n[4,] 0 0 0 1\n```\n\n\n:::\n:::\n\n\n### PROBABILITIES AND ODDS RATIO (OR)\n\nThe estimated probabilities of event and OR can be obtained using the `emmeans` function from a `geepack::geeglm` object. The code below computes probabilities and OR, along with p-values.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel <- geepack::geeglm(outcome_num ~ trtp + avisitn +trtp*avisitn,\n id=usubjid,\n data=resp,\n family=binomial(link='logit'),\n corstr=\"independence\")\n\n\n#Get predicted probabilities for each treatment.\n#Get predicted probabilities for each treatment, using the covariance matrix computed by the model\nprob <- emmeans::emmeans(model, ~ trtp*avisitn, data=resp, vcov.method=vcov(model), type='response')\nprob\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n trtp avisitn prob SE df lower.CL upper.CL\n P 1 0.491 0.0662 436 0.364 0.619\n A 1 0.685 0.0632 436 0.550 0.795\n P 2 0.386 0.0645 436 0.269 0.518\n A 2 0.704 0.0621 436 0.569 0.810\n P 3 0.456 0.0660 436 0.332 0.586\n A 3 0.722 0.0610 436 0.589 0.825\n P 4 0.439 0.0657 436 0.316 0.569\n A 4 0.611 0.0663 436 0.476 0.731\n\nCovariance estimate used: user-supplied \nConfidence level used: 0.95 \nIntervals are back-transformed from the logit scale \n```\n\n\n:::\n\n```{.r .cell-code}\n#Get differences between treatments by visit (option \"revpairwise\" is used to compare A vs P) \ndiffs_mean<-emmeans::emmeans(model, ~ trtp*avisitn, data=resp, 
vcov.method=vcov(model))\ndiffs <- emmeans::contrast(diffs_mean,\"revpairwise\" , simple=\"trtp\")\ndiffs <- as.data.frame(diffs)\n\n#Calculate CI (alpha=0.05)\nalpha<-0.05\nz_crit <- qnorm(1 - alpha / 2)\ndiffs$low<-diffs$estimate - (z_crit*diffs$SE)\ndiffs$upp<-diffs$estimate + (z_crit*diffs$SE)\n\n#Get OR applying exponential transformation;\nor<-exp(diffs$estimate)\nor_low<-exp(diffs$low)\nor_upp<-exp(diffs$up)\n\n#Get two-sided p-value\nz <- diffs$estimate/diffs$SE\npvalue <- 2 * (1 - pnorm(z))\n\n#Create a dataset with all the results\nOR<-as.data.frame(cbind(diffs$avisitn, or,or_low, or_upp, z, round(pvalue, digits=4)))\ncolnames(OR)<-c('avisit', 'OR', 'lower.CL', 'upper.CL', 'Z', 'p-value')\nOR\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n avisit OR lower.CL upper.CL Z p-value\n1 1 2.254 1.0393 4.889 2.058 0.0396\n2 2 3.778 1.7132 8.333 3.294 0.0010\n3 3 3.100 1.4050 6.840 2.802 0.0051\n4 4 2.011 0.9435 4.288 1.809 0.0704\n```\n\n\n:::\n:::\n\n\n# OUTCOME WITH MORE THAN TWO CATEGORIES\n\nThe functions used for binary outcomes above do not support outcomes with more than two categories, as these functions relay on the family function, which does not include the multinomial option. 
Nevertheless, the multgee package \\[4\\] provides two functions for estimating GEE models when the outcome has more than two categories:\n\nThe correlation matrix by default is \"Exchangeable\", and it can be modified using the `LORstr=` argument in both functions.\n\n### Ordinal variable\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel <- multgee::ordLORgee(formula = respord ~ trtp + avisitn + trtp*avisitn,\n data = resp,\n id = usubjid,\n repeated = avisitn,\n LORstr=\"time.exch\",\n )\n\nsummary(model)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nGEE FOR ORDINAL MULTINOMIAL RESPONSES \nversion 1.6.0 modified 2017-07-10 \n\nLink : Cumulative logit \n\nLocal Odds Ratios:\nStructure: time.exch\nModel: 3way\nHomogenous scores: TRUE\n\ncall:\nmultgee::ordLORgee(formula = respord ~ trtp + avisitn + trtp * \n avisitn, data = resp, id = usubjid, repeated = avisitn, LORstr = \"time.exch\")\n\nSummary of residuals:\n Min. 1st Qu. Median Mean 3rd Qu. Max. \n-0.47035 -0.35422 -0.28544 0.00032 0.63348 0.72789 \n\nNumber of Iterations: 3 \n\nCoefficients:\n Estimate san.se san.z Pr(>|san.z|) \nbeta10 -0.5472 0.2417 -2.26 0.024 *\nbeta20 0.6304 0.2551 2.47 0.013 *\ntrtpA 0.4284 0.3432 1.25 0.212 \navisitn2 -0.0024 0.3668 -0.01 0.995 \navisitn3 0.1156 0.3228 0.36 0.720 \navisitn4 -0.1470 0.3323 -0.44 0.658 \ntrtpA:avisitn2 -0.2717 0.5094 -0.53 0.594 \ntrtpA:avisitn3 -0.5974 0.4619 -1.29 0.196 \ntrtpA:avisitn4 -0.3269 0.4488 -0.73 0.466 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n\nLocal Odds Ratios Estimates:\n [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8]\n[1,] 0.000 0.000 1.000 0.999 1.000 0.999 1.000 0.999\n[2,] 0.000 0.000 0.999 1.281 0.999 1.281 0.999 1.281\n[3,] 1.000 0.999 0.000 0.000 1.000 0.999 1.000 0.999\n[4,] 0.999 1.281 0.000 0.000 0.999 1.281 0.999 1.281\n[5,] 1.000 0.999 1.000 0.999 0.000 0.000 1.000 0.999\n[6,] 0.999 1.281 0.999 1.281 0.000 0.000 0.999 1.281\n[7,] 1.000 0.999 1.000 0.999 1.000 0.999 0.000 0.000\n[8,] 0.999 1.281 0.999 1.281 0.999 1.281 0.000 0.000\n\np-value of Null model: 0.69 \n```\n\n\n:::\n:::\n\n\n### Nominal variable\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel <- multgee::nomLORgee(formula = respnom ~ trtp + avisitn + trtp*avisitn,\n data = resp,\n id = usubjid,\n repeated = avisitn,\n LORstr = \"time.exch\")\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWarning in vglm.fitter(x = x, y = y, w = w, offset = offset, Xm2 = Xm2, : some\nquantities such as z, residuals, SEs may be inaccurate due to convergence at a\nhalf-step\n```\n\n\n:::\n\n```{.r .cell-code}\nsummary(model)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nGEE FOR NOMINAL MULTINOMIAL RESPONSES \nversion 1.6.0 modified 2017-07-10 \n\nLink : Baseline Category Logit \n\nLocal Odds Ratios:\nStructure: time.exch\nModel: 3way\nHomogenous scores: TRUE\n\ncall:\nmultgee::nomLORgee(formula = respnom ~ trtp + avisitn + trtp * \n avisitn, data = resp, id = usubjid, repeated = avisitn, LORstr = \"time.exch\")\n\nSummary of residuals:\n Min. 1st Qu. Median Mean 3rd Qu. Max. 
\n -0.444 -0.368 -0.333 0.000 0.611 0.778 \n\nNumber of Iterations: 1 \n\nCoefficients:\n Estimate san.se san.z Pr(>|san.z|)\nbeta10 0.2231 0.3354 0.67 0.51\ntrtpA:1 -0.6286 0.5014 -1.25 0.21\navisitn2:1 0.1823 0.4310 0.42 0.67\navisitn3:1 0.0132 0.4786 0.03 0.98\navisitn4:1 -0.0690 0.4814 -0.14 0.89\ntrtpA:avisitn2:1 0.1625 0.6342 0.26 0.80\ntrtpA:avisitn3:1 0.9518 0.6979 1.36 0.17\ntrtpA:avisitn4:1 0.6463 0.7006 0.92 0.36\nbeta20 0.2719 0.3318 0.82 0.41\ntrtpA:2 0.0158 0.4553 0.03 0.97\navisitn2:2 0.1800 0.4254 0.42 0.67\navisitn3:2 0.1555 0.4660 0.33 0.74\navisitn4:2 -0.2719 0.5079 -0.54 0.59\ntrtpA:avisitn2:2 -0.2564 0.6025 -0.43 0.67\ntrtpA:avisitn3:2 0.1164 0.6469 0.18 0.86\ntrtpA:avisitn4:2 0.1561 0.6399 0.24 0.81\n\nLocal Odds Ratios Estimates:\n [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8]\n[1,] 0.000 0.000 1.279 1.001 1.279 1.001 1.279 1.001\n[2,] 0.000 0.000 1.001 1.000 1.001 1.000 1.001 1.000\n[3,] 1.279 1.001 0.000 0.000 1.279 1.001 1.279 1.001\n[4,] 1.001 1.000 0.000 0.000 1.001 1.000 1.001 1.000\n[5,] 1.279 1.001 1.279 1.001 0.000 0.000 1.279 1.001\n[6,] 1.001 1.000 1.001 1.000 0.000 0.000 1.001 1.000\n[7,] 1.279 1.001 1.279 1.001 1.279 1.001 0.000 0.000\n[8,] 1.001 1.000 1.001 1.000 1.001 1.000 0.000 0.000\n\np-value of Null model: 0.28 \n```\n\n\n:::\n:::\n\n\n# REFERENCES\n\n\\[1\\] [SAS Institute Inc.. SAS Help Center. The GEE procedure.](https://documentation.sas.com/doc/en/statug/15.2/statug_gee_examples01.htm)\n\n\\[2\\] [Generalized Estimating Equation Package](https://cran.r-project.org/web/packages/geepack/geepack.pdf)\n\n\\[3\\] [Generalized Estimation Equation Solver](https://cran.r-project.org/web/packages/gee/gee.pdf)\n\n\\[4\\] [Touloumis A. (2015). 
\"R Package multgee: A Generalized Estimating Equations Solver for Multinomial Responses.\" Journal of Statistical Software.](https://www.jstatsoft.org/article/view/v064i08)\n", + "markdown": "---\ntitle: \"Generalized Estimating Equations (GEE) methods in R\"\n---\n\n# INTRODUCTION\n\nGeneralized Estimating Equations (GEE) methods extend the Generalized Linear Model (GLM) framework using link functions that relate the predictors to transformed outcome variable. For dichotomous response variables, the link functions is the probit (in case of rare events complementary log-log may be preferable). For outcomes with more than two categories, the cumulative link function is used in case of ordinal variables and generalized logit for nominal variables.\n\nGEE are marginal models and therefore estimate population-averaged effects and within-subject correlation is analysed by specifying a working correlation structure (as in MMRM). Estimators are obtained via quasi-likelihood via iterative solving of estimating equations.\n\n# EXAMPLE DATA\n\nA SAS data of clinical trial data comparing two treatments for a respiratory disorder available in [\"Gee Model for Binary Data\"](https://documentation.sas.com/doc/en/statug/15.2/statug_code_genmex5.htm) in the SAS/STAT Sample Program Library \\[1\\] is used to create these examples.\n\nTo uniquely identify subjects, a new variable USUBJID was created by concatenating SITE and ID. Variables TREATMENT, BASELINE, and VISIT were renamed to TRTP, BASE, and AVISITN.\n\nAdditionally, two variables were created using randomly generated values to simulate variables with more than two categories. One was an ordinal variable with values 1, 2, and 3; the other was a nominal variable with categories 'liver', 'lung', and 'bone'. 
Data were created in SAS (See SAS section), and imported to R.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n#install.packages(\"readxl\")\nlibrary(readxl)\nresp <- read_excel(\"../data/resp_gee.xlsx\")\n```\n:::\n\n\nVariables OUTCOME, TRTP and AVISITN were converted to factors. Since the modeling functions use the first (alphabetically) level as the reference category, TRTP levels are ordered as 'P' (placebo) and 'A' (active), to ensure that placebo is used as the reference category in the models:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nresp$trtp<-factor(resp$trtp, levels=c('P', 'A'))\nresp$avisitn<-factor(resp$avisitn)\nresp$outcome<-factor(resp$outcome)\nresp$usubjid<-factor(resp$usubjid)\nresp$respnom<-factor(resp$respnom)\n```\n:::\n\n\nGEE models are run in the next section, including treatment, visit and visit by treatment interaction as fixed effects.\n\n# PACKAGES\n\n\n\n# BINARY OUTCOME\n\nBinary outcomes can be analyzed with `geepack::geelm` \\[2\\] and and `gee::gee` \\[3\\] functions. 
Independence correlation matrix is used by default in both functions, and can be modified in the `corstr=` argument.\n\nBoth functions estimate robust \"Sandwich\" standard errors (SE) by default, and the gee::gee function also returns the naive SE (model-based SE).\n\n\n::: {.cell}\n\n```{.r .cell-code}\n#Create numeric outcome (values: 1/0).\nresp$outcome_num<-as.numeric(resp$outcome)-1\n\nmodel <- geepack::geeglm(outcome_num ~ trtp + avisitn +trtp*avisitn,\n id=usubjid,\n data=resp,\n family=binomial(link='logit'),\n corstr=\"independence\")\nsummary(model)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\nCall:\ngeepack::geeglm(formula = outcome_num ~ trtp + avisitn + trtp * \n avisitn, family = binomial(link = \"logit\"), data = resp, \n id = usubjid, corstr = \"independence\")\n\n Coefficients:\n Estimate Std.err Wald Pr(>|W|) \n(Intercept) -0.03509 0.26495 0.018 0.8946 \ntrtpA 0.81280 0.39503 4.234 0.0396 *\navisitn2 -0.42921 0.26357 2.652 0.1034 \navisitn3 -0.14080 0.29834 0.223 0.6370 \navisitn4 -0.21177 0.25344 0.698 0.4034 \ntrtpA:avisitn2 0.51651 0.41043 1.584 0.2082 \ntrtpA:avisitn3 0.31861 0.42839 0.553 0.4570 \ntrtpA:avisitn4 -0.11395 0.39485 0.083 0.7729 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n\nCorrelation structure = independence \nEstimated Scale Parameters:\n\n Estimate Std.err\n(Intercept) 1 0.05045\nNumber of clusters: 111 Maximum cluster size: 4 \n```\n\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel<- gee::gee(outcome ~ trtp + avisitn + trtp*avisitn,\n id=usubjid,\n data=resp,\n family=binomial(link='logit'),\n corstr = \"independence\")\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nBeginning Cgee S-function, @(#) geeformula.q 4.13 98/01/27\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nrunning glm to get initial regression estimate\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stdout}\n\n```\n (Intercept) trtpA avisitn2 avisitn3 avisitn4 \n -0.03509 0.81280 -0.42921 -0.14080 -0.21177 \ntrtpA:avisitn2 trtpA:avisitn3 trtpA:avisitn4 \n 0.51651 0.31861 -0.11395 \n```\n\n\n:::\n\n```{.r .cell-code}\nsummary(model)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n GEE: GENERALIZED LINEAR MODELS FOR DEPENDENT DATA\n gee S-function, version 4.13 modified 98/01/27 (1998) \n\nModel:\n Link: Logit \n Variance to Mean Relation: Binomial \n Correlation Structure: Independent \n\nCall:\ngee::gee(formula = outcome ~ trtp + avisitn + trtp * avisitn, \n id = usubjid, data = resp, family = binomial(link = \"logit\"), \n corstr = \"independence\")\n\nSummary of Residuals:\n Min 1Q Median 3Q Max \n-0.7222 -0.4561 0.2778 0.3889 0.6140 \n\n\nCoefficients:\n Estimate Naive S.E. Naive z Robust S.E. 
Robust z\n(Intercept) -0.03509 0.2674 -0.1312 0.2649 -0.1324\ntrtpA 0.81280 0.3986 2.0389 0.3950 2.0576\navisitn2 -0.42921 0.3832 -1.1200 0.2636 -1.6285\navisitn3 -0.14080 0.3788 -0.3717 0.2983 -0.4719\navisitn4 -0.21177 0.3795 -0.5580 0.2534 -0.8356\ntrtpA:avisitn2 0.51651 0.5699 0.9064 0.4104 1.2585\ntrtpA:avisitn3 0.31861 0.5700 0.5589 0.4284 0.7437\ntrtpA:avisitn4 -0.11395 0.5575 -0.2044 0.3948 -0.2886\n\nEstimated Scale Parameter: 1.018\nNumber of Iterations: 1\n\nWorking Correlation\n [,1] [,2] [,3] [,4]\n[1,] 1 0 0 0\n[2,] 0 1 0 0\n[3,] 0 0 1 0\n[4,] 0 0 0 1\n```\n\n\n:::\n:::\n\n\n### PROBABILITIES AND ODDS RATIO (OR)\n\nThe estimated probabilities of event and OR can be obtained using the `emmeans` function from a `geepack::geeglm` object. The code below computes probabilities and OR, along with p-values.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel <- geepack::geeglm(outcome_num ~ trtp + avisitn +trtp*avisitn,\n id=usubjid,\n data=resp,\n family=binomial(link='logit'),\n corstr=\"independence\")\n\n\n#Get predicted probabilities for each treatment.\n#Get predicted probabilities for each treatment, using the covariance matrix computed by the model\nprob <- emmeans::emmeans(model, ~ trtp*avisitn, data=resp, vcov.method=vcov(model), type='response')\nprob\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n trtp avisitn prob SE df lower.CL upper.CL\n P 1 0.491 0.0662 436 0.364 0.619\n A 1 0.685 0.0632 436 0.550 0.795\n P 2 0.386 0.0645 436 0.269 0.518\n A 2 0.704 0.0621 436 0.569 0.810\n P 3 0.456 0.0660 436 0.332 0.586\n A 3 0.722 0.0610 436 0.589 0.825\n P 4 0.439 0.0657 436 0.316 0.569\n A 4 0.611 0.0663 436 0.476 0.731\n\nCovariance estimate used: user-supplied \nConfidence level used: 0.95 \nIntervals are back-transformed from the logit scale \n```\n\n\n:::\n\n```{.r .cell-code}\n#Get differences between treatments by visit (option \"revpairwise\" is used to compare A vs P) \ndiffs_mean<-emmeans::emmeans(model, ~ trtp*avisitn, data=resp, 
vcov.method=vcov(model))\ndiffs <- emmeans::contrast(diffs_mean,\"revpairwise\" , simple=\"trtp\")\ndiffs <- as.data.frame(diffs)\n\n#Calculate CI (alpha=0.05)\nalpha<-0.05\nz_crit <- qnorm(1 - alpha / 2)\ndiffs$low<-diffs$estimate - (z_crit*diffs$SE)\ndiffs$upp<-diffs$estimate + (z_crit*diffs$SE)\n\n#Get OR applying exponential transformation;\nor<-exp(diffs$estimate)\nor_low<-exp(diffs$low)\nor_upp<-exp(diffs$up)\n\n#Get two-sided p-value\nz <- diffs$estimate/diffs$SE\npvalue <- 2 * (1 - pnorm(z))\n\n#Create a dataset with all the results\nOR<-as.data.frame(cbind(diffs$avisitn, or,or_low, or_upp, z, round(pvalue, digits=4)))\ncolnames(OR)<-c('avisit', 'OR', 'lower.CL', 'upper.CL', 'Z', 'p-value')\nOR\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n avisit OR lower.CL upper.CL Z p-value\n1 1 2.254 1.0393 4.889 2.058 0.0396\n2 2 3.778 1.7132 8.333 3.294 0.0010\n3 3 3.100 1.4050 6.840 2.802 0.0051\n4 4 2.011 0.9435 4.288 1.809 0.0704\n```\n\n\n:::\n:::\n\n\n# OUTCOME WITH MORE THAN TWO CATEGORIES\n\nThe functions used for binary outcomes above do not support outcomes with more than two categories, as these functions relay on the family function, which does not include the multinomial option. 
Nevertheless, the multgee package \\[4\\] provides two functions for estimating GEE models when the outcome has more than two categories:\n\nThe correlation matrix by default is \"Exchangeable\", and it can be modified using the `LORstr=` argument in both functions.\n\n### Ordinal variable\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel <- multgee::ordLORgee(formula = respord ~ trtp + avisitn + trtp*avisitn,\n data = resp,\n id = usubjid,\n repeated = avisitn,\n LORstr=\"time.exch\",\n )\n\nsummary(model)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nGEE FOR ORDINAL MULTINOMIAL RESPONSES \nversion 1.6.0 modified 2017-07-10 \n\nLink : Cumulative logit \n\nLocal Odds Ratios:\nStructure: time.exch\nModel: 3way\nHomogenous scores: TRUE\n\ncall:\nmultgee::ordLORgee(formula = respord ~ trtp + avisitn + trtp * \n avisitn, data = resp, id = usubjid, repeated = avisitn, LORstr = \"time.exch\")\n\nSummary of residuals:\n Min. 1st Qu. Median Mean 3rd Qu. Max. \n-0.47035 -0.35422 -0.28544 0.00032 0.63348 0.72789 \n\nNumber of Iterations: 3 \n\nCoefficients:\n Estimate san.se san.z Pr(>|san.z|) \nbeta10 -0.5472 0.2417 -2.26 0.024 *\nbeta20 0.6304 0.2551 2.47 0.013 *\ntrtpA 0.4284 0.3432 1.25 0.212 \navisitn2 -0.0024 0.3668 -0.01 0.995 \navisitn3 0.1156 0.3228 0.36 0.720 \navisitn4 -0.1470 0.3323 -0.44 0.658 \ntrtpA:avisitn2 -0.2717 0.5094 -0.53 0.594 \ntrtpA:avisitn3 -0.5974 0.4619 -1.29 0.196 \ntrtpA:avisitn4 -0.3269 0.4488 -0.73 0.466 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n\nLocal Odds Ratios Estimates:\n [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8]\n[1,] 0.000 0.000 1.000 0.999 1.000 0.999 1.000 0.999\n[2,] 0.000 0.000 0.999 1.281 0.999 1.281 0.999 1.281\n[3,] 1.000 0.999 0.000 0.000 1.000 0.999 1.000 0.999\n[4,] 0.999 1.281 0.000 0.000 0.999 1.281 0.999 1.281\n[5,] 1.000 0.999 1.000 0.999 0.000 0.000 1.000 0.999\n[6,] 0.999 1.281 0.999 1.281 0.000 0.000 0.999 1.281\n[7,] 1.000 0.999 1.000 0.999 1.000 0.999 0.000 0.000\n[8,] 0.999 1.281 0.999 1.281 0.999 1.281 0.000 0.000\n\np-value of Null model: 0.69 \n```\n\n\n:::\n:::\n\n\n### Nominal variable\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel <- multgee::nomLORgee(formula = respnom ~ trtp + avisitn + trtp*avisitn,\n data = resp,\n id = usubjid,\n repeated = avisitn,\n LORstr = \"time.exch\")\n \n \nsummary(model)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nGEE FOR NOMINAL MULTINOMIAL RESPONSES \nversion 1.6.0 modified 2017-07-10 \n\nLink : Baseline Category Logit \n\nLocal Odds Ratios:\nStructure: time.exch\nModel: 3way\nHomogenous scores: TRUE\n\ncall:\nmultgee::nomLORgee(formula = respnom ~ trtp + avisitn + trtp * \n avisitn, data = resp, id = usubjid, repeated = avisitn, LORstr = \"time.exch\")\n\nSummary of residuals:\n Min. 1st Qu. Median Mean 3rd Qu. Max. 
\n -0.444 -0.368 -0.333 0.000 0.611 0.778 \n\nNumber of Iterations: 1 \n\nCoefficients:\n Estimate san.se san.z Pr(>|san.z|)\nbeta10 0.2231 0.3354 0.67 0.51\ntrtpA:1 -0.6286 0.5014 -1.25 0.21\navisitn2:1 0.1823 0.4310 0.42 0.67\navisitn3:1 0.0132 0.4786 0.03 0.98\navisitn4:1 -0.0690 0.4814 -0.14 0.89\ntrtpA:avisitn2:1 0.1625 0.6342 0.26 0.80\ntrtpA:avisitn3:1 0.9518 0.6979 1.36 0.17\ntrtpA:avisitn4:1 0.6463 0.7006 0.92 0.36\nbeta20 0.2719 0.3318 0.82 0.41\ntrtpA:2 0.0158 0.4553 0.03 0.97\navisitn2:2 0.1800 0.4254 0.42 0.67\navisitn3:2 0.1555 0.4660 0.33 0.74\navisitn4:2 -0.2719 0.5079 -0.54 0.59\ntrtpA:avisitn2:2 -0.2564 0.6025 -0.43 0.67\ntrtpA:avisitn3:2 0.1164 0.6469 0.18 0.86\ntrtpA:avisitn4:2 0.1561 0.6399 0.24 0.81\n\nLocal Odds Ratios Estimates:\n [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8]\n[1,] 0.000 0.000 1.279 1.001 1.279 1.001 1.279 1.001\n[2,] 0.000 0.000 1.001 1.000 1.001 1.000 1.001 1.000\n[3,] 1.279 1.001 0.000 0.000 1.279 1.001 1.279 1.001\n[4,] 1.001 1.000 0.000 0.000 1.001 1.000 1.001 1.000\n[5,] 1.279 1.001 1.279 1.001 0.000 0.000 1.279 1.001\n[6,] 1.001 1.000 1.001 1.000 0.000 0.000 1.001 1.000\n[7,] 1.279 1.001 1.279 1.001 1.279 1.001 0.000 0.000\n[8,] 1.001 1.000 1.001 1.000 1.001 1.000 0.000 0.000\n\np-value of Null model: 0.28 \n```\n\n\n:::\n:::\n\n\n# REFERENCES\n\n\\[1\\] [SAS Institute Inc.. SAS Help Center. The GEE procedure.](https://documentation.sas.com/doc/en/statug/15.2/statug_gee_examples01.htm)\n\n\\[2\\] [Generalized Estimating Equation Package](https://cran.r-project.org/web/packages/geepack/geepack.pdf)\n\n\\[3\\] [Generalized Estimation Equation Solver](https://cran.r-project.org/web/packages/gee/gee.pdf)\n\n\\[4\\] [Touloumis A. (2015). 
\"R Package multgee: A Generalized Estimating Equations Solver for Multinomial Responses.\" Journal of Statistical Software.](https://www.jstatsoft.org/article/view/v064i08)\n", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/R/glmm/execute-results/html.json b/_freeze/R/glmm/execute-results/html.json index af6a8a670..414fce0a3 100644 --- a/_freeze/R/glmm/execute-results/html.json +++ b/_freeze/R/glmm/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "743bb7d1890965b64572ba5decdfceab", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Generalized Linear Mixed Models (GLMM)\"\n---\n\n# INTRODUCTION\n\nGeneralized Linear Mixed Models (GLMM) method combines the characteristics of the Generalized Linear Model (GLM), with mixed models (such a repeated measures over time). It extends the GLM framework using link functions that relate the predictors to transformed outcome variable.\n\n$$\nE(Y)=\\mu\n$$\n\n$$\ng(\\mu) = X\\beta + Zb, \\qquad b \\sim N(0, G)\n$$\n\nWhere:\n\nn: number of observations, p: number of fixed effects, q: number of random effects (subjects).\n\nY: vector of observed response variable (n x 1)\n\ng: Link function that transforms Y to the linear scale (eg: logit)\n\nX: matrix for fixed effects (n x p), Z: matrix of random effects, G: covariance matrix of the random effects.\n\nB: vector of fixed effects coefficients (p x 1)., b: vector of random effects.\n\n**Link Function:**\n\n- Dichotomous response variable: probit (in case of rare events complementary log-log may be preferable).\n\n- Outcomes with more than two categories:\n\n - Ordinal variable: cumulative\n\n - Nominal variable: generalized logit\n\n**Random Effects**\n\nGLMM are conditional models and estimate subject-average effects, and the intra-subject correlation is modelled via random effects. 
Unlike GEE models, GLMM models allow individual-level inference.\n\n**Estimation Methods**\n\nMaximum likelihood, based on approximations:\n\n- Gauss Hermite Quadrature (GHQ): Integral split in a given number of points.\n\n- Laplace: A specific case of GHQ, using 1 point.\n\nPenalized Likelihood can be used too, but it is known that in binary data it underestimates variance components and biased results.\n\n# EXAMPLE DATA\n\nA SAS data of clinical trial data comparing two treatments for a respiratory disorder available in [\"Gee Model for Binary Data\"](https://documentation.sas.com/doc/en/statug/15.2/statug_code_genmex5.htm) \\[1\\] in the SAS/STAT Sample Program Library \\[1\\] is used to create these examples.\n\nTo uniquely identify subjects, a new variable USUBJID was created by concatenating SITE and ID. Variables TREATMENT, BASELINE, and VISIT were renamed to TRTP, BASE, and AVISITN.\n\nAdditionally, two variables were created using randomly generated values to simulate variables with more than two categories. One was an ordinal variable with values 1, 2, and 3; the other was a nominal variable with categories 'liver', 'lung', and 'bone'. Data were created in SAS (See SAS section), and imported in R.\n\nThe variables OUTCOME, TRTP, RESPORD, USUBJID and AVISITN were converted to factors. 
Since the modeling functions use the first (alphabetically) level as the reference category, TRT levels are ordered as 'P' (placebo) and 'A' (active), to ensure that placebo is used as the reference category in the models:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(readxl)\nresp <- read_excel(\"../data/resp_gee.xlsx\")\nresp$trtp<-factor(resp$trtp, levels=c('P', 'A')) \nresp$avisitn<-factor(resp$avisitn) \nresp$outcome<-factor(resp$outcome)\nresp$usubjid<-factor(resp$usubjid)\nresp$respord<-factor(resp$respord)\n```\n:::\n\n\n# PACKAGES\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(lme4)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nLoading required package: Matrix\n```\n\n\n:::\n\n```{.r .cell-code}\nlibrary(GLMMadaptive)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\n\nAttaching package: 'GLMMadaptive'\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nThe following object is masked from 'package:lme4':\n\n negative.binomial\n```\n\n\n:::\n\n```{.r .cell-code}\nlibrary(merDeriv)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nLoading required package: nonnest2\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nThis is nonnest2 0.5-8.\nnonnest2 has not been tested with all combinations of supported model classes.\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nLoading required package: sandwich\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nLoading required package: lavaan\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nThis is lavaan 0.6-21\nlavaan is FREE software! 
Please report any bugs.\n```\n\n\n:::\n\n```{.r .cell-code}\nlibrary(parameters)\nlibrary(glmmTMB)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\n\nAttaching package: 'glmmTMB'\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nThe following objects are masked from 'package:sandwich':\n\n meatHC, sandwich\n```\n\n\n:::\n\n```{.r .cell-code}\nlibrary(clubSandwich)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nRegistered S3 methods overwritten by 'clubSandwich':\n method from \n bread.lmerMod merDeriv\n bread.mlm sandwich\n```\n\n\n:::\n\n```{.r .cell-code}\nlibrary(MASS)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\n\nAttaching package: 'MASS'\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nThe following object is masked from 'package:GLMMadaptive':\n\n negative.binomial\n```\n\n\n:::\n\n```{.r .cell-code}\nlibrary(emmeans)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWelcome to emmeans.\nCaution: You lose important information if you filter this package's results.\nSee '? untidy'\n```\n\n\n:::\n\n```{.r .cell-code}\nlibrary(ordinal)\n```\n:::\n\n\n# GLMM WITH GHQ\n\nGLMM with GHQ approximation can be fitted using `lme4::glmer` and`GLMMadaptative::mixed_model`R functions.\n\nIn GLMMs, intra‑subject (within‑cluster) correlation is captured through the inclusion of random effects. Both functions estimate this correlation by assuming normally distributed random effects with infinite degrees of freedom and, by default, model the random‑effects covariance matrix GGG using a Variance Components (VC) structure.\n\n### lme::glmer\n\nThe syntax to fit a GLMM using GHQ with `lme4::glme` is displayed below, the random effects are specified as `1|usubjid` , where the number 1 denotes a random intercept (different baseline for each individual). 
By specifying `nAQG=` the GHQ method is applied with the specified number of points (in this example n=5).\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel <- lme4::glmer(outcome ~ trtp + avisitn + trtp*avisitn + (1 | usubjid), \n data = resp,\n family = binomial(link =\"logit\"),\n nAGQ=5)\n\nsummary(model)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nGeneralized linear mixed model fit by maximum likelihood (Adaptive\n Gauss-Hermite Quadrature, nAGQ = 5) [glmerMod]\n Family: binomial ( logit )\nFormula: outcome ~ trtp + avisitn + trtp * avisitn + (1 | usubjid)\n Data: resp\n\n AIC BIC logLik -2*log(L) df.resid \n 487.2 524.0 -234.6 469.2 435 \n\nScaled residuals: \n Min 1Q Median 3Q Max \n-2.1952 -0.3744 0.2062 0.4089 2.1081 \n\nRandom effects:\n Groups Name Variance Std.Dev.\n usubjid (Intercept) 6.963 2.639 \nNumber of obs: 444, groups: usubjid, 111\n\nFixed effects:\n Estimate Std. Error z value Pr(>|z|) \n(Intercept) -0.06177 0.52741 -0.117 0.9068 \ntrtpA 1.57505 0.78352 2.010 0.0444 *\navisitn2 -0.87192 0.54763 -1.592 0.1113 \navisitn3 -0.28729 0.53694 -0.535 0.5926 \navisitn4 -0.43176 0.53860 -0.802 0.4228 \ntrtpA:avisitn2 1.04386 0.80358 1.299 0.1939 \ntrtpA:avisitn3 0.63613 0.79997 0.795 0.4265 \ntrtpA:avisitn4 -0.22003 0.78660 -0.280 0.7797 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n\nCorrelation of Fixed Effects:\n (Intr) trtpA avstn2 avstn3 avstn4 trtA:2 trtA:3\ntrtpA -0.676 \navisitn2 -0.497 0.317 \navisitn3 -0.505 0.334 0.496 \navisitn4 -0.503 0.330 0.499 0.500 \ntrtpA:vstn2 0.338 -0.482 -0.683 -0.339 -0.341 \ntrtpA:vstn3 0.338 -0.488 -0.336 -0.672 -0.337 0.495 \ntrtpA:vstn4 0.346 -0.518 -0.334 -0.340 -0.681 0.495 0.496\n```\n\n\n:::\n:::\n\n\n### GLMMadapive::mixed_model\n\nThe syntax using `GLMMadaptative::mixed_model` is similar, but the random effects are specified by using the `random` argument.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel <- GLMMadaptive::mixed_model(fixed = outcome ~ trtp + avisitn + trtp*avisitn,\n random = ~1|usubjid,\n data = resp,\n family = binomial(link = \"logit\"),\n nAGQ=5)\nsummary(model)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\nCall:\nGLMMadaptive::mixed_model(fixed = outcome ~ trtp + avisitn + \n trtp * avisitn, random = ~1 | usubjid, data = resp, family = binomial(link = \"logit\"), \n nAGQ = 5)\n\nData Descriptives:\nNumber of Observations: 444\nNumber of Groups: 111 \n\nModel:\n family: binomial\n link: logit \n\nFit statistics:\n log.Lik AIC BIC\n -234.5954 487.1909 511.5767\n\nRandom effects covariance matrix:\n StdDev\n(Intercept) 2.680307\n\nFixed effects:\n Estimate Std.Err z-value p-value\n(Intercept) -0.0760 0.5806 -0.1308 0.89590\ntrtpA 1.6388 0.8728 1.8776 0.06044\navisitn2 -0.8746 0.5475 -1.5974 0.11017\navisitn3 -0.2890 0.5374 -0.5377 0.59080\navisitn4 -0.4335 0.5390 -0.8043 0.42123\ntrtpA:avisitn2 1.0461 0.8031 1.3026 0.19272\ntrtpA:avisitn3 0.6375 0.8000 0.7969 0.42553\ntrtpA:avisitn4 -0.2188 0.7875 -0.2778 0.78118\n\nIntegration:\nmethod: adaptive Gauss-Hermite quadrature rule\nquadrature points: 5\n\nOptimization:\nmethod: hybrid EM and quasi-Newton\nconverged: TRUE \n```\n\n\n:::\n:::\n\n\nResults produced by the mixed_model function show slight deviations when compared to glmer, suggesting differences in the model implementation or estimation.\n\nEven with 
Gauss–Hermite quadrature in both, **lme4::glmer** and **GLMMadaptive::mixed_model** use **different likelihood parameterizations, adaptive quadrature implementations, and optimization routines**, so they don’t evaluate the exact same objective and can yield different estimates (especially for non-Gaussian outcomes or sparse data). In practice, `mixed_model()` tends to provide **more accurate marginal likelihood integration** (subject-specific adaptive scaling), while `glmer()` is **faster but more approximate**, which can bias variance components and slightly shift fixed effects \\[2\\].\n\n# GLMM WITH LAPLACE\n\nLaplace is a particular GHQ where only one point is used. In R, `lme4::glmer` can also be used to compute Laplace approximation, but not `GLMMadaptative::mixed_model`. The `glmmTMB::glmmTMB`function is also a viable alternative.\n\n### lme::glmer\n\nThe syntax using `lme4::glmer` is the same as in GHQ, but without specifying the number of points:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel <- lme4::glmer(formula =outcome ~ trtp + avisitn + trtp*avisitn + (1 | usubjid),\n data = resp,\n family = binomial(link =\"logit\"))\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWarning in checkConv(attr(opt, \"derivs\"), opt$par, ctrl = control$checkConv, : Model failed to converge with max|grad| = 0.00926468 (tol = 0.002, component 1)\n See ?lme4::convergence and ?lme4::troubleshooting.\n```\n\n\n:::\n\n```{.r .cell-code}\nsummary(model)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nGeneralized linear mixed model fit by maximum likelihood (Laplace\n Approximation) [glmerMod]\n Family: binomial ( logit )\nFormula: outcome ~ trtp + avisitn + trtp * avisitn + (1 | usubjid)\n Data: resp\n\n AIC BIC logLik -2*log(L) df.resid \n 493.4 530.2 -237.7 475.4 435 \n\nScaled residuals: \n Min 1Q Median 3Q Max \n-2.1946 -0.3899 0.2097 0.4216 2.0956 \n\nRandom effects:\n Groups Name Variance Std.Dev.\n usubjid (Intercept) 6.433 2.536 \nNumber of obs: 444, 
groups: usubjid, 111\n\nFixed effects:\n Estimate Std. Error z value Pr(>|z|) \n(Intercept) -0.08269 0.52044 -0.159 0.8738 \ntrtpA 1.64646 0.78036 2.110 0.0349 *\navisitn2 -0.85805 0.54287 -1.581 0.1140 \navisitn3 -0.28067 0.53169 -0.528 0.5976 \navisitn4 -0.42263 0.53331 -0.792 0.4281 \ntrtpA:avisitn2 1.02843 0.79405 1.295 0.1953 \ntrtpA:avisitn3 0.62119 0.79067 0.786 0.4321 \ntrtpA:avisitn4 -0.20289 0.77543 -0.262 0.7936 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\nCorrelation of Fixed Effects:\n (Intr) trtpA avstn2 avstn3 avstn4 trtA:2 trtA:3\ntrtpA -0.675 \navisitn2 -0.497 0.310 \navisitn3 -0.508 0.331 0.495 \navisitn4 -0.506 0.327 0.497 0.500 \ntrtpA:vstn2 0.339 -0.474 -0.685 -0.339 -0.340 \ntrtpA:vstn3 0.340 -0.482 -0.336 -0.674 -0.338 0.493 \ntrtpA:vstn4 0.350 -0.510 -0.336 -0.342 -0.685 0.496 0.496\noptimizer (Nelder_Mead) convergence code: 0 (OK)\nModel failed to converge with max|grad| = 0.00926468 (tol = 0.002, component 1)\n See ?lme4::convergence and ?lme4::troubleshooting.\n```\n\n\n:::\n:::\n\n\nThis initial attempt using `lme4::glmer`resulted in a convergence warning, which can be addressed by switching the optimizer to \"bobyqa\" and extending the maximum number of iterations, as shown in the code below. 
These adjustments not only suppress the warning but provides closer results to SAS \\[3\\].\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel <- lme4::glmer(formula =outcome ~ trtp + avisitn + trtp*avisitn + (1 | usubjid),\n data = resp,\n family = binomial(link =\"logit\"),\n nAGQ=1,\n control=lme4::glmerControl(optimizer=\"bobyqa\", \n optCtrl=list(maxfun=100000)))\nsummary(model)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nGeneralized linear mixed model fit by maximum likelihood (Laplace\n Approximation) [glmerMod]\n Family: binomial ( logit )\nFormula: outcome ~ trtp + avisitn + trtp * avisitn + (1 | usubjid)\n Data: resp\nControl: \nlme4::glmerControl(optimizer = \"bobyqa\", optCtrl = list(maxfun = 1e+05))\n\n AIC BIC logLik -2*log(L) df.resid \n 493.4 530.2 -237.7 475.4 435 \n\nScaled residuals: \n Min 1Q Median 3Q Max \n-2.1964 -0.3901 0.2097 0.4213 2.0951 \n\nRandom effects:\n Groups Name Variance Std.Dev.\n usubjid (Intercept) 6.44 2.538 \nNumber of obs: 444, groups: usubjid, 111\n\nFixed effects:\n Estimate Std. Error z value Pr(>|z|) \n(Intercept) -0.08475 0.52067 -0.163 0.8707 \ntrtpA 1.65139 0.78093 2.115 0.0345 *\navisitn2 -0.85752 0.54293 -1.579 0.1142 \navisitn3 -0.28186 0.53178 -0.530 0.5961 \navisitn4 -0.42356 0.53339 -0.794 0.4271 \ntrtpA:avisitn2 1.02468 0.79414 1.290 0.1969 \ntrtpA:avisitn3 0.62207 0.79088 0.787 0.4315 \ntrtpA:avisitn4 -0.20495 0.77559 -0.264 0.7916 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n\nCorrelation of Fixed Effects:\n (Intr) trtpA avstn2 avstn3 avstn4 trtA:2 trtA:3\ntrtpA -0.675 \navisitn2 -0.497 0.310 \navisitn3 -0.508 0.331 0.495 \navisitn4 -0.506 0.326 0.497 0.500 \ntrtpA:vstn2 0.339 -0.473 -0.685 -0.339 -0.340 \ntrtpA:vstn3 0.340 -0.482 -0.336 -0.673 -0.338 0.493 \ntrtpA:vstn4 0.350 -0.510 -0.336 -0.342 -0.685 0.496 0.496\n```\n\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel <- glmer(formula =outcome ~ trtp + avisitn + trtp*avisitn + (1 | usubjid),\n data = resp,\n family = binomial(link =\"logit\"),\n nAGQ=1,\n control=glmerControl(optimizer=\"bobyqa\", \n optCtrl=list(maxfun=100000)))\n```\n:::\n\n\n### glmmTMB::glmmTMB\n\nThe `glmmTMB::glmmTMB` function is also a viable alternative for Laplace approximation, with similar syntax to `lme4::glmer`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel<- glmmTMB::glmmTMB(outcome ~ trtp + avisitn + trtp*avisitn + (1 | usubjid),\n data = resp,\n family = binomial(link = \"logit\"))\nsummary(model)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n Family: binomial ( logit )\nFormula: outcome ~ trtp + avisitn + trtp * avisitn + (1 | usubjid)\nData: resp\n\n AIC BIC logLik -2*log(L) df.resid \n 493.4 530.2 -237.7 475.4 435 \n\nRandom effects:\n\nConditional model:\n Groups Name Variance Std.Dev.\n usubjid (Intercept) 6.442 2.538 \nNumber of obs: 444, groups: usubjid, 111\n\nConditional model:\n Estimate Std. Error z value Pr(>|z|) \n(Intercept) -0.08476 0.52073 -0.163 0.8707 \ntrtpA 1.65178 0.78148 2.114 0.0345 *\navisitn2 -0.85758 0.54300 -1.579 0.1143 \navisitn3 -0.28190 0.53181 -0.530 0.5961 \navisitn4 -0.42359 0.53344 -0.794 0.4271 \ntrtpA:avisitn2 1.02474 0.79432 1.290 0.1970 \ntrtpA:avisitn3 0.62216 0.79103 0.786 0.4316 \ntrtpA:avisitn4 -0.20503 0.77578 -0.264 0.7916 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n```\n\n\n:::\n:::\n\n\nAlthough both `glmmTMB` and `lme4::glmer` implement the Laplace approximation for fitting GLMMs, they rely on different numerical frameworks and optimization strategies. `glmmTMB` uses TMB with automatic differentiation and its own Laplace implementation, whereas `glmer` uses lme4’s custom penalized deviance formulation and derivative‑free optimizers. As a result, the approximated likelihoods, gradients, and optimization paths differ, leading to non‑identical parameter estimates even under the same model specification. \\[2\\]\n\n# PENALIZED QUASI-LIKELIHOOD (PQL)\n\nThe PQL approach uses linear approximations instead of likelihood, making it **less accurate for binary outcomes compared to the GHQ or Laplace** methods described above. PQL computation can be obtained using the `glmmPQL` function form the MASS package, using the random argument to specify the random factors.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel <- MASS::glmmPQL(outcome ~ trtp + avisitn + trtp*avisitn,\n random=list(~1|usubjid),\n data = resp,\n family = binomial(link = \"logit\"))\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\niteration 1\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\niteration 2\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\niteration 3\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\niteration 4\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\niteration 5\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\niteration 6\n```\n\n\n:::\n\n```{.r .cell-code}\nsummary(model)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nLinear mixed-effects model fit by maximum likelihood\n Data: resp \n AIC BIC logLik\n NA NA NA\n\nRandom effects:\n Formula: ~1 | usubjid\n (Intercept) Residual\nStdDev: 2.240666 0.7414888\n\nVariance function:\n Structure: fixed weights\n Formula: ~invwt \nFixed effects: outcome ~ trtp + avisitn + trtp * avisitn \n Value Std.Error DF 
t-value p-value\n(Intercept) 0.0000080 0.4125606 327 0.0000194 1.0000\ntrtpA 1.2202691 0.6029058 109 2.0239797 0.0454\navisitn2 -0.8149934 0.3942084 327 -2.0674177 0.0395\navisitn3 -0.2682000 0.3879416 327 -0.6913412 0.4898\navisitn4 -0.4033782 0.3890430 327 -1.0368474 0.3006\ntrtpA:avisitn2 0.9759879 0.5795035 327 1.6841796 0.0931\ntrtpA:avisitn3 0.5950261 0.5780995 327 1.0292797 0.3041\ntrtpA:avisitn4 -0.1998455 0.5678652 327 -0.3519241 0.7251\n Correlation: \n (Intr) trtpA avstn2 avstn3 avstn4 trtA:2 trtA:3\ntrtpA -0.684 \navisitn2 -0.464 0.317 \navisitn3 -0.467 0.319 0.495 \navisitn4 -0.466 0.319 0.496 0.498 \ntrtpA:avisitn2 0.315 -0.469 -0.680 -0.337 -0.338 \ntrtpA:avisitn3 0.313 -0.467 -0.332 -0.671 -0.334 0.492 \ntrtpA:avisitn4 0.319 -0.485 -0.340 -0.341 -0.685 0.500 0.497\n\nStandardized Within-Group Residuals:\n Min Q1 Med Q3 Max \n-2.8440535 -0.4341097 0.2723992 0.4824627 2.8108150 \n\nNumber of Observations: 444\nNumber of Groups: 111 \n```\n\n\n:::\n:::\n\n\n**Note:** `glmmPQL` is widely recognized as **less reliable for binary outcomes**, more robust approaches such as Laplace or GHQ discussed in previous sections are generally preferred.\n\n# SANDWICH SE AND DEGREES OF FREEDOM (DDFF)\n\nPrevious results are computed using default outputs, so naive SE and infinite ddff are used. However, Li. P. and Redden, D.T. (2015) \\[4\\], suggested using the Between-Within denominator degrees of freedom approximation method when using GLMMs in randomized trials with binary outcomes and small sample size.\n\nAdditionally, FDA \\[5\\] advises \"sponsors to consider using of robust SE method such as the Huber-White \"sandwich\" SE, particularly when the model does not include treatment by covariate interaction.\n\nIn R, the functions described above do not include these options. 
Function parameters::dof_betwithin can be used to use between-withing ddff, does it would not return exactly the same results as shown in Li and Redden 2015 \\[4\\], but similar results are obtained \\[6\\].\n\nThe Sandwich S.E. can be obtained from different functions too: merDeriv::sandwich function can be used for glmer objects, and clubSandwich::vovHC for glmmTMB objects (recent versions of the package R (\\>= 3.6.0) support the computation of Sandwich S.E. using the using clubSandwich::vcovHC function \\[7\\].\n\nA function 'new_mode' is created below, where results obtained from a glmer model can be modified to use the sandwich SE and the between-within approximated ddff:\n\n### lme::glmer\n\n\n::: {.cell}\n\n```{.r .cell-code}\nnew_model <- function(model, est_fix , se , df){\n#Re-calculate (2-sided) p-values using estimated parameters and its SE\nt <- est_fix /se\npvalue <-2*pt(q=abs(t), df=df, lower.tail=FALSE)\n#Combine results in a data frame and add row names\nnew_model <- round(cbind(est_fix, se, df, t, pvalue), digits=4)\ncolnames(new_model) <- c(\"Estimate\", \"Std. Error\", \"df\", \"tvalue\", \"P-value\")\nrownames(new_model) <- rownames(summary(model)$coefficients)\nnew_model\n}\n```\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n#Run the model\nmodel <- lme4::glmer(outcome ~ trtp + avisitn + trtp*avisitn + (1 | usubjid), \n data = resp,\n family = binomial(link =\"logit\"),\n nAGQ=5)\n\n\n#Get parameter estimation\nest<-lme4::fixef(model)\n\n#Get Sandwich covariance matrix\nlibrary(merDeriv)\nvcov<-sandwich(model)\n\n#Get S.E. (the diagonal from the covariance matrix), remove last value as it corresponds to random effects\nse_sw0<- sqrt(diag(vcov)) \nse_sw <-head(se_sw0, -1)\n```\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n#Re-calculate p-values using Sandwich SE and Inf ddff\nnew_model(model, est_fix=est, se=se_sw , df=Inf )\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n Estimate Std. 
Error df tvalue P-value\n(Intercept) -0.0618 0.5378 Inf -0.1149 0.9086\ntrtpA 1.5751 0.7914 Inf 1.9902 0.0466\navisitn2 -0.8719 0.5391 Inf -1.6172 0.1058\navisitn3 -0.2873 0.6102 Inf -0.4708 0.6378\navisitn4 -0.4318 0.5164 Inf -0.8361 0.4031\ntrtpA:avisitn2 1.0439 0.8220 Inf 1.2699 0.2041\ntrtpA:avisitn3 0.6361 0.8589 Inf 0.7407 0.4589\ntrtpA:avisitn4 -0.2200 0.7939 Inf -0.2772 0.7817\n```\n\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n#Re-calculate p-values using Sandwich SE and between-within ddff\nnew_model(model, est_fix=est, se=se_sw , df=parameters::dof_betwithin(model))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n Estimate Std. Error df tvalue P-value\n(Intercept) -0.0618 0.5378 324 -0.1149 0.9086\ntrtpA 1.5751 0.7914 324 1.9902 0.0474\navisitn2 -0.8719 0.5391 324 -1.6172 0.1068\navisitn3 -0.2873 0.6102 324 -0.4708 0.6381\navisitn4 -0.4318 0.5164 324 -0.8361 0.4037\ntrtpA:avisitn2 1.0439 0.8220 324 1.2699 0.2050\ntrtpA:avisitn3 0.6361 0.8589 324 0.7407 0.4594\ntrtpA:avisitn4 -0.2200 0.7939 324 -0.2772 0.7818\n```\n\n\n:::\n:::\n\n\n### glmmTMB::glmmTMB\n\n\n::: {.cell}\n\n```{.r .cell-code}\nnew_model <- function(model, est_fix , se , df){\n#Re-calculate (2-sided) p-values using estimated parameters and its SE\nt <- est_fix /se\npvalue <-2*pt(q=abs(t), df=df, lower.tail=FALSE)\n#Combine results in a data frame and add row names\nnew_model <- round(cbind(est_fix, se, df, t, pvalue), digits=4)\ncolnames(new_model) <- c(\"Estimate\", \"Std. 
Error\", \"df\", \"tvalue\", \"P-value\")\nrownames(new_model) <- rownames(summary(model)$coefficients$cond)\nnew_model\n}\n```\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n#Run the model\nmodel<- glmmTMB::glmmTMB(outcome ~ trtp + avisitn + trtp*avisitn + (1 | usubjid),\ndata = resp,\nfamily = binomial(link = \"logit\"))\n\n#Get estimator\nest<-glmmTMB::fixef(model)$cond\n\n#Get Sandwich S.E.\nse_sw<- sqrt(diag(vcovHC(model)))\n```\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n#Re-calculate p-values with Sandwich SE and Ininite df\nnew_model(model, est_fix=est, se=se_sw , df=Inf )\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n Estimate Std. Error df tvalue P-value\n(Intercept) -0.0848 0.5452 Inf -0.1555 0.8765\ntrtpA 1.6518 0.8182 Inf 2.0188 0.0435\navisitn2 -0.8576 0.5302 Inf -1.6175 0.1058\navisitn3 -0.2819 0.5986 Inf -0.4709 0.6377\navisitn4 -0.4236 0.5069 Inf -0.8357 0.4033\ntrtpA:avisitn2 1.0247 0.8026 Inf 1.2767 0.2017\ntrtpA:avisitn3 0.6222 0.8397 Inf 0.7410 0.4587\ntrtpA:avisitn4 -0.2050 0.7727 Inf -0.2653 0.7907\n```\n\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n#Re-calculate p-values with Sandwich SE between-withing aprox. df\nnew_model(model, est_fix=est, se=se_sw , df=parameters::dof_betwithin(model))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n Estimate Std. Error df tvalue P-value\n(Intercept) -0.0848 0.5452 324 -0.1555 0.8766\ntrtpA 1.6518 0.8182 324 2.0188 0.0443\navisitn2 -0.8576 0.5302 324 -1.6175 0.1067\navisitn3 -0.2819 0.5986 324 -0.4709 0.6380\navisitn4 -0.4236 0.5069 324 -0.8357 0.4039\ntrtpA:avisitn2 1.0247 0.8026 324 1.2767 0.2026\ntrtpA:avisitn3 0.6222 0.8397 324 0.7410 0.4592\ntrtpA:avisitn4 -0.2050 0.7727 324 -0.2653 0.7909\n```\n\n\n:::\n:::\n\n\n# PREDICTED PROBABILITIES AND ODDS RATIO (OR)\n\nEstimated probabilities and ORs, obtained by using the `emmeans` and `contrast` functions. 
In the example below, the probabilities and OR are estimated for GLMM using GHQ approximation.\n\nTo obtain robust (sandwich) standard errors, the `emmeans` function allows specifying a sandwich covariance estimator using `vcov.method = sandwich(model)`, as illustrated in the example below. To instead use the model‑based (naive) standard errors, simply supply the model’s own covariance matrix by specifying `vcov.method = vcov(model)`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n#Run the model\nmodel <- lme4::glmer(outcome ~ trtp + avisitn + trtp*avisitn + (1 | usubjid), \n data = resp,\n family = binomial(link =\"logit\"),\n nAGQ=5)\n```\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(emmeans)\n#Get predicted probabilities for each treatment, using the Sandwich matrix covariance\nprob <- emmeans(model, ~ trtp*avisitn, data=resp, vcov.method=sandwich(model), type='response')\nprob\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n trtp avisitn prob SE df asymp.LCL asymp.UCL\n P 1 0.485 0.1320 Inf 0.251 0.726\n A 1 0.820 0.0854 Inf 0.594 0.934\n P 2 0.282 0.1090 Inf 0.120 0.531\n A 2 0.844 0.0772 Inf 0.631 0.944\n P 3 0.414 0.1280 Inf 0.200 0.666\n A 3 0.866 0.0691 Inf 0.668 0.954\n P 4 0.379 0.1250 Inf 0.177 0.634\n A 4 0.703 0.1150 Inf 0.445 0.875\n\nConfidence level used: 0.95 \nIntervals are back-transformed from the logit scale \n```\n\n\n:::\n\n```{.r .cell-code}\n#Get differences between treatments by visit (option \"revpairwise\" is used to compare A vs P)\ndiffs_mean<-emmeans(model, ~ trtp*avisitn, data=resp, vcov.method=sandwich(model))\ndiffs <- contrast(diffs_mean,\"revpairwise\", simple=\"trtp\")\ndiffs <- as.data.frame(diffs)\n\n#Calculate CI (alpha=0.05)\nalpha<-0.05\nz_crit <- qnorm(1 - alpha / 2)\ndiffs$low<-diffs$estimate - (z_crit*diffs$SE)\ndiffs$upp<-diffs$estimate + (z_crit*diffs$SE)\n\n#Get OR applying exponential transformation;\nor<-exp(diffs$estimate)\nor_low<-exp(diffs$low)\nor_upp<-exp(diffs$up)\n\n#Get two-sided p-value\nz <- 
diffs$estimate/diffs$SE\npvalue <- 2 * (1 - pnorm(z))\n\n#Create a dataset with all the results\nOR<-as.data.frame(cbind(diffs$avisitn, or,or_low, or_upp, z, round(pvalue, digits=4)))\ncolnames(OR)<-c('avisit', 'OR', 'lower.CL', 'upper.CL', 'Z', 'p-value')\nOR\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n avisit OR lower.CL upper.CL Z p-value\n1 1 4.830986 1.0401682 22.43716 2.010228 0.0444\n2 2 13.720725 2.8152984 66.86975 3.240837 0.0012\n3 3 9.126521 1.8973419 43.90004 2.759125 0.0058\n4 4 3.876834 0.8558308 17.56170 1.757983 0.0788\n```\n\n\n:::\n:::\n\n\n# OUTCOME WITH MORE THAN TWO CATEGORIES\n\nAlthough less common than binary outcomes, endpoints with more than two categories may be the outcome of interest, which can be either ordinal or nominal. In those case, the multinomial distribution is used selecting the appropriated link function depending on the type of response variable.\n\n### Ordinal variable\n\nThe function `ordinal::clmm` can be used to fit a GLMM model when the outcome is an ordinal variable.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel<- ordinal::clmm(respord ~ trtp + avisitn + trtp*avisitn + (1 | usubjid), \n data = resp)\nsummary(model)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nCumulative Link Mixed Model fitted with the Laplace approximation\n\nformula: respord ~ trtp + avisitn + trtp * avisitn + (1 | usubjid)\ndata: resp\n\n link threshold nobs logLik AIC niter max.grad cond.H \n logit flexible 444 -482.20 984.40 666(1357) 6.84e-04 1.7e+02\n\nRandom effects:\n Groups Name Variance Std.Dev.\n usubjid (Intercept) 0.1511 0.3887 \nNumber of groups: usubjid 111 \n\nCoefficients:\n Estimate Std. 
Error z value Pr(>|z|)\ntrtpA -0.43273 0.36062 -1.200 0.230\navisitn2 0.01155 0.35236 0.033 0.974\navisitn3 -0.10700 0.35079 -0.305 0.760\navisitn4 0.17114 0.34838 0.491 0.623\ntrtpA:avisitn2 0.26152 0.50072 0.522 0.601\ntrtpA:avisitn3 0.57356 0.50391 1.138 0.255\ntrtpA:avisitn4 0.31098 0.49794 0.625 0.532\n\nThreshold coefficients:\n Estimate Std. Error z value\n1|2 -0.5630 0.2573 -2.188\n2|3 0.6573 0.2589 2.539\n```\n\n\n:::\n:::\n\n\n### Nominal variable\n\nNo R functions have been identified for handling multinomial distributions for a nominal variable in a frequentist framework.\n\n# REFERENCES\n\n\\[1\\] [SAS Institute Inc.. SAS Help Center. The GEE procedure.](https://documentation.sas.com/doc/en/statug/15.2/statug_gee_examples01.htm)\n\n\\[2\\] Conceptual explanations were assisted using Microsoft Copilot (M365 Copilot, GPT‑5‑based model).\n\n\\[3\\] [Stack Overflow \\[Internet\\]. 2008 \\[Last visited: 2025 Sep 30\\]](https://stackoverflow.com/questions/33670628/solution-to-the-warning-message-using-glmer)\n\n\\[4\\] [Li, P., & Redden, D. T. (2015). Comparing denominator degrees of freedom approximations for the generalized linear mixed model in analyzing binary outcome in small sample cluster-randomized trials. BMC Medical Research Methodology, 15, 38.](https://bmcmedresmethodol.biomedcentral.com/articles/10.1186/s12874-015-0026-x)\n\n\\[5\\] [U.S. Food and Drug Administration. (2023). Adjusting for Covariates in Randomized Clinical Trials for Drugs and Biological Products: Guidance for Industry. Center for Drug Evaluation and Research (CDER), Center for Biologics Evaluation and Research (CBER).](https://www.fda.gov/regulatory-information/search-fda-guidance-documents/adjusting-covariates-randomized-clinical-trials-drugs-and-biological-products)\n\n\\[6\\] [Documentation of package parameters. dof_betwithin](https://search.r-project.org/CRAN/refmans/parameters/html/p_value_betwithin.html)\n\n\\[7\\] Brooks, M. E., et al. (2025). 
glmmTMB: Generalized Linear Mixed Models using Template Model Builder (Version 1.1.12) \\[R package manual\\]. The Comprehensive R Archive Network (CRAN). \n\n\\[8\\] [Ordinal: Regression Models for Ordinal Data.](https://cran.r-project.org/web/packages/ordinal/index.html)", + "markdown": "---\ntitle: \"Generalized Linear Mixed Models (GLMM)\"\n---\n\n# INTRODUCTION\n\nGeneralized Linear Mixed Models (GLMM) method combines the characteristics of the Generalized Linear Model (GLM), with mixed models (such a repeated measures over time). It extends the GLM framework using link functions that relate the predictors to transformed outcome variable.\n\n$$\nE(Y)=\\mu\n$$\n\n$$\ng(\\mu) = X\\beta + Zb, \\qquad b \\sim N(0, G)\n$$\n\nWhere:\n\nn: number of observations, p: number of fixed effects, q: number of random effects (subjects).\n\nY: vector of observed response variable (n x 1)\n\ng: Link function that transforms Y to the linear scale (eg: logit)\n\nX: matrix for fixed effects (n x p), Z: matrix of random effects, G: covariance matrix of the random effects.\n\nB: vector of fixed effects coefficients (p x 1)., b: vector of random effects.\n\n**Link Function:**\n\n- Dichotomous response variable: probit (in case of rare events complementary log-log may be preferable).\n\n- Outcomes with more than two categories:\n\n - Ordinal variable: cumulative\n\n - Nominal variable: generalized logit\n\n**Random Effects**\n\nGLMM are conditional models and estimate subject-average effects, and the intra-subject correlation is modelled via random effects. 
Unlike GEE models, GLMM models allow individual-level inference.\n\n**Estimation Methods**\n\nMaximum likelihood, based on approximations:\n\n- Gauss Hermite Quadrature (GHQ): Integral split in a given number of points.\n\n- Laplace: A specific case of GHQ, using 1 point.\n\nPenalized Likelihood can be used too, but it is known that in binary data it underestimates variance components and biased results.\n\n# EXAMPLE DATA\n\nA SAS data of clinical trial data comparing two treatments for a respiratory disorder available in [\"Gee Model for Binary Data\"](https://documentation.sas.com/doc/en/statug/15.2/statug_code_genmex5.htm) \\[1\\] in the SAS/STAT Sample Program Library \\[1\\] is used to create these examples.\n\nTo uniquely identify subjects, a new variable USUBJID was created by concatenating SITE and ID. Variables TREATMENT, BASELINE, and VISIT were renamed to TRTP, BASE, and AVISITN.\n\nAdditionally, two variables were created using randomly generated values to simulate variables with more than two categories. One was an ordinal variable with values 1, 2, and 3; the other was a nominal variable with categories 'liver', 'lung', and 'bone'. Data were created in SAS (See SAS section), and imported in R.\n\nThe variables OUTCOME, TRTP, RESPORD, USUBJID and AVISITN were converted to factors. 
Since the modeling functions use the first (alphabetically) level as the reference category, TRT levels are ordered as 'P' (placebo) and 'A' (active), to ensure that placebo is used as the reference category in the models:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(readxl)\nresp <- read_excel(\"../data/resp_gee.xlsx\")\nresp$trtp<-factor(resp$trtp, levels=c('P', 'A')) \nresp$avisitn<-factor(resp$avisitn) \nresp$outcome<-factor(resp$outcome)\nresp$usubjid<-factor(resp$usubjid)\nresp$respord<-factor(resp$respord)\n```\n:::\n\n\n# PACKAGES\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(lme4)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nLoading required package: Matrix\n```\n\n\n:::\n\n```{.r .cell-code}\nlibrary(GLMMadaptive)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\n\nAttaching package: 'GLMMadaptive'\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nThe following object is masked from 'package:lme4':\n\n negative.binomial\n```\n\n\n:::\n\n```{.r .cell-code}\nlibrary(merDeriv)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nLoading required package: nonnest2\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nThis is nonnest2 0.5-8.\nnonnest2 has not been tested with all combinations of supported model classes.\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nLoading required package: sandwich\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nLoading required package: lavaan\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nThis is lavaan 0.6-21\nlavaan is FREE software! 
Please report any bugs.\n```\n\n\n:::\n\n```{.r .cell-code}\nlibrary(parameters)\nlibrary(glmmTMB)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\n\nAttaching package: 'glmmTMB'\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nThe following objects are masked from 'package:sandwich':\n\n meatHC, sandwich\n```\n\n\n:::\n\n```{.r .cell-code}\nlibrary(clubSandwich)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nRegistered S3 methods overwritten by 'clubSandwich':\n method from \n bread.lmerMod merDeriv\n bread.mlm sandwich\n```\n\n\n:::\n\n```{.r .cell-code}\nlibrary(MASS)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\n\nAttaching package: 'MASS'\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nThe following object is masked from 'package:GLMMadaptive':\n\n negative.binomial\n```\n\n\n:::\n\n```{.r .cell-code}\nlibrary(emmeans)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWelcome to emmeans.\nCaution: You lose important information if you filter this package's results.\nSee '? untidy'\n```\n\n\n:::\n\n```{.r .cell-code}\nlibrary(ordinal)\n```\n:::\n\n\n# GLMM WITH GHQ\n\nGLMM with GHQ approximation can be fitted using `lme4::glmer` and`GLMMadaptative::mixed_model`R functions.\n\nIn GLMMs, intra‑subject (within‑cluster) correlation is captured through the inclusion of random effects. Both functions estimate this correlation by assuming normally distributed random effects with infinite degrees of freedom and, by default, model the random‑effects covariance matrix GGG using a Variance Components (VC) structure.\n\n### lme::glmer\n\nThe syntax to fit a GLMM using GHQ with `lme4::glme` is displayed below, the random effects are specified as `1|usubjid` , where the number 1 denotes a random intercept (different baseline for each individual). 
By specifying `nAQG=` the GHQ method is applied with the specified number of points (in this example n=5).\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel <- lme4::glmer(outcome ~ trtp + avisitn + trtp*avisitn + (1 | usubjid), \n data = resp,\n family = binomial(link =\"logit\"),\n nAGQ=5)\n\nsummary(model)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nGeneralized linear mixed model fit by maximum likelihood (Adaptive\n Gauss-Hermite Quadrature, nAGQ = 5) [glmerMod]\n Family: binomial ( logit )\nFormula: outcome ~ trtp + avisitn + trtp * avisitn + (1 | usubjid)\n Data: resp\n\n AIC BIC logLik -2*log(L) df.resid \n 487.2 524.0 -234.6 469.2 435 \n\nScaled residuals: \n Min 1Q Median 3Q Max \n-2.1952 -0.3744 0.2062 0.4089 2.1081 \n\nRandom effects:\n Groups Name Variance Std.Dev.\n usubjid (Intercept) 6.963 2.639 \nNumber of obs: 444, groups: usubjid, 111\n\nFixed effects:\n Estimate Std. Error z value Pr(>|z|) \n(Intercept) -0.06177 0.52740 -0.117 0.9068 \ntrtpA 1.57505 0.78351 2.010 0.0444 *\navisitn2 -0.87192 0.54762 -1.592 0.1113 \navisitn3 -0.28729 0.53694 -0.535 0.5926 \navisitn4 -0.43176 0.53859 -0.802 0.4228 \ntrtpA:avisitn2 1.04386 0.80357 1.299 0.1939 \ntrtpA:avisitn3 0.63613 0.79997 0.795 0.4265 \ntrtpA:avisitn4 -0.22003 0.78659 -0.280 0.7797 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n\nCorrelation of Fixed Effects:\n (Intr) trtpA avstn2 avstn3 avstn4 trtA:2 trtA:3\ntrtpA -0.676 \navisitn2 -0.497 0.317 \navisitn3 -0.505 0.334 0.496 \navisitn4 -0.503 0.330 0.499 0.500 \ntrtpA:vstn2 0.338 -0.482 -0.683 -0.339 -0.341 \ntrtpA:vstn3 0.338 -0.488 -0.336 -0.672 -0.337 0.495 \ntrtpA:vstn4 0.346 -0.518 -0.334 -0.340 -0.681 0.495 0.496\n```\n\n\n:::\n:::\n\n\n### GLMMadapive::mixed_model\n\nThe syntax using `GLMMadaptative::mixed_model` is similar, but the random effects are specified by using the `random` argument.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel <- GLMMadaptive::mixed_model(fixed = outcome ~ trtp + avisitn + trtp*avisitn,\n random = ~1|usubjid,\n data = resp,\n family = binomial(link = \"logit\"),\n nAGQ=5)\nsummary(model)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\nCall:\nGLMMadaptive::mixed_model(fixed = outcome ~ trtp + avisitn + \n trtp * avisitn, random = ~1 | usubjid, data = resp, family = binomial(link = \"logit\"), \n nAGQ = 5)\n\nData Descriptives:\nNumber of Observations: 444\nNumber of Groups: 111 \n\nModel:\n family: binomial\n link: logit \n\nFit statistics:\n log.Lik AIC BIC\n -234.5954 487.1909 511.5767\n\nRandom effects covariance matrix:\n StdDev\n(Intercept) 2.680307\n\nFixed effects:\n Estimate Std.Err z-value p-value\n(Intercept) -0.0760 0.5806 -0.1308 0.89590\ntrtpA 1.6388 0.8728 1.8776 0.06044\navisitn2 -0.8746 0.5475 -1.5974 0.11017\navisitn3 -0.2890 0.5374 -0.5377 0.59080\navisitn4 -0.4335 0.5390 -0.8043 0.42123\ntrtpA:avisitn2 1.0461 0.8031 1.3026 0.19272\ntrtpA:avisitn3 0.6375 0.8000 0.7969 0.42553\ntrtpA:avisitn4 -0.2188 0.7875 -0.2778 0.78118\n\nIntegration:\nmethod: adaptive Gauss-Hermite quadrature rule\nquadrature points: 5\n\nOptimization:\nmethod: hybrid EM and quasi-Newton\nconverged: TRUE \n```\n\n\n:::\n:::\n\n\nResults produced by the mixed_model function show slight deviations when compared to glmer, suggesting differences in the model implementation or estimation.\n\nEven with 
Gauss–Hermite quadrature in both, **lme4::glmer** and **GLMMadaptive::mixed_model** use **different likelihood parameterizations, adaptive quadrature implementations, and optimization routines**, so they don’t evaluate the exact same objective and can yield different estimates (especially for non-Gaussian outcomes or sparse data). In practice, `mixed_model()` tends to provide **more accurate marginal likelihood integration** (subject-specific adaptive scaling), while `glmer()` is **faster but more approximate**, which can bias variance components and slightly shift fixed effects \\[2\\].\n\n# GLMM WITH LAPLACE\n\nLaplace is a particular GHQ where only one point is used. In R, `lme4::glmer` can also be used to compute Laplace approximation, but not `GLMMadaptative::mixed_model`. The `glmmTMB::glmmTMB`function is also a viable alternative.\n\n### lme::glmer\n\nThe syntax using `lme4::glmer` is the same as in GHQ, but without specifying the number of points:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel <- lme4::glmer(formula =outcome ~ trtp + avisitn + trtp*avisitn + (1 | usubjid),\n data = resp,\n family = binomial(link =\"logit\"))\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWarning in checkConv(attr(opt, \"derivs\"), opt$par, ctrl = control$checkConv, : Model failed to converge with max|grad| = 0.0092645 (tol = 0.002, component 1)\n See ?lme4::convergence and ?lme4::troubleshooting.\n```\n\n\n:::\n\n```{.r .cell-code}\nsummary(model)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nGeneralized linear mixed model fit by maximum likelihood (Laplace\n Approximation) [glmerMod]\n Family: binomial ( logit )\nFormula: outcome ~ trtp + avisitn + trtp * avisitn + (1 | usubjid)\n Data: resp\n\n AIC BIC logLik -2*log(L) df.resid \n 493.4 530.2 -237.7 475.4 435 \n\nScaled residuals: \n Min 1Q Median 3Q Max \n-2.1946 -0.3899 0.2097 0.4216 2.0956 \n\nRandom effects:\n Groups Name Variance Std.Dev.\n usubjid (Intercept) 6.433 2.536 \nNumber of obs: 444, groups: 
usubjid, 111\n\nFixed effects:\n Estimate Std. Error z value Pr(>|z|) \n(Intercept) -0.08269 0.52044 -0.159 0.8738 \ntrtpA 1.64646 0.78035 2.110 0.0349 *\navisitn2 -0.85805 0.54286 -1.581 0.1140 \navisitn3 -0.28067 0.53168 -0.528 0.5976 \navisitn4 -0.42263 0.53329 -0.792 0.4281 \ntrtpA:avisitn2 1.02843 0.79403 1.295 0.1953 \ntrtpA:avisitn3 0.62119 0.79066 0.786 0.4321 \ntrtpA:avisitn4 -0.20289 0.77541 -0.262 0.7936 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\nCorrelation of Fixed Effects:\n (Intr) trtpA avstn2 avstn3 avstn4 trtA:2 trtA:3\ntrtpA -0.675 \navisitn2 -0.497 0.310 \navisitn3 -0.508 0.331 0.495 \navisitn4 -0.506 0.327 0.497 0.500 \ntrtpA:vstn2 0.339 -0.474 -0.685 -0.339 -0.340 \ntrtpA:vstn3 0.340 -0.482 -0.336 -0.673 -0.338 0.493 \ntrtpA:vstn4 0.350 -0.510 -0.336 -0.342 -0.685 0.496 0.496\noptimizer (Nelder_Mead) convergence code: 0 (OK)\nModel failed to converge with max|grad| = 0.0092645 (tol = 0.002, component 1)\n See ?lme4::convergence and ?lme4::troubleshooting.\n```\n\n\n:::\n:::\n\n\nThis initial attempt using `lme4::glmer`resulted in a convergence warning, which can be addressed by switching the optimizer to \"bobyqa\" and extending the maximum number of iterations, as shown in the code below. 
These adjustments not only suppress the warning but provides closer results to SAS \\[3\\].\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel <- lme4::glmer(formula =outcome ~ trtp + avisitn + trtp*avisitn + (1 | usubjid),\n data = resp,\n family = binomial(link =\"logit\"),\n nAGQ=1,\n control=lme4::glmerControl(optimizer=\"bobyqa\", \n optCtrl=list(maxfun=100000)))\nsummary(model)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nGeneralized linear mixed model fit by maximum likelihood (Laplace\n Approximation) [glmerMod]\n Family: binomial ( logit )\nFormula: outcome ~ trtp + avisitn + trtp * avisitn + (1 | usubjid)\n Data: resp\nControl: \nlme4::glmerControl(optimizer = \"bobyqa\", optCtrl = list(maxfun = 1e+05))\n\n AIC BIC logLik -2*log(L) df.resid \n 493.4 530.2 -237.7 475.4 435 \n\nScaled residuals: \n Min 1Q Median 3Q Max \n-2.1964 -0.3901 0.2097 0.4213 2.0951 \n\nRandom effects:\n Groups Name Variance Std.Dev.\n usubjid (Intercept) 6.44 2.538 \nNumber of obs: 444, groups: usubjid, 111\n\nFixed effects:\n Estimate Std. Error z value Pr(>|z|) \n(Intercept) -0.08475 0.52066 -0.163 0.8707 \ntrtpA 1.65139 0.78090 2.115 0.0345 *\navisitn2 -0.85752 0.54292 -1.579 0.1142 \navisitn3 -0.28186 0.53176 -0.530 0.5961 \navisitn4 -0.42356 0.53338 -0.794 0.4271 \ntrtpA:avisitn2 1.02468 0.79411 1.290 0.1969 \ntrtpA:avisitn3 0.62207 0.79083 0.787 0.4315 \ntrtpA:avisitn4 -0.20495 0.77556 -0.264 0.7916 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n\nCorrelation of Fixed Effects:\n (Intr) trtpA avstn2 avstn3 avstn4 trtA:2 trtA:3\ntrtpA -0.675 \navisitn2 -0.497 0.310 \navisitn3 -0.508 0.331 0.495 \navisitn4 -0.506 0.326 0.497 0.500 \ntrtpA:vstn2 0.339 -0.473 -0.685 -0.339 -0.340 \ntrtpA:vstn3 0.340 -0.482 -0.336 -0.673 -0.338 0.493 \ntrtpA:vstn4 0.350 -0.510 -0.336 -0.342 -0.685 0.496 0.496\n```\n\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel <- glmer(formula =outcome ~ trtp + avisitn + trtp*avisitn + (1 | usubjid),\n data = resp,\n family = binomial(link =\"logit\"),\n nAGQ=1,\n control=glmerControl(optimizer=\"bobyqa\", \n optCtrl=list(maxfun=100000)))\n```\n:::\n\n\n### glmmTMB::glmmTMB\n\nThe `glmmTMB::glmmTMB` function is also a viable alternative for Laplace approximation, with similar syntax to `lme4::glmer`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel<- glmmTMB::glmmTMB(outcome ~ trtp + avisitn + trtp*avisitn + (1 | usubjid),\n data = resp,\n family = binomial(link = \"logit\"))\nsummary(model)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n Family: binomial ( logit )\nFormula: outcome ~ trtp + avisitn + trtp * avisitn + (1 | usubjid)\nData: resp\n\n AIC BIC logLik -2*log(L) df.resid \n 493.4 530.2 -237.7 475.4 435 \n\nRandom effects:\n\nConditional model:\n Groups Name Variance Std.Dev.\n usubjid (Intercept) 6.442 2.538 \nNumber of obs: 444, groups: usubjid, 111\n\nConditional model:\n Estimate Std. Error z value Pr(>|z|) \n(Intercept) -0.08476 0.52073 -0.163 0.8707 \ntrtpA 1.65178 0.78148 2.114 0.0345 *\navisitn2 -0.85758 0.54300 -1.579 0.1143 \navisitn3 -0.28190 0.53181 -0.530 0.5961 \navisitn4 -0.42359 0.53344 -0.794 0.4271 \ntrtpA:avisitn2 1.02474 0.79432 1.290 0.1970 \ntrtpA:avisitn3 0.62216 0.79103 0.786 0.4316 \ntrtpA:avisitn4 -0.20503 0.77578 -0.264 0.7916 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n```\n\n\n:::\n:::\n\n\nAlthough both `glmmTMB` and `lme4::glmer` implement the Laplace approximation for fitting GLMMs, they rely on different numerical frameworks and optimization strategies. `glmmTMB` uses TMB with automatic differentiation and its own Laplace implementation, whereas `glmer` uses lme4’s custom penalized deviance formulation and derivative‑free optimizers. As a result, the approximated likelihoods, gradients, and optimization paths differ, leading to non‑identical parameter estimates even under the same model specification. \\[2\\]\n\n# PENALIZED QUASI-LIKELIHOOD (PQL)\n\nThe PQL approach uses linear approximations instead of likelihood, making it **less accurate for binary outcomes compared to the GHQ or Laplace** methods described above. PQL computation can be obtained using the `glmmPQL` function form the MASS package, using the random argument to specify the random factors.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel <- MASS::glmmPQL(outcome ~ trtp + avisitn + trtp*avisitn,\n random=list(~1|usubjid),\n data = resp,\n family = binomial(link = \"logit\"))\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\niteration 1\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\niteration 2\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\niteration 3\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\niteration 4\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\niteration 5\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\niteration 6\n```\n\n\n:::\n\n```{.r .cell-code}\nsummary(model)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nLinear mixed-effects model fit by maximum likelihood\n Data: resp \n AIC BIC logLik\n NA NA NA\n\nRandom effects:\n Formula: ~1 | usubjid\n (Intercept) Residual\nStdDev: 2.240666 0.7414888\n\nVariance function:\n Structure: fixed weights\n Formula: ~invwt \nFixed effects: outcome ~ trtp + avisitn + trtp * avisitn \n Value Std.Error DF 
t-value p-value\n(Intercept) 0.0000080 0.4125606 327 0.0000194 1.0000\ntrtpA 1.2202691 0.6029058 109 2.0239797 0.0454\navisitn2 -0.8149934 0.3942084 327 -2.0674177 0.0395\navisitn3 -0.2682000 0.3879416 327 -0.6913412 0.4898\navisitn4 -0.4033782 0.3890430 327 -1.0368474 0.3006\ntrtpA:avisitn2 0.9759879 0.5795035 327 1.6841796 0.0931\ntrtpA:avisitn3 0.5950261 0.5780995 327 1.0292797 0.3041\ntrtpA:avisitn4 -0.1998455 0.5678652 327 -0.3519241 0.7251\n Correlation: \n (Intr) trtpA avstn2 avstn3 avstn4 trtA:2 trtA:3\ntrtpA -0.684 \navisitn2 -0.464 0.317 \navisitn3 -0.467 0.319 0.495 \navisitn4 -0.466 0.319 0.496 0.498 \ntrtpA:avisitn2 0.315 -0.469 -0.680 -0.337 -0.338 \ntrtpA:avisitn3 0.313 -0.467 -0.332 -0.671 -0.334 0.492 \ntrtpA:avisitn4 0.319 -0.485 -0.340 -0.341 -0.685 0.500 0.497\n\nStandardized Within-Group Residuals:\n Min Q1 Med Q3 Max \n-2.8440535 -0.4341097 0.2723992 0.4824627 2.8108150 \n\nNumber of Observations: 444\nNumber of Groups: 111 \n```\n\n\n:::\n:::\n\n\n**Note:** `glmmPQL` is widely recognized as **less reliable for binary outcomes**, more robust approaches such as Laplace or GHQ discussed in previous sections are generally preferred.\n\n# SANDWICH SE AND DEGREES OF FREEDOM (DDFF)\n\nPrevious results are computed using default outputs, so naive SE and infinite ddff are used. However, Li. P. and Redden, D.T. (2015) \\[4\\], suggested using the Between-Within denominator degrees of freedom approximation method when using GLMMs in randomized trials with binary outcomes and small sample size.\n\nAdditionally, FDA \\[5\\] advises \"sponsors to consider using of robust SE method such as the Huber-White \"sandwich\" SE, particularly when the model does not include treatment by covariate interaction.\n\nIn R, the functions described above do not include these options. 
Function parameters::dof_betwithin can be used to use between-withing ddff, does it would not return exactly the same results as shown in Li and Redden 2015 \\[4\\], but similar results are obtained \\[6\\].\n\nThe Sandwich S.E. can be obtained from different functions too: merDeriv::sandwich function can be used for glmer objects, and clubSandwich::vovHC for glmmTMB objects (recent versions of the package R (\\>= 3.6.0) support the computation of Sandwich S.E. using the using clubSandwich::vcovHC function \\[7\\].\n\nA function 'new_mode' is created below, where results obtained from a glmer model can be modified to use the sandwich SE and the between-within approximated ddff:\n\n### lme::glmer\n\n\n::: {.cell}\n\n```{.r .cell-code}\nnew_model <- function(model, est_fix , se , df){\n#Re-calculate (2-sided) p-values using estimated parameters and its SE\nt <- est_fix /se\npvalue <-2*pt(q=abs(t), df=df, lower.tail=FALSE)\n#Combine results in a data frame and add row names\nnew_model <- round(cbind(est_fix, se, df, t, pvalue), digits=4)\ncolnames(new_model) <- c(\"Estimate\", \"Std. Error\", \"df\", \"tvalue\", \"P-value\")\nrownames(new_model) <- rownames(summary(model)$coefficients)\nnew_model\n}\n```\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n#Run the model\nmodel <- lme4::glmer(outcome ~ trtp + avisitn + trtp*avisitn + (1 | usubjid), \n data = resp,\n family = binomial(link =\"logit\"),\n nAGQ=5)\n\n\n#Get parameter estimation\nest<-lme4::fixef(model)\n\n#Get Sandwich covariance matrix\nlibrary(merDeriv)\nvcov<-sandwich(model)\n\n#Get S.E. (the diagonal from the covariance matrix), remove last value as it corresponds to random effects\nse_sw0<- sqrt(diag(vcov)) \nse_sw <-head(se_sw0, -1)\n```\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n#Re-calculate p-values using Sandwich SE and Inf ddff\nnew_model(model, est_fix=est, se=se_sw , df=Inf )\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n Estimate Std. 
Error df tvalue P-value\n(Intercept) -0.0618 0.5378 Inf -0.1149 0.9086\ntrtpA 1.5751 0.7914 Inf 1.9902 0.0466\navisitn2 -0.8719 0.5391 Inf -1.6172 0.1058\navisitn3 -0.2873 0.6102 Inf -0.4708 0.6378\navisitn4 -0.4318 0.5164 Inf -0.8361 0.4031\ntrtpA:avisitn2 1.0439 0.8220 Inf 1.2699 0.2041\ntrtpA:avisitn3 0.6361 0.8589 Inf 0.7407 0.4589\ntrtpA:avisitn4 -0.2200 0.7939 Inf -0.2772 0.7817\n```\n\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n#Re-calculate p-values using Sandwich SE and between-within ddff\nnew_model(model, est_fix=est, se=se_sw , df=parameters::dof_betwithin(model))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n Estimate Std. Error df tvalue P-value\n(Intercept) -0.0618 0.5378 324 -0.1149 0.9086\ntrtpA 1.5751 0.7914 324 1.9902 0.0474\navisitn2 -0.8719 0.5391 324 -1.6172 0.1068\navisitn3 -0.2873 0.6102 324 -0.4708 0.6381\navisitn4 -0.4318 0.5164 324 -0.8361 0.4037\ntrtpA:avisitn2 1.0439 0.8220 324 1.2699 0.2050\ntrtpA:avisitn3 0.6361 0.8589 324 0.7407 0.4594\ntrtpA:avisitn4 -0.2200 0.7939 324 -0.2772 0.7818\n```\n\n\n:::\n:::\n\n\n### glmmTMB::glmmTMB\n\n\n::: {.cell}\n\n```{.r .cell-code}\nnew_model <- function(model, est_fix , se , df){\n#Re-calculate (2-sided) p-values using estimated parameters and its SE\nt <- est_fix /se\npvalue <-2*pt(q=abs(t), df=df, lower.tail=FALSE)\n#Combine results in a data frame and add row names\nnew_model <- round(cbind(est_fix, se, df, t, pvalue), digits=4)\ncolnames(new_model) <- c(\"Estimate\", \"Std. 
Error\", \"df\", \"tvalue\", \"P-value\")\nrownames(new_model) <- rownames(summary(model)$coefficients$cond)\nnew_model\n}\n```\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n#Run the model\nmodel<- glmmTMB::glmmTMB(outcome ~ trtp + avisitn + trtp*avisitn + (1 | usubjid),\ndata = resp,\nfamily = binomial(link = \"logit\"))\n\n#Get estimator\nest<-glmmTMB::fixef(model)$cond\n\n#Get Sandwich S.E.\nse_sw<- sqrt(diag(vcovHC(model)))\n```\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n#Re-calculate p-values with Sandwich SE and Ininite df\nnew_model(model, est_fix=est, se=se_sw , df=Inf )\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n Estimate Std. Error df tvalue P-value\n(Intercept) -0.0848 0.5452 Inf -0.1555 0.8765\ntrtpA 1.6518 0.8182 Inf 2.0188 0.0435\navisitn2 -0.8576 0.5302 Inf -1.6175 0.1058\navisitn3 -0.2819 0.5986 Inf -0.4709 0.6377\navisitn4 -0.4236 0.5069 Inf -0.8357 0.4033\ntrtpA:avisitn2 1.0247 0.8026 Inf 1.2767 0.2017\ntrtpA:avisitn3 0.6222 0.8397 Inf 0.7410 0.4587\ntrtpA:avisitn4 -0.2050 0.7727 Inf -0.2653 0.7907\n```\n\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n#Re-calculate p-values with Sandwich SE between-withing aprox. df\nnew_model(model, est_fix=est, se=se_sw , df=parameters::dof_betwithin(model))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n Estimate Std. Error df tvalue P-value\n(Intercept) -0.0848 0.5452 324 -0.1555 0.8766\ntrtpA 1.6518 0.8182 324 2.0188 0.0443\navisitn2 -0.8576 0.5302 324 -1.6175 0.1067\navisitn3 -0.2819 0.5986 324 -0.4709 0.6380\navisitn4 -0.4236 0.5069 324 -0.8357 0.4039\ntrtpA:avisitn2 1.0247 0.8026 324 1.2767 0.2026\ntrtpA:avisitn3 0.6222 0.8397 324 0.7410 0.4592\ntrtpA:avisitn4 -0.2050 0.7727 324 -0.2653 0.7909\n```\n\n\n:::\n:::\n\n\n# PREDICTED PROBABILITIES AND ODDS RATIO (OR)\n\nEstimated probabilities and ORs, obtained by using the `emmeans` and `contrast` functions. 
In the example below, the probabilities and OR are estimated for GLMM using GHQ approximation.\n\nTo obtain robust (sandwich) standard errors, the `emmeans` function allows specifying a sandwich covariance estimator using `vcov.method = sandwich(model)`, as illustrated in the example below. To instead use the model‑based (naive) standard errors, simply supply the model’s own covariance matrix by specifying `vcov.method = vcov(model)`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n#Run the model\nmodel <- lme4::glmer(outcome ~ trtp + avisitn + trtp*avisitn + (1 | usubjid), \n data = resp,\n family = binomial(link =\"logit\"),\n nAGQ=5)\n```\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(emmeans)\n#Get predicted probabilities for each treatment, using the Sandwich matrix covariance\nprob <- emmeans(model, ~ trtp*avisitn, data=resp, vcov.method=sandwich(model), type='response')\nprob\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n trtp avisitn prob SE df asymp.LCL asymp.UCL\n P 1 0.485 0.1320 Inf 0.251 0.725\n A 1 0.820 0.0854 Inf 0.594 0.934\n P 2 0.282 0.1090 Inf 0.120 0.531\n A 2 0.844 0.0772 Inf 0.631 0.944\n P 3 0.414 0.1280 Inf 0.200 0.666\n A 3 0.866 0.0691 Inf 0.668 0.954\n P 4 0.379 0.1250 Inf 0.177 0.634\n A 4 0.703 0.1150 Inf 0.445 0.875\n\nConfidence level used: 0.95 \nIntervals are back-transformed from the logit scale \n```\n\n\n:::\n\n```{.r .cell-code}\n#Get differences between treatments by visit (option \"revpairwise\" is used to compare A vs P)\ndiffs_mean<-emmeans(model, ~ trtp*avisitn, data=resp, vcov.method=sandwich(model))\ndiffs <- contrast(diffs_mean,\"revpairwise\", simple=\"trtp\")\ndiffs <- as.data.frame(diffs)\n\n#Calculate CI (alpha=0.05)\nalpha<-0.05\nz_crit <- qnorm(1 - alpha / 2)\ndiffs$low<-diffs$estimate - (z_crit*diffs$SE)\ndiffs$upp<-diffs$estimate + (z_crit*diffs$SE)\n\n#Get OR applying exponential transformation;\nor<-exp(diffs$estimate)\nor_low<-exp(diffs$low)\nor_upp<-exp(diffs$up)\n\n#Get two-sided p-value\nz <- 
diffs$estimate/diffs$SE\npvalue <- 2 * (1 - pnorm(z))\n\n#Create a dataset with all the results\nOR<-as.data.frame(cbind(diffs$avisitn, or,or_low, or_upp, z, round(pvalue, digits=4)))\ncolnames(OR)<-c('avisit', 'OR', 'lower.CL', 'upper.CL', 'Z', 'p-value')\nOR\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n avisit OR lower.CL upper.CL Z p-value\n1 1 4.830986 1.0401922 22.43665 2.010258 0.0444\n2 2 13.720725 2.8153159 66.86933 3.240849 0.0012\n3 3 9.126521 1.8973478 43.89990 2.759130 0.0058\n4 4 3.876834 0.8558356 17.56160 1.757990 0.0787\n```\n\n\n:::\n:::\n\n\n# OUTCOME WITH MORE THAN TWO CATEGORIES\n\nAlthough less common than binary outcomes, endpoints with more than two categories may be the outcome of interest, which can be either ordinal or nominal. In those case, the multinomial distribution is used selecting the appropriated link function depending on the type of response variable.\n\n### Ordinal variable\n\nThe function `ordinal::clmm` can be used to fit a GLMM model when the outcome is an ordinal variable.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodel<- ordinal::clmm(respord ~ trtp + avisitn + trtp*avisitn + (1 | usubjid), \n data = resp)\nsummary(model)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nCumulative Link Mixed Model fitted with the Laplace approximation\n\nformula: respord ~ trtp + avisitn + trtp * avisitn + (1 | usubjid)\ndata: resp\n\n link threshold nobs logLik AIC niter max.grad cond.H \n logit flexible 444 -482.20 984.40 688(1401) 6.06e-04 1.7e+02\n\nRandom effects:\n Groups Name Variance Std.Dev.\n usubjid (Intercept) 0.1511 0.3887 \nNumber of groups: usubjid 111 \n\nCoefficients:\n Estimate Std. 
Error z value Pr(>|z|)\ntrtpA -0.43273 0.36061 -1.200 0.230\navisitn2 0.01155 0.35236 0.033 0.974\navisitn3 -0.10702 0.35078 -0.305 0.760\navisitn4 0.17113 0.34837 0.491 0.623\ntrtpA:avisitn2 0.26151 0.50071 0.522 0.601\ntrtpA:avisitn3 0.57355 0.50389 1.138 0.255\ntrtpA:avisitn4 0.31098 0.49793 0.625 0.532\n\nThreshold coefficients:\n Estimate Std. Error z value\n1|2 -0.5630 0.2573 -2.188\n2|3 0.6573 0.2589 2.539\n```\n\n\n:::\n:::\n\n\n### Nominal variable\n\nNo R functions have been identified for handling multinomial distributions for a nominal variable in a frequentist framework.\n\n# REFERENCES\n\n\\[1\\] [SAS Institute Inc.. SAS Help Center. The GEE procedure.](https://documentation.sas.com/doc/en/statug/15.2/statug_gee_examples01.htm)\n\n\\[2\\] Conceptual explanations were assisted using Microsoft Copilot (M365 Copilot, GPT‑5‑based model).\n\n\\[3\\] [Stack Overflow \\[Internet\\]. 2008 \\[Last visited: 2025 Sep 30\\]](https://stackoverflow.com/questions/33670628/solution-to-the-warning-message-using-glmer)\n\n\\[4\\] [Li, P., & Redden, D. T. (2015). Comparing denominator degrees of freedom approximations for the generalized linear mixed model in analyzing binary outcome in small sample cluster-randomized trials. BMC Medical Research Methodology, 15, 38.](https://bmcmedresmethodol.biomedcentral.com/articles/10.1186/s12874-015-0026-x)\n\n\\[5\\] [U.S. Food and Drug Administration. (2023). Adjusting for Covariates in Randomized Clinical Trials for Drugs and Biological Products: Guidance for Industry. Center for Drug Evaluation and Research (CDER), Center for Biologics Evaluation and Research (CBER).](https://www.fda.gov/regulatory-information/search-fda-guidance-documents/adjusting-covariates-randomized-clinical-trials-drugs-and-biological-products)\n\n\\[6\\] [Documentation of package parameters. dof_betwithin](https://search.r-project.org/CRAN/refmans/parameters/html/p_value_betwithin.html)\n\n\\[7\\] Brooks, M. E., et al. (2025). 
glmmTMB: Generalized Linear Mixed Models using Template Model Builder (Version 1.1.12) \\[R package manual\\]. The Comprehensive R Archive Network (CRAN). \n\n\\[8\\] [Ordinal: Regression Models for Ordinal Data.](https://cran.r-project.org/web/packages/ordinal/index.html)", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/R/gsd-tte/execute-results/html.json b/_freeze/R/gsd-tte/execute-results/html.json index 0848781d2..391448714 100644 --- a/_freeze/R/gsd-tte/execute-results/html.json +++ b/_freeze/R/gsd-tte/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "f41ae59065a4098a28468e3f7cfd3ed2", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Group sequential design in R\"\neditor_options:\n chunk_output_type: console\n---\n\n## Group sequential design: time-to-event endpoint\n\nWhile a group sequential design (GSD) could be applied for different types of endpoints, here we focus on time-to-event endpoints.\n\n## Available R packages\n\nThe commonly used R packages for power and sample size calculations utilizing a GSD are: [gsDesign](https://keaven.github.io/gsDesign/) (also has a [web interface](https://rinpharma.shinyapps.io/gsdesign/)), [gsDesign2](https://merck.github.io/gsDesign2/), and [rpact](https://www.rpact.org/).\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(gsDesign)\nlibrary(gsDesign2)\nlibrary(rpact)\nlibrary(tibble)\n```\n:::\n\n\n## Design assumptions\n\nUsing a toy example, we will assume that a primary objective of a phase III oncology trial is to compare a new therapy to a control in terms of progression-free survival (PFS) and overall survival (OS). Note that, in this example, we have a family of primary endpoints, i.e., if at least one of the endpoints is successful, the study will be declared a success. A GSD will be utilized for each endpoint. PFS will be tested at one interim analysis (IA) for both efficacy and non-binding futility, while OS will be tested at two IAs for efficacy only. 
An O'Brien-Fleming spending function will be used for efficacy testing and a Hwang-Shih-Decani spending function with $\\gamma = -10$ will be used for futility.\n\nFurther design assumptions are as follows:\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# PFS HR = 0.6\nhr1_pfs <- 0.6\n# Median PFS of 9.4 months in the control arm\nmed_pfs <- 9.4\n# Median follow-up of 10 months for PFS\nminfu_pfs <- 10\n# Monthly dropout of 0.019 for PFS\ndo_rate_pfs <- 0.019\n# IA timing for PFS is at 75% information fraction\ntiming_pfs <- c(0.75, 1)\n# Power of 95% for PFS\npower_pfs <- 0.95\n\n# OS HR = 0.65\nhr1_os <- 0.65\n# Median OS of 3 years in the control arm\nmed_os <- 12 * 3\n# Median follow-up of 42 months for OS\nminfu_os <- 42\n# Monthly dropout of 0.001 for OS\ndo_rate_os <- 0.001\n# IA timing for OS is at 60% and 80% information fraction\ntiming_os <- c(0.6, 0.8, 1)\n# Power of 82% for OS\npower_os <- 0.82\n\n# Enrollment period of 24 months\nenroll_dur <- 24\n# 1:1 randomization ratio\nrand_ratio <- 1\n# alpha level of 1.25% for each endpoint\nalphal <- 0.0125\n```\n:::\n\n\nWe assume that given the above assumptions, we need to calculate the target number of events for each analysis as well as the total sample size.\n\n## Example code\n\n### Example using gsDesign\n\n- PFS calculations:\n\n\n::: {.cell}\n\n```{.r .cell-code}\npfs_gsDesign <- gsDesign::gsSurv(\n k = length(timing_pfs),\n timing = timing_pfs,\n R = enroll_dur,\n eta = do_rate_pfs,\n minfup = minfu_pfs,\n T = enroll_dur + minfu_pfs,\n lambdaC = log(2) / med_pfs,\n hr = hr1_pfs,\n beta = 1 - power_pfs,\n alpha = alphal,\n sfu = sfLDOF,\n sfl = sfHSD,\n sflpar = -10,\n test.type = 4\n)\n\npfs_gsDesign |>\n gsDesign::gsBoundSummary()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nMethod: LachinFoulkes \n Analysis Value Efficacy Futility\n IA 1: 75% Z 2.6584 0.7432\n N: 398 p (1-sided) 0.0039 0.2287\n Events: 176 ~HR at bound 0.6693 0.8938\n Month: 25 P(Cross) if HR=1 0.0039 0.7713\n P(Cross) if 
HR=0.6 0.7668 0.0041\n Final Z 2.2801 2.2801\n N: 398 p (1-sided) 0.0113 0.0113\n Events: 234 ~HR at bound 0.7421 0.7421\n Month: 34 P(Cross) if HR=1 0.0125 0.9875\n P(Cross) if HR=0.6 0.9500 0.0500\n```\n\n\n:::\n:::\n\n\n- OS calculations:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nos_gsDesign <- gsDesign::gsSurv(\n k = length(timing_os),\n timing = timing_os,\n R = enroll_dur,\n eta = do_rate_os,\n minfup = minfu_os,\n T = enroll_dur + minfu_os,\n lambdaC = log(2) / med_os,\n hr = hr1_os,\n beta = 1 - power_os,\n alpha = alphal,\n sfu = sfLDOF,\n test.type = 1\n)\n\nos_gsDesign |>\n gsDesign::gsBoundSummary()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nMethod: LachinFoulkes \n Analysis Value Efficacy\n IA 1: 60% Z 3.0205\n N: 394 p (1-sided) 0.0013\n Events: 131 ~HR at bound 0.5896\n Month: 38 P(Cross) if HR=1 0.0013\n P(Cross) if HR=0.65 0.2899\n IA 2: 80% Z 2.5874\n N: 394 p (1-sided) 0.0048\n Events: 175 ~HR at bound 0.6758\n Month: 51 P(Cross) if HR=1 0.0052\n P(Cross) if HR=0.65 0.6082\n Final Z 2.2958\n N: 394 p (1-sided) 0.0108\n Events: 218 ~HR at bound 0.7327\n Month: 66 P(Cross) if HR=1 0.0125\n P(Cross) if HR=0.65 0.8200\n```\n\n\n:::\n:::\n\n\n### Example using gsDesign2\n\n- PFS calculations:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nenroll_rate <- tibble(\n stratum = \"All\",\n duration = enroll_dur,\n rate = 1\n)\nfail_rate_pfs <- tibble(\n stratum = \"All\",\n duration = Inf, # Can be set to `Inf` when proportional hazard is assumed\n fail_rate = log(2) / med_pfs,\n hr = hr1_pfs,\n dropout_rate = do_rate_pfs\n)\n\npfs_gsDesign2 <- gsDesign2::gs_design_ahr(\n enroll_rate = enroll_rate,\n fail_rate = fail_rate_pfs,\n ratio = rand_ratio,\n beta = 1 - power_pfs,\n alpha = alphal,\n info_frac = timing_pfs,\n analysis_time = enroll_dur + minfu_pfs,\n upper = gs_spending_bound,\n upar = list(\n sf = gsDesign::sfLDOF,\n total_spend = alphal\n ),\n lower = gs_spending_bound,\n lpar = list(\n sf = gsDesign::sfHSD,\n total_spend = 1 - power_pfs,\n 
param = -10\n ),\n info_scale = \"h0_info\"\n)\n\npfs_gsDesign2 |>\n summary() |>\n as_gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n \n \n \n \n \n \n
Bound summary for AHR design
AHR approximations of ~HR at bound
BoundZNominal p1~HR at bound2\n
Cumulative boundary crossing probability
\n
Alternate hypothesisNull hypothesis
Analysis: 1 Time: 25.3 N: 405.8 Events: 179.2 AHR: 0.6 Information fraction: 0.75
Futility0.740.22870.89400.00410.7713
Efficacy2.660.00390.66970.76680.0039
Analysis: 2 Time: 34 N: 405.8 Events: 238.9 AHR: 0.6 Information fraction: 1
Futility2.280.01130.74240.05000.9875
Efficacy2.280.01130.74240.95000.0125
1 One-sided p-value for experimental vs control treatment. Value < 0.5 favors experimental, > 0.5 favors control.
2 Approximate hazard ratio to cross bound.
\n
\n```\n\n:::\n:::\n\n\n- OS calculations:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfail_rate_os <- tibble(\n stratum = \"All\",\n duration = Inf, # Can be set to `Inf` when proportional hazard is assumed\n fail_rate = log(2) / med_os,\n hr = hr1_os,\n dropout_rate = do_rate_os\n)\n\nos_gsDesign2 <- gsDesign2::gs_design_ahr(\n enroll_rate = pfs_gsDesign2$enroll_rate,\n fail_rate = fail_rate_os,\n ratio = rand_ratio,\n beta = 1 - power_os,\n alpha = alphal,\n info_frac = timing_os,\n analysis_time = enroll_dur + minfu_os,\n test_lower = FALSE,\n upper = gs_spending_bound,\n upar = list(\n sf = gsDesign::sfLDOF,\n total_spend = alphal\n ),\n info_scale = \"h0_info\"\n)\n\nos_gsDesign2 |>\n summary() |>\n as_gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n \n \n \n\n\n\n\n\n \n \n \n \n\n\n\n\n\n \n \n \n \n \n \n \n \n \n
Bound summary for AHR design
AHR approximations of ~HR at bound
BoundZNominal p1~HR at bound2\n
Cumulative boundary crossing probability
\n
Alternate hypothesisNull hypothesis
Analysis: 1 Time: 38.4 N: 402.6 Events: 133.7 AHR: 0.65 Information fraction: 0.6
Efficacy3.020.00130.59010.28990.0013
Analysis: 2 Time: 50.6 N: 402.6 Events: 178.2 AHR: 0.65 Information fraction: 0.8
Efficacy2.590.00480.67620.60820.0052
Analysis: 3 Time: 66 N: 402.6 Events: 222.8 AHR: 0.65 Information fraction: 1
Efficacy2.300.01080.73300.82000.0125
1 One-sided p-value for experimental vs control treatment. Value < 0.5 favors experimental, > 0.5 favors control.
2 Approximate hazard ratio to cross bound.
\n
\n```\n\n:::\n:::\n\n\n### Example using rpact\n\n- PFS calculations:\n\n\n::: {.cell}\n\n```{.r .cell-code}\npfs_rpact_gsd <- rpact::getDesignGroupSequential(\n sided = 1,\n alpha = alphal,\n informationRates = timing_pfs,\n typeOfDesign = \"asOF\",\n beta = 1 - power_pfs,\n typeBetaSpending = \"bsHSD\",\n gammaB = -10,\n bindingFutility = FALSE\n)\n\npfs_rpact <- rpact::getSampleSizeSurvival(\n design = pfs_rpact_gsd,\n accrualTime = enroll_dur,\n followUpTime = minfu_pfs,\n lambda2 = log(2) / med_pfs,\n hazardRatio = hr1_pfs,\n dropoutRate1 = 0.2,\n dropoutRate2 = 0.2,\n dropoutTime = 12\n)\n\nkable(summary(pfs_rpact))\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWarning in kable.ParameterSet(summary(pfs_rpact)): Manual use of kable() for\nrpact result objects is no longer needed, as the formatting and display will be\nhandled automatically by the rpact package\n```\n\n\n:::\n\n::: {.cell-output-display}\n*Sample size calculation for a survival endpoint*\n\nSequential analysis with a maximum of 2 looks (group sequential design), \none-sided overall significance level 1.25%, power 95%.\nThe results were calculated for a two-sample logrank test, \nH0: hazard ratio = 1, H1: hazard ratio = 0.6, control lambda(2) = 0.074, \naccrual time = 24, accrual intensity = 16.5, follow-up time = 10, \ndropout rate(1) = 0.2, dropout rate(2) = 0.2, dropout time = 12.\n\n| Stage | 1 | 2 |\n| ----- | ----- | ----- |\n| Planned information rate | 75% | 100% |\n| Cumulative alpha spent | 0.0039 | 0.0125 |\n| Cumulative beta spent | 0.0041 | 0.0500 |\n| Stage levels (one-sided) | 0.0039 | 0.0113 |\n| Efficacy boundary (z-value scale) | 2.658 | 2.280 |\n| Futility boundary (z-value scale) | 0.743 | |\n| Efficacy boundary (t) | 0.670 | 0.742 |\n| Futility boundary (t) | 0.894 | |\n| Cumulative power | 0.7668 | 0.9500 |\n| Number of subjects | 396.9 | 396.9 |\n| Expected number of subjects under H1 | | 396.9 |\n| Cumulative number of events | 175.8 | 234.4 |\n| Expected number 
of events under H1 | | 189.2 |\n| Analysis time | 25.36 | 34.00 |\n| Expected study duration under H1 | | 27.34 |\n| Overall exit probability (under H0) | 0.7752 | |\n| Overall exit probability (under H1) | 0.7709 | |\n| Exit probability for efficacy (under H0) | 0.0039 | |\n| Exit probability for efficacy (under H1) | 0.7668 | |\n| Exit probability for futility (under H0) | 0.7713 | |\n| Exit probability for futility (under H1) | 0.0041 | |\n\nLegend:\n\n* *(t)*: treatment effect scale\n\n:::\n:::\n\n\nNote: the `dropoutRate1`, `dropoutRate2` arguments in `getSampleSizeSurvival()` refer to the % of drop-outs by the `dropoutTime`, while the `eta` argument in `gsDesign::gsSurv()` and the `dropout_rate` value in the `fail_rate` argument in `gsDesign2::gs_design_ahr()` refer to the annual drop-out rate parameter under the exponential distribution. In our example, if $X$ is a drop-out time and $X \\sim \\text{Exponential} (\\lambda)$, we assume that by month 12 the drop-out rate was 20%, which implies: $P(X\\le12) = 1 - e^{-12\\lambda} = 0.2 \\Rightarrow \\lambda = 0.019$. 
Due to the above differences, the value $\\lambda = 0.019$ was used in the gsDesign and gsDesign2 example, while 0.2 was used in the rpact example.\n\n- OS calculations:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nos_rpact_gsd <- rpact::getDesignGroupSequential(\n sided = 1,\n alpha = alphal,\n informationRates = timing_os,\n typeOfDesign = \"asOF\",\n beta = 1 - power_os\n)\n\nos_rpact <- rpact::getSampleSizeSurvival(\n design = os_rpact_gsd,\n accrualTime = enroll_dur,\n followUpTime = minfu_os,\n lambda2 = log(2) / med_os,\n hazardRatio = hr1_os,\n dropoutRate1 = 1 - exp(-do_rate_os * 12),\n dropoutRate2 = 1 - exp(-do_rate_os * 12),\n dropoutTime = 12\n)\n\nkable(summary(os_rpact))\n```\n\n::: {.cell-output-display}\n*Sample size calculation for a survival endpoint*\n\nSequential analysis with a maximum of 3 looks (group sequential design), \none-sided overall significance level 1.25%, power 82%.\nThe results were calculated for a two-sample logrank test, \nH0: hazard ratio = 1, H1: hazard ratio = 0.65, control lambda(2) = 0.019, \naccrual time = 24, accrual intensity = 16.5, follow-up time = 42, \ndropout rate(1) = 0.012, dropout rate(2) = 0.012, dropout time = 12.\n\n| Stage | 1 | 2 | 3 |\n| ----- | ----- | ----- | ----- |\n| Planned information rate | 60% | 80% | 100% |\n| Cumulative alpha spent | 0.0013 | 0.0052 | 0.0125 |\n| Stage levels (one-sided) | 0.0013 | 0.0048 | 0.0108 |\n| Efficacy boundary (z-value scale) | 3.020 | 2.587 | 2.296 |\n| Efficacy boundary (t) | 0.590 | 0.676 | 0.733 |\n| Cumulative power | 0.2899 | 0.6082 | 0.8200 |\n| Number of subjects | 395.1 | 395.1 | 395.1 |\n| Expected number of subjects under H1 | | | 395.1 |\n| Cumulative number of events | 131.2 | 174.9 | 218.6 |\n| Expected number of events under H1 | | | 179.4 |\n| Analysis time | 38.44 | 50.60 | 66.00 |\n| Expected study duration under H1 | | | 53.11 |\n| Exit probability for efficacy (under H0) | 0.0013 | 0.0040 | |\n| Exit probability for efficacy (under H1) | 0.2899 | 0.3182 
| |\n\nLegend:\n\n* *(t)*: treatment effect scale\n\n:::\n:::\n\n", + "markdown": "---\ntitle: \"Group sequential design in R\"\neditor_options:\n chunk_output_type: console\n---\n\n## Group sequential design: time-to-event endpoint\n\nWhile a group sequential design (GSD) could be applied for different types of endpoints, here we focus on time-to-event endpoints.\n\n## Available R packages\n\nThe commonly used R packages for power and sample size calculations utilizing a GSD are: [gsDesign](https://keaven.github.io/gsDesign/) (also has a [web interface](https://rinpharma.shinyapps.io/gsdesign/)), [gsDesign2](https://merck.github.io/gsDesign2/), and [rpact](https://www.rpact.org/).\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(gsDesign)\nlibrary(gsDesign2)\nlibrary(rpact)\nlibrary(tibble)\n```\n:::\n\n\n## Design assumptions\n\nUsing a toy example, we will assume that a primary objective of a phase III oncology trial is to compare a new therapy to a control in terms of progression-free survival (PFS) and overall survival (OS). Note that, in this example, we have a family of primary endpoints, i.e., if at least one of the endpoints is successful, the study will be declared a success. A GSD will be utilized for each endpoint. PFS will be tested at one interim analysis (IA) for both efficacy and non-binding futility, while OS will be tested at two IAs for efficacy only. 
An O'Brien-Fleming spending function will be used for efficacy testing and a Hwang-Shih-Decani spending function with $\\gamma = -10$ will be used for futility.\n\nFurther design assumptions are as follows:\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# PFS HR = 0.6\nhr1_pfs <- 0.6\n# Median PFS of 9.4 months in the control arm\nmed_pfs <- 9.4\n# Median follow-up of 10 months for PFS\nminfu_pfs <- 10\n# Monthly dropout of 0.019 for PFS\ndo_rate_pfs <- 0.019\n# IA timing for PFS is at 75% information fraction\ntiming_pfs <- c(0.75, 1)\n# Power of 95% for PFS\npower_pfs <- 0.95\n\n# OS HR = 0.65\nhr1_os <- 0.65\n# Median OS of 3 years in the control arm\nmed_os <- 12 * 3\n# Median follow-up of 42 months for OS\nminfu_os <- 42\n# Monthly dropout of 0.001 for OS\ndo_rate_os <- 0.001\n# IA timing for OS is at 60% and 80% information fraction\ntiming_os <- c(0.6, 0.8, 1)\n# Power of 82% for OS\npower_os <- 0.82\n\n# Enrollment period of 24 months\nenroll_dur <- 24\n# 1:1 randomization ratio\nrand_ratio <- 1\n# alpha level of 1.25% for each endpoint\nalphal <- 0.0125\n```\n:::\n\n\nWe assume that given the above assumptions, we need to calculate the target number of events for each analysis as well as the total sample size.\n\n## Example code\n\n### Example using gsDesign\n\n- PFS calculations:\n\n\n::: {.cell}\n\n```{.r .cell-code}\npfs_gsDesign <- gsDesign::gsSurv(\n k = length(timing_pfs),\n timing = timing_pfs,\n R = enroll_dur,\n eta = do_rate_pfs,\n minfup = minfu_pfs,\n T = enroll_dur + minfu_pfs,\n lambdaC = log(2) / med_pfs,\n hr = hr1_pfs,\n beta = 1 - power_pfs,\n alpha = alphal,\n sfu = sfLDOF,\n sfl = sfHSD,\n sflpar = -10,\n test.type = 4\n)\n\npfs_gsDesign |>\n gsDesign::gsBoundSummary()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nMethod: LachinFoulkes \n Analysis Value Efficacy Futility\n IA 1: 75% Z 2.6584 0.7432\n N: 398 p (1-sided) 0.0039 0.2287\n Events: 176 ~HR at bound 0.6693 0.8938\n Month: 25 P(Cross) if HR=1 0.0039 0.7713\n P(Cross) if 
HR=0.6 0.7668 0.0041\n Final Z 2.2801 2.2801\n N: 398 p (1-sided) 0.0113 0.0113\n Events: 234 ~HR at bound 0.7421 0.7421\n Month: 34 P(Cross) if HR=1 0.0125 0.9875\n P(Cross) if HR=0.6 0.9500 0.0500\n```\n\n\n:::\n:::\n\n\n- OS calculations:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nos_gsDesign <- gsDesign::gsSurv(\n k = length(timing_os),\n timing = timing_os,\n R = enroll_dur,\n eta = do_rate_os,\n minfup = minfu_os,\n T = enroll_dur + minfu_os,\n lambdaC = log(2) / med_os,\n hr = hr1_os,\n beta = 1 - power_os,\n alpha = alphal,\n sfu = sfLDOF,\n test.type = 1\n)\n\nos_gsDesign |>\n gsDesign::gsBoundSummary()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nMethod: LachinFoulkes \n Analysis Value Efficacy\n IA 1: 60% Z 3.0205\n N: 394 p (1-sided) 0.0013\n Events: 131 ~HR at bound 0.5896\n Month: 38 P(Cross) if HR=1 0.0013\n P(Cross) if HR=0.65 0.2899\n IA 2: 80% Z 2.5874\n N: 394 p (1-sided) 0.0048\n Events: 175 ~HR at bound 0.6758\n Month: 51 P(Cross) if HR=1 0.0052\n P(Cross) if HR=0.65 0.6082\n Final Z 2.2958\n N: 394 p (1-sided) 0.0108\n Events: 218 ~HR at bound 0.7327\n Month: 66 P(Cross) if HR=1 0.0125\n P(Cross) if HR=0.65 0.8200\n```\n\n\n:::\n:::\n\n\n### Example using gsDesign2\n\n- PFS calculations:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nenroll_rate <- tibble(\n stratum = \"All\",\n duration = enroll_dur,\n rate = 1\n)\nfail_rate_pfs <- tibble(\n stratum = \"All\",\n duration = Inf, # Can be set to `Inf` when proportional hazard is assumed\n fail_rate = log(2) / med_pfs,\n hr = hr1_pfs,\n dropout_rate = do_rate_pfs\n)\n\npfs_gsDesign2 <- gsDesign2::gs_design_ahr(\n enroll_rate = enroll_rate,\n fail_rate = fail_rate_pfs,\n ratio = rand_ratio,\n beta = 1 - power_pfs,\n alpha = alphal,\n info_frac = timing_pfs,\n analysis_time = enroll_dur + minfu_pfs,\n upper = gs_spending_bound,\n upar = list(\n sf = gsDesign::sfLDOF,\n total_spend = alphal\n ),\n lower = gs_spending_bound,\n lpar = list(\n sf = gsDesign::sfHSD,\n total_spend = 1 - power_pfs,\n 
param = -10\n ),\n info_scale = \"h0_info\"\n)\n\npfs_gsDesign2 |>\n summary() |>\n as_gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n \n \n \n \n \n \n
Bound summary for AHR design
AHR approximations of ~HR at bound
BoundZNominal p1~HR at bound2\n
Cumulative boundary crossing probability
\n
Alternate hypothesisNull hypothesis
Analysis: 1 Time: 25.3 N: 405.8 Events: 179.2 AHR: 0.6 Information fraction: 0.75
Futility0.740.22870.89400.00410.7713
Efficacy2.660.00390.66970.76680.0039
Analysis: 2 Time: 34 N: 405.8 Events: 238.9 AHR: 0.6 Information fraction: 1
Futility2.280.01130.74240.05000.9875
Efficacy2.280.01130.74240.95000.0125
1 One-sided p-value for experimental vs control treatment. Value < 0.5 favors experimental, > 0.5 favors control.
2 Approximate hazard ratio to cross bound.
\n
\n```\n\n:::\n:::\n\n\n- OS calculations:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfail_rate_os <- tibble(\n stratum = \"All\",\n duration = Inf, # Can be set to `Inf` when proportional hazard is assumed\n fail_rate = log(2) / med_os,\n hr = hr1_os,\n dropout_rate = do_rate_os\n)\n\nos_gsDesign2 <- gsDesign2::gs_design_ahr(\n enroll_rate = pfs_gsDesign2$enroll_rate,\n fail_rate = fail_rate_os,\n ratio = rand_ratio,\n beta = 1 - power_os,\n alpha = alphal,\n info_frac = timing_os,\n analysis_time = enroll_dur + minfu_os,\n test_lower = FALSE,\n upper = gs_spending_bound,\n upar = list(\n sf = gsDesign::sfLDOF,\n total_spend = alphal\n ),\n info_scale = \"h0_info\"\n)\n\nos_gsDesign2 |>\n summary() |>\n as_gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n \n \n \n\n\n\n\n\n \n \n \n \n\n\n\n\n\n \n \n \n \n \n \n \n \n \n
Bound summary for AHR design
AHR approximations of ~HR at bound
BoundZNominal p1~HR at bound2\n
Cumulative boundary crossing probability
\n
Alternate hypothesisNull hypothesis
Analysis: 1 Time: 38.4 N: 402.6 Events: 133.7 AHR: 0.65 Information fraction: 0.6
Efficacy3.020.00130.59010.28990.0013
Analysis: 2 Time: 50.6 N: 402.6 Events: 178.2 AHR: 0.65 Information fraction: 0.8
Efficacy2.590.00480.67620.60820.0052
Analysis: 3 Time: 66 N: 402.6 Events: 222.8 AHR: 0.65 Information fraction: 1
Efficacy2.300.01080.73300.82000.0125
1 One-sided p-value for experimental vs control treatment. Value < 0.5 favors experimental, > 0.5 favors control.
2 Approximate hazard ratio to cross bound.
\n
\n```\n\n:::\n:::\n\n\n### Example using rpact\n\n- PFS calculations:\n\n\n::: {.cell}\n\n```{.r .cell-code}\npfs_rpact_gsd <- rpact::getDesignGroupSequential(\n sided = 1,\n alpha = alphal,\n informationRates = timing_pfs,\n typeOfDesign = \"asOF\",\n beta = 1 - power_pfs,\n typeBetaSpending = \"bsHSD\",\n gammaB = -10,\n bindingFutility = FALSE\n)\n\npfs_rpact <- rpact::getSampleSizeSurvival(\n design = pfs_rpact_gsd,\n accrualTime = enroll_dur,\n followUpTime = minfu_pfs,\n lambda2 = log(2) / med_pfs,\n hazardRatio = hr1_pfs,\n dropoutRate1 = 0.2,\n dropoutRate2 = 0.2,\n dropoutTime = 12\n)\n\nkable(summary(pfs_rpact))\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWarning in kable.ParameterSet(summary(pfs_rpact)): Manual use of kable() for\nrpact result objects is no longer needed, as the formatting and display will be\nhandled automatically by the rpact package\n```\n\n\n:::\n\n::: {.cell-output-display}\n*Sample size calculation for a survival endpoint*\n\nSequential analysis with a maximum of 2 looks (group sequential design), \none-sided overall significance level 1.25%, power 95%.\nThe results were calculated for a two-sample logrank test, \nH0: hazard ratio = 1, H1: hazard ratio = 0.6, control lambda(2) = 0.074, \naccrual time = 24, accrual intensity = 16.5, follow-up time = 10, \ndropout rate(1) = 0.2, dropout rate(2) = 0.2, dropout time = 12.\n\n| Stage | 1 | 2 |\n| ----- | ----- | ----- |\n| Planned information rate | 75% | 100% |\n| Cumulative alpha spent | 0.0039 | 0.0125 |\n| Cumulative beta spent | 0.0041 | 0.0500 |\n| Stage levels (one-sided) | 0.0039 | 0.0113 |\n| Efficacy boundary (z-value scale) | 2.658 | 2.280 |\n| Futility boundary (z-value scale) | 0.743 | |\n| Efficacy boundary (t) | 0.670 | 0.742 |\n| Futility boundary (t) | 0.894 | |\n| Cumulative power | 0.7668 | 0.9500 |\n| Number of subjects | 396.9 | 396.9 |\n| Expected number of subjects under H1 | | 396.9 |\n| Cumulative number of events | 175.8 | 234.4 |\n| Expected number 
of events under H1 | | 189.2 |\n| Analysis time | 25.36 | 34.00 |\n| Expected study duration under H1 | | 27.34 |\n| Overall exit probability (under H0) | 0.7752 | |\n| Overall exit probability (under H1) | 0.7709 | |\n| Exit probability for efficacy (under H0) | 0.0039 | |\n| Exit probability for efficacy (under H1) | 0.7668 | |\n| Exit probability for futility (under H0) | 0.7713 | |\n| Exit probability for futility (under H1) | 0.0041 | |\n\nLegend:\n\n* *(t)*: treatment effect scale\n\n:::\n:::\n\n\nNote: the `dropoutRate1`, `dropoutRate2` arguments in `getSampleSizeSurvival()` refer to the % of drop-outs by the `dropoutTime`, while the `eta` argument in `gsDesign::gsSurv()` and the `dropout_rate` value in the `fail_rate` argument in `gsDesign2::gs_design_ahr()` refer to the annual drop-out rate parameter under the exponential distribution. In our example, if $X$ is a drop-out time and $X \\sim \\text{Exponential} (\\lambda)$, we assume that by month 12 the drop-out rate was 20%, which implies: $P(X\\le12) = 1 - e^{-12\\lambda} = 0.2 \\Rightarrow \\lambda = 0.019$. 
Due to the above differences, the value $\\lambda = 0.019$ was used in the gsDesign and gsDesign2 example, while 0.2 was used in the rpact example.\n\n- OS calculations:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nos_rpact_gsd <- rpact::getDesignGroupSequential(\n sided = 1,\n alpha = alphal,\n informationRates = timing_os,\n typeOfDesign = \"asOF\",\n beta = 1 - power_os\n)\n\nos_rpact <- rpact::getSampleSizeSurvival(\n design = os_rpact_gsd,\n accrualTime = enroll_dur,\n followUpTime = minfu_os,\n lambda2 = log(2) / med_os,\n hazardRatio = hr1_os,\n dropoutRate1 = 1 - exp(-do_rate_os * 12),\n dropoutRate2 = 1 - exp(-do_rate_os * 12),\n dropoutTime = 12\n)\n\nkable(summary(os_rpact))\n```\n\n::: {.cell-output-display}\n*Sample size calculation for a survival endpoint*\n\nSequential analysis with a maximum of 3 looks (group sequential design), \none-sided overall significance level 1.25%, power 82%.\nThe results were calculated for a two-sample logrank test, \nH0: hazard ratio = 1, H1: hazard ratio = 0.65, control lambda(2) = 0.019, \naccrual time = 24, accrual intensity = 16.5, follow-up time = 42, \ndropout rate(1) = 0.012, dropout rate(2) = 0.012, dropout time = 12.\n\n| Stage | 1 | 2 | 3 |\n| ----- | ----- | ----- | ----- |\n| Planned information rate | 60% | 80% | 100% |\n| Cumulative alpha spent | 0.0013 | 0.0052 | 0.0125 |\n| Stage levels (one-sided) | 0.0013 | 0.0048 | 0.0108 |\n| Efficacy boundary (z-value scale) | 3.020 | 2.587 | 2.296 |\n| Efficacy boundary (t) | 0.590 | 0.676 | 0.733 |\n| Cumulative power | 0.2899 | 0.6082 | 0.8200 |\n| Number of subjects | 395.1 | 395.1 | 395.1 |\n| Expected number of subjects under H1 | | | 395.1 |\n| Cumulative number of events | 131.2 | 174.9 | 218.6 |\n| Expected number of events under H1 | | | 179.4 |\n| Analysis time | 38.44 | 50.60 | 66.00 |\n| Expected study duration under H1 | | | 53.11 |\n| Exit probability for efficacy (under H0) | 0.0013 | 0.0040 | |\n| Exit probability for efficacy (under H1) | 0.2899 | 0.3182 
| |\n\nLegend:\n\n* *(t)*: treatment effect scale\n\n:::\n:::\n\n", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/R/kolmogorov-smirnov_test/execute-results/html.json b/_freeze/R/kolmogorov-smirnov_test/execute-results/html.json index 17015c42e..a3abf8c8d 100644 --- a/_freeze/R/kolmogorov-smirnov_test/execute-results/html.json +++ b/_freeze/R/kolmogorov-smirnov_test/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "8d88ef01e73e0cf3432d8343bf86fb2b", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"R: Kolmogorov-Smirnov test\"\n---\n\n# Introduction\n\nKolmogorov-Smirnov (K-S) test is a non-parametric test employed to check whether the probability distributions of a sample and a control distribution, or two samples are equal. It is constructed based on the cumulative distribution function (CDF) and calculates the greatest difference between the empirical distribution function (EDF) of the sample and the theoretical or empirical distribution of the control sample.\n\nThe Kolmogorov-Smirnov test is mostly used for two purposes:\n\n1. **One-sample K-S test**: To compare the sample distribution to a known reference distribution.\n\n2. **Two-sample K-S test**: To compare the two independent samples' distributions.\n\nThe K-S test is formulated on the basis of the maximum difference between the observed and expected cumulative distribution functions (CDFs). The test is non-parametric, as it does not assume any specific distribution for the sample data. 
This makes it especially helpful in testing the goodness-of-fit for continuous distributions.\n\n# Libraries or Extensions Needed\n\nTo perform the Kolmogorov-Smirnov test in R, we will use the `ks.test()` function from the `dgof` package.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(dgof)\n```\n:::\n\n\n# Data Sources for the Analysis\n\nWe will use the `lung` dataset from the `survival` package.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(survival)\nattach(lung)\n```\n:::\n\n\nDetails about the lung dataset can be found in the documentation for the `survival` package, which is available at .\n\n# Statistical Method\n\n## One-sample K-S test\n\nFor this example, we will test whether the Karnofsky performance score rated by physician (`ph.karno`), and Karnofsky performance score rated by patient (`pat.karno`) follow a normal distribution.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nks.test(ph.karno, \"pnorm\")\nks.test(pat.karno, \"pnorm\")\n```\n:::\n\n\nBoth tests have p-values \\< 2.2e-16, which indicates that the distributions of `ph.karno` and `pat.karno` are significantly different from a normal distribution.\n\n## Two-sample K-S test\n\nNext, we will compare the distributions of `ph.karno` and `pat.karno` using the two-sample K-S test.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nks.test(ph.karno, pat.karno)\n```\n:::\n\n\nThe p-value of 0.2084 suggests that there is no significant difference between the distributions of `ph.karno` and `pat.karno`. This indicates that the Karnofsky performance scores rated by physicians and patients are not significantly different in terms of their distribution.\n\n# Conclusion\n\nWe demonstrated the use of the Kolmogorov-Smirnov test in R using the ks.test() function from the dgof package, which is straightforward and handy to use. 
As far as we are aware of, this is the most widely used function in R to perform the Kolmogorov-Smirnov test.\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-26\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n R dgof [?] \n survival 3.8-3 2024-12-17 [2] CRAN (R 4.5.2)\n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n\n R ── Package was removed from disk.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n:::\n", + "markdown": "---\ntitle: \"R: Kolmogorov-Smirnov test\"\n---\n\n# Introduction\n\nKolmogorov-Smirnov (K-S) test is a non-parametric test employed to check whether the probability distributions of a sample and a control distribution, or two samples are equal. It is constructed based on the cumulative distribution function (CDF) and calculates the greatest difference between the empirical distribution function (EDF) of the sample and the theoretical or empirical distribution of the control sample.\n\nThe Kolmogorov-Smirnov test is mostly used for two purposes:\n\n1. **One-sample K-S test**: To compare the sample distribution to a known reference distribution.\n\n2. 
**Two-sample K-S test**: To compare the two independent samples' distributions.\n\nThe K-S test is formulated on the basis of the maximum difference between the observed and expected cumulative distribution functions (CDFs). The test is non-parametric, as it does not assume any specific distribution for the sample data. This makes it especially helpful in testing the goodness-of-fit for continuous distributions.\n\n# Libraries or Extensions Needed\n\nTo perform the Kolmogorov-Smirnov test in R, we will use the `ks.test()` function from the `dgof` package.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(dgof)\n```\n:::\n\n\n# Data Sources for the Analysis\n\nWe will use the `lung` dataset from the `survival` package.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(survival)\nattach(lung)\n```\n:::\n\n\nDetails about the lung dataset can be found in the documentation for the `survival` package, which is available at .\n\n# Statistical Method\n\n## One-sample K-S test\n\nFor this example, we will test whether the Karnofsky performance score rated by physician (`ph.karno`), and Karnofsky performance score rated by patient (`pat.karno`) follow a normal distribution.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nks.test(ph.karno, \"pnorm\")\nks.test(pat.karno, \"pnorm\")\n```\n:::\n\n\nBoth tests have p-values \\< 2.2e-16, which indicates that the distributions of `ph.karno` and `pat.karno` are significantly different from a normal distribution.\n\n## Two-sample K-S test\n\nNext, we will compare the distributions of `ph.karno` and `pat.karno` using the two-sample K-S test.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nks.test(ph.karno, pat.karno)\n```\n:::\n\n\nThe p-value of 0.2084 suggests that there is no significant difference between the distributions of `ph.karno` and `pat.karno`. 
This indicates that the Karnofsky performance scores rated by physicians and patients are not significantly different in terms of their distribution.\n\n# Conclusion\n\nWe demonstrated the use of the Kolmogorov-Smirnov test in R using the ks.test() function from the dgof package, which is straightforward and handy to use. As far as we are aware of, this is the most widely used function in R to perform the Kolmogorov-Smirnov test.\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os Ubuntu 24.04.3 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-17\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n R dgof [?] 
\n survival 3.8-3 2024-12-17 [2] CRAN (R 4.5.2)\n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.2/lib/R/library\n\n R ── Package was removed from disk.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n:::\n", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/R/logistic_regr/execute-results/html.json b/_freeze/R/logistic_regr/execute-results/html.json index 87fdf980f..c551d6332 100644 --- a/_freeze/R/logistic_regr/execute-results/html.json +++ b/_freeze/R/logistic_regr/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "39852b48cd43fbaa59d09c59cadeb8bb", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Logistic Regression in R\"\n---\n\n\n\nIn binary logistic regression, there is a single binary dependent variable, coded by an indicator variable. For example, if we represent a response as 1 and non-response as 0, then the corresponding probability of response, can be between 0 (certainly not a response) and 1 (certainly a response) - hence the labeling !\n\nThe logistic model models the log-odds of an event as a linear combination of one or more independent variables (explanatory variables). If we observed $(y_i, x_i),$ where $y_i$ is a Bernoulli variable and $x_i$ a vector of explanatory variables, the model for $\\pi_i = P(y_i=1)$ is\n\n$$\n\\text{logit}(\\pi_i)= \\log\\left\\{ \\frac{\\pi_i}{1-\\pi_i}\\right\\} = \\beta_0 + \\beta x_i, i = 1,\\ldots,n \n$$\n\nThe model is especially useful in case-control studies and leads to the effect of risk factors by odds ratios.\n\n# Example: Lung Cancer Data\n\n*Data source: Loprinzi CL. Laurie JA. Wieand HS. Krook JE. Novotny PJ. Kugler JW. Bartel J. Law M. Bateman M. Klatt NE. et al. Prospective evaluation of prognostic variables from patient-completed questionnaires. North Central Cancer Treatment Group. Journal of Clinical Oncology. 
12(3):601-7, 1994.*\n\nwgt_catn consists of: 1= patients a weight loss of zero or less, 0= patients with a weight loss of more than zero\n\ntrt01pn consists of 1= active treatment, 0 = placebo\n\n# Model Fit\n\nWe analyze the event of weight gain (or staying the same weight) in lung cancer patients in dependency of treatment (active or placebo), age, sex, ECOG performance score and calories consumed at meals. One of the most important things to remember is to ensure you tell R what your event is and what treatment comparison you are doing Active / Placebo or Placebo/Active! The easiest way to do this is to have event (or non-reference treatment) as 1, and non-event (reference treatment) as 0.\n\nBelow we are using wt_catn (0,1) and trt01pn (1,2) and sex (1,2). Let's see what happens !\n\nhead(lung)\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlung <- read.csv(\"../data/lung_cancer.csv\")\nhead(lung)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n inst time status age sex ph.ecog ph.karno pat.karno meal.cal wt.loss trt01p\n1 3 306 2 74 1 1 90 100 1175 NA Active\n2 3 455 2 68 1 0 90 90 1225 15 Active\n3 3 1010 1 56 1 0 90 90 NA 15 Active\n4 5 210 2 57 1 1 90 60 1150 11 Active\n5 1 883 2 60 1 0 100 90 NA 0 Active\n6 12 1022 1 74 1 1 50 80 513 0 Active\n trt01pn dose_mg dose_id wt_cat wt_catn cnsr\n1 1 10 1 NA 0\n2 1 10 1 loss 0 0\n3 1 10 1 loss 0 1\n4 1 10 1 loss 0 0\n5 1 10 1 gain 1 0\n6 1 10 1 gain 1 1\n```\n\n\n:::\n\n```{.r .cell-code}\nm1 <- glm(\n wt_catn ~ trt01pn + age + sex + ph.ecog + meal.cal,\n data = lung,\n family = binomial(link = \"logit\")\n)\nsummary(m1)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\nCall:\nglm(formula = wt_catn ~ trt01pn + age + sex + ph.ecog + meal.cal, \n family = binomial(link = \"logit\"), data = lung)\n\nCoefficients:\n Estimate Std. 
Error z value Pr(>|z|) \n(Intercept) -3.8623998 1.7675776 -2.185 0.0289 *\ntrt01pn 0.3887667 0.3781566 1.028 0.3039 \nage 0.0122549 0.0211553 0.579 0.5624 \nsex 0.8321005 0.3743793 2.223 0.0262 *\nph.ecog -0.3763592 0.2638322 -1.427 0.1537 \nmeal.cal 0.0008500 0.0004486 1.895 0.0581 .\n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n(Dispersion parameter for binomial family taken to be 1)\n\n Null deviance: 202.36 on 169 degrees of freedom\nResidual deviance: 190.46 on 164 degrees of freedom\n (58 observations deleted due to missingness)\nAIC: 202.46\n\nNumber of Fisher Scoring iterations: 4\n```\n\n\n:::\n:::\n\n\nR by default sets the first category as the baseline category, hence trt01pn =1 and sex =1 are the baseline level, and other levels of the variable are contrasted to this level. This is using `contr.treatment` option (more information on this later!). The estimate for those variables (0.3887 and 0.8321) are the increase in the log-odds of being of receiving treatment 2 vs 1, and of being of sex=2 vs 1. The exponential of the estimate is the odds ratio. For example, exp(0.3887)=1.475, hence Treatment 2 is 1.475 times as likely to have weight gain compared to Treatment 1.\n\nThe intercept represents the baseline log odds of the outcome when all predictor variables are set to zero. In the above model, we have variables treatment and sex, each coded as 1 and 2. Currently R is not treating these as binary factors, instead R thinks they are a continuous variable, and hence the intercept is where treatment=0 and sex=0. In the context of our model, this doesn't make sense as you can't have a zero gender and zero treatment (but it may not matter as we rarely look at the intercept term anyway!)\n\nHowever, if you want to have an interpretable intercept term (and if you want to match SAS output for intercept!), then it's important to ensure any factors in your model as fitted as such in R. 
You can do this using: `lung$trt01pn<-as.factor(lung$trt01pn)` or by changing the variable to a 0 and 1. Note if you are using the same `contr.treatment` option, then this only affects the intercept estimate not the variable estimates.\n\n## Modelling factors correctly and Interpretation of the Intercept\n\nSo far we've learnt that it is good practice to always have binary and categorical variables set as factors, and that we should specify what method we want R to use for doing any contrasts (e.g. `contr.treatment`). You can specify different contrast methods for each variable by including them in a list in the model. It can sometimes be hard to see what contrast method R is using, so best to always specify this in your model (e.g. `contrasts = list(trt01pn = \"contr.treatment\", sex=\"contr.sum\")`. It is helpful to view what the contrasts look like before you select which to use.\n\nA factor with 2 levels (2 treatments) using `contr.treatment` would set the first level to be the reference (0) and contrast the second level to the baseline.\n\nA factor with 4 levels (4 treatments) using `contr.treatment` would set the first level to be the reference (0), you would see 3 parameters in the model with 3 estimates corresponding to the increase in log-odds attributable to contrasting the second, third or fourth level to the baseline.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncontr.treatment(2)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n 2\n1 0\n2 1\n```\n\n\n:::\n\n```{.r .cell-code}\ncontr.treatment(4)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n 2 3 4\n1 0 0 0\n2 1 0 0\n3 0 1 0\n4 0 0 1\n```\n\n\n:::\n:::\n\n\nBelow we apply `contr.treatment` to our trt01pn and sex variables. Note: how the variables trt01pn2 and sex2 are now shown in the output, this is indicating that these rows relates to treatment=2 and sex=2. 
See the next section for how to interpret the estimates and change this `contr.` option.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Good practice to have binary variables (eg. treatment) identified as a factor\n# And to specify what contrast option you are using ! more on this below\nlung$trt01pn <- as.factor(lung$trt01pn)\nlung$sex <- as.factor(lung$sex)\n\nm1 <- stats::glm(\n wt_catn ~ trt01pn + age + sex + ph.ecog + meal.cal,\n data = lung,\n family = binomial(link = \"logit\"),\n contrasts = list(trt01pn = \"contr.treatment\", sex = \"contr.treatment\")\n)\nsummary(m1)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\nCall:\nstats::glm(formula = wt_catn ~ trt01pn + age + sex + ph.ecog + \n meal.cal, family = binomial(link = \"logit\"), data = lung, \n contrasts = list(trt01pn = \"contr.treatment\", sex = \"contr.treatment\"))\n\nCoefficients:\n Estimate Std. Error z value Pr(>|z|) \n(Intercept) -2.6415326 1.5140191 -1.745 0.0810 .\ntrt01pn2 0.3887667 0.3781566 1.028 0.3039 \nage 0.0122549 0.0211553 0.579 0.5624 \nsex2 0.8321005 0.3743793 2.223 0.0262 *\nph.ecog -0.3763592 0.2638322 -1.427 0.1537 \nmeal.cal 0.0008500 0.0004486 1.895 0.0581 .\n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n(Dispersion parameter for binomial family taken to be 1)\n\n Null deviance: 202.36 on 169 degrees of freedom\nResidual deviance: 190.46 on 164 degrees of freedom\n (58 observations deleted due to missingness)\nAIC: 202.46\n\nNumber of Fisher Scoring iterations: 4\n```\n\n\n:::\n:::\n\n\n# Model parameter estimates\n\nThe model summary contains the parameter estimates $\\beta_j$ for each explanatory variable $x_j$, corresponding to the log-odds for the response variable to take the value $1$, conditional on all other explanatory variables remaining constant. 
For better interpretation, we can exponentiate these estimates, to obtain estimates for the odds instead and provide 95% confidence intervals.\n\n## How R parameterizes your variables in the model\n\n- contr.treatment - \\[default\\] sets the first level to be the reference and contrasts each other level with the baseline. For example, based on the model shown below. Log-odds for Treatment 2 = -2.64153 + 0.38876 =-2.25277 , Log-odds for Treatment 1=-2.64153\n\n- contr.sum - sets the last level to be the reference and compares the mean of the dependent variable for a given level to the overall mean of the dependent variable. For treatment 1, the intercept + trt01pn1 estimate. For Treatment 2, the intercept - trt01pn1 estimate.\\\n Log-odds for Treatment 2 = -2.44715 - - 0.19438 = -2.25277, Log-odds for Treatment 1=-2.44715 - 0.19438 = -2.64153\\\n use `contr.sum(2)` to show the contrast you are using and hence how to interpret your estimates.\n\n- exponential (ratio log-odds) = odds ratio. eg. -2.25277/ -2.64153 = 0.85283 Treatment 2 is 0.85 times as likely (eg. 15% less likely) to have weight gain compared to Treatment 1.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nma <- stats::glm(\n wt_catn ~ trt01pn + age + sex + ph.ecog + meal.cal,\n data = lung,\n family = binomial(link = \"logit\"),\n contrasts = list(trt01pn = \"contr.treatment\", sex = \"contr.treatment\")\n)\nsummary(ma)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\nCall:\nstats::glm(formula = wt_catn ~ trt01pn + age + sex + ph.ecog + \n meal.cal, family = binomial(link = \"logit\"), data = lung, \n contrasts = list(trt01pn = \"contr.treatment\", sex = \"contr.treatment\"))\n\nCoefficients:\n Estimate Std. 
Error z value Pr(>|z|) \n(Intercept) -2.6415326 1.5140191 -1.745 0.0810 .\ntrt01pn2 0.3887667 0.3781566 1.028 0.3039 \nage 0.0122549 0.0211553 0.579 0.5624 \nsex2 0.8321005 0.3743793 2.223 0.0262 *\nph.ecog -0.3763592 0.2638322 -1.427 0.1537 \nmeal.cal 0.0008500 0.0004486 1.895 0.0581 .\n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n(Dispersion parameter for binomial family taken to be 1)\n\n Null deviance: 202.36 on 169 degrees of freedom\nResidual deviance: 190.46 on 164 degrees of freedom\n (58 observations deleted due to missingness)\nAIC: 202.46\n\nNumber of Fisher Scoring iterations: 4\n```\n\n\n:::\n\n```{.r .cell-code}\ncontr.treatment(2)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n 2\n1 0\n2 1\n```\n\n\n:::\n\n```{.r .cell-code}\nmb <- stats::glm(\n wt_catn ~ trt01pn + age + sex + ph.ecog + meal.cal,\n data = lung,\n family = binomial(link = \"logit\"),\n contrasts = list(trt01pn = \"contr.sum\", sex = \"contr.treatment\")\n)\nsummary(mb)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\nCall:\nstats::glm(formula = wt_catn ~ trt01pn + age + sex + ph.ecog + \n meal.cal, family = binomial(link = \"logit\"), data = lung, \n contrasts = list(trt01pn = \"contr.sum\", sex = \"contr.treatment\"))\n\nCoefficients:\n Estimate Std. Error z value Pr(>|z|) \n(Intercept) -2.4471493 1.4929863 -1.639 0.1012 \ntrt01pn1 -0.1943833 0.1890783 -1.028 0.3039 \nage 0.0122549 0.0211553 0.579 0.5624 \nsex2 0.8321005 0.3743793 2.223 0.0262 *\nph.ecog -0.3763592 0.2638322 -1.427 0.1537 \nmeal.cal 0.0008500 0.0004486 1.895 0.0581 .\n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n\n(Dispersion parameter for binomial family taken to be 1)\n\n Null deviance: 202.36 on 169 degrees of freedom\nResidual deviance: 190.46 on 164 degrees of freedom\n (58 observations deleted due to missingness)\nAIC: 202.46\n\nNumber of Fisher Scoring iterations: 4\n```\n\n\n:::\n\n```{.r .cell-code}\ncontr.sum(2)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n [,1]\n1 1\n2 -1\n```\n\n\n:::\n:::\n\n\n## Calculation of confidence intervals\n\nUsing the above model, you can output the estimates and confidence intervals using coef() and confint() and exponential back transforming using exp(). NOTE: that there are two types of confidence intervals that you can calculate. Function `confint.default` gives the Wald confidence limits, which is the default option in SAS `PROC LOGISTIC` procedure; whereas `confint` gives the profile-likelihood limits.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# model coefficients summary\nsummary(m1)$coefficients\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n Estimate Std. 
Error z value Pr(>|z|)\n(Intercept) -2.6415326252 1.5140190574 -1.7447156 0.08103439\ntrt01pn2 0.3887666677 0.3781565596 1.0280574 0.30392281\nage 0.0122549015 0.0211552875 0.5792831 0.56239813\nsex2 0.8321005169 0.3743792762 2.2226137 0.02624186\nph.ecog -0.3763592487 0.2638321918 -1.4265100 0.15372119\nmeal.cal 0.0008499918 0.0004486401 1.8945961 0.05814593\n```\n\n\n:::\n\n```{.r .cell-code}\n# Wald confidence limits\ncbind(est = exp(coef(m1)), exp(confint.default(m1)))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n est 2.5 % 97.5 %\n(Intercept) 0.07125198 0.003664896 1.385263\ntrt01pn2 1.47516031 0.702994248 3.095470\nage 1.01233030 0.971213751 1.055188\nsex2 2.29814096 1.103327506 4.786840\nph.ecog 0.68635572 0.409236994 1.151128\nmeal.cal 1.00085035 0.999970674 1.001731\n```\n\n\n:::\n\n```{.r .cell-code}\n# profile-likelihood limits\ncbind(est = exp(coef(m1)), exp(confint(m1)))\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWaiting for profiling to be done...\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stdout}\n\n```\n est 2.5 % 97.5 %\n(Intercept) 0.07125198 0.003312288 1.302587\ntrt01pn2 1.47516031 0.696210092 3.085089\nage 1.01233030 0.971670916 1.056194\nsex2 2.29814096 1.107651762 4.836770\nph.ecog 0.68635572 0.405659156 1.147452\nmeal.cal 1.00085035 0.999978126 1.001761\n```\n\n\n:::\n:::\n\n\n# Comparing 2 models\n\nTo compare two logistic models, the `residual deviances` (-2 \\* log likelihoods) are compared against a $\\chi^2$-distribution with degrees of freedom calculated using the difference in the two models' parameters. Below, the only difference is the inclusion/exclusion of age in the model, hence we test using $\\chi^2$ with 1 df. 
Here testing at the 5% level.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nm2 <- stats::glm(\n wt_catn ~ trt01pn + sex + ph.ecog + meal.cal,\n data = lung,\n family = binomial(link = \"logit\"),\n contrasts = list(trt01pn = \"contr.treatment\")\n)\nsummary(m2)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\nCall:\nstats::glm(formula = wt_catn ~ trt01pn + sex + ph.ecog + meal.cal, \n family = binomial(link = \"logit\"), data = lung, contrasts = list(trt01pn = \"contr.treatment\"))\n\nCoefficients:\n Estimate Std. Error z value Pr(>|z|) \n(Intercept) -1.8350766 0.5810727 -3.158 0.00159 **\ntrt01pn2 0.3681563 0.3761976 0.979 0.32777 \nsex2 0.7919227 0.3674936 2.155 0.03117 * \nph.ecog -0.3312031 0.2527586 -1.310 0.19008 \nmeal.cal 0.0007896 0.0004378 1.803 0.07131 . \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n(Dispersion parameter for binomial family taken to be 1)\n\n Null deviance: 202.36 on 169 degrees of freedom\nResidual deviance: 190.80 on 165 degrees of freedom\n (58 observations deleted due to missingness)\nAIC: 200.8\n\nNumber of Fisher Scoring iterations: 4\n```\n\n\n:::\n\n```{.r .cell-code}\nanova(m1, m2, test = \"LRT\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nAnalysis of Deviance Table\n\nModel 1: wt_catn ~ trt01pn + age + sex + ph.ecog + meal.cal\nModel 2: wt_catn ~ trt01pn + sex + ph.ecog + meal.cal\n Resid. Df Resid. Dev Df Deviance Pr(>Chi)\n1 164 190.46 \n2 165 190.80 -1 -0.33867 0.5606\n```\n\n\n:::\n:::\n\n\nStackexchange [here](https://stats.stackexchange.com/questions/59879/logistic-regression-anova-chi-square-test-vs-significance-of-coefficients-ano) has a good article describing this method and the difference between comparing 2 models using the likelihood ratio tests versus using wald tests and Pr\\>chisq (from the maximum likelihood estimate). 
Note: `anova(m1, m2, test = \"Chisq\")` and using `test=\"LRT\"` as above are synonymous in this context.\n\n# Predicting likelihood of response for new patients\n\nPredictions from the model for the log-odds of a patient with new data to experience a weight loss are derived using `predict()`:\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# new female, symptomatic but completely ambulatory patient consuming 2500 calories\nnew_pt <- data.frame(\n trt01pn = 1,\n age = 48,\n sex = 2,\n ph.ecog = 1,\n meal.cal = 2500\n)\nnew_pt$trt01pn <- as.factor(new_pt$trt01pn)\nnew_pt$sex <- as.factor(new_pt$sex)\npredict(m1, new_pt, type = \"response\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n 1 \n0.628882 \n```\n\n\n:::\n:::\n\n\n# Creating Treatment Contrasts for 2 treatments\n\n## {emmeans}\n\nHere we will use {emmeans} to output the log-odds of weight gain for treatment 1 and treatment 2.\n\nNOTE as per the output, these are on the logit scale, you need to exponentiate to get the odds (or use the type=\"response\" option).\n\nThe treatment comparison can also be output on the log-odds or back transformed scale as shown below.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nm3 <- stats::glm(\n wt_catn ~ trt01pn + age + sex + ph.ecog + meal.cal,\n data = lung,\n family = binomial(link = \"logit\"),\n contrasts = list(trt01pn = \"contr.treatment\")\n)\nsummary(m3)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\nCall:\nstats::glm(formula = wt_catn ~ trt01pn + age + sex + ph.ecog + \n meal.cal, family = binomial(link = \"logit\"), data = lung, \n contrasts = list(trt01pn = \"contr.treatment\"))\n\nCoefficients:\n Estimate Std. Error z value Pr(>|z|) \n(Intercept) -2.6415326 1.5140191 -1.745 0.0810 .\ntrt01pn2 0.3887667 0.3781566 1.028 0.3039 \nage 0.0122549 0.0211553 0.579 0.5624 \nsex2 0.8321005 0.3743793 2.223 0.0262 *\nph.ecog -0.3763592 0.2638322 -1.427 0.1537 \nmeal.cal 0.0008500 0.0004486 1.895 0.0581 .\n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n\n(Dispersion parameter for binomial family taken to be 1)\n\n Null deviance: 202.36 on 169 degrees of freedom\nResidual deviance: 190.46 on 164 degrees of freedom\n (58 observations deleted due to missingness)\nAIC: 202.46\n\nNumber of Fisher Scoring iterations: 4\n```\n\n\n:::\n\n```{.r .cell-code}\ncontr.treatment(2)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n 2\n1 0\n2 1\n```\n\n\n:::\n\n```{.r .cell-code}\n# log-odds for each treatment\nlsm <- emmeans::emmeans(m3, \"trt01pn\")\nlsm\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n trt01pn emmean SE df asymp.LCL asymp.UCL\n 1 -1.034 0.225 Inf -1.47 -0.5935\n 2 -0.645 0.302 Inf -1.24 -0.0529\n\nResults are averaged over the levels of: sex \nResults are given on the logit (not the response) scale. \nConfidence level used: 0.95 \n```\n\n\n:::\n\n```{.r .cell-code}\n# log-odds ratios (treatment comparison): This does all pairwise comparisons\n# However as seen below, this is TRT 1 - TRT 2\npairs(lsm)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n contrast estimate SE df z.ratio p.value\n trt01pn1 - trt01pn2 -0.389 0.378 Inf -1.028 0.3039\n\nResults are averaged over the levels of: sex \nResults are given on the log odds ratio (not the response) scale. 
\n```\n\n\n:::\n\n```{.r .cell-code}\n# the below creates tests and CI's prior to back transformation (ratios of geometric means)\npairs(lsm, type = \"response\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n contrast odds.ratio SE df null z.ratio p.value\n trt01pn1 / trt01pn2 0.678 0.256 Inf 1 -1.028 0.3039\n\nResults are averaged over the levels of: sex \nTests are performed on the log odds ratio scale \n```\n\n\n:::\n\n```{.r .cell-code}\n# see coefficients of the linear functions\ncoef(pairs(lsm))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n trt01pn c.1\n1 1 1\n2 2 -1\n```\n\n\n:::\n\n```{.r .cell-code}\n# Output treatment contrasts 2 vs 1 and 95% CIs, the type=\"response\" option back transforms the results\ntrtdiff <- contrast(lsm, \"poly\")\ntrtdiff\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n contrast estimate SE df z.ratio p.value\n linear 0.389 0.378 Inf 1.028 0.3039\n\nResults are averaged over the levels of: sex \nResults are given on the log odds ratio (not the response) scale. \n```\n\n\n:::\n\n```{.r .cell-code}\nconfint(trtdiff, type = \"response\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n contrast odds.ratio SE df asymp.LCL asymp.UCL\n linear 1.48 0.558 Inf 0.703 3.1\n\nResults are averaged over the levels of: sex \nConfidence level used: 0.95 \nIntervals are back-transformed from the log odds ratio scale \n```\n\n\n:::\n:::\n\n\nIn Summary: Treatment 2 is on average 1.48 times as likely to have weight gain compared to treatment 1, however this is not statistically significant (95% Confidence interval = 0.703-3.100, p-value= 0.3039).\n\n# Creating Treatment Contrasts for 2 or more treatments\n\n{emmeans} can also be used to do specific contrasts, instead of investigating treatment (active vs placebo), suppose we now want to look at 2 dose groups vs placebo.\n\nWe have a 3 level treatment variable (dose_id), where 1=10mg, 2=20mg doses for active treatment and 3= placebo which is 0mg. 
We want to test the null hypothesis that 0.5\\**dose10mg + 0.5*\\*dose20mg - placebo = 0. That there is not difference between the doses.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlung2 <- lung |>\n mutate(dose_id2 = as.factor(lung$dose_id))\n\nm3 <- stats::glm(\n wt_catn ~ dose_id2 + age + sex + ph.ecog + meal.cal,\n data = lung2,\n family = binomial(link = \"logit\"),\n contrasts = list(dose_id2 = \"contr.treatment\")\n)\n\n# log-odds for each treatment\nlsm3 <- emmeans::emmeans(m3, \"dose_id2\")\nlsm3\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n dose_id2 emmean SE df asymp.LCL asymp.UCL\n 1 -0.903 0.296 Inf -1.48 -0.3228\n 2 -1.201 0.348 Inf -1.88 -0.5201\n 3 -0.643 0.302 Inf -1.23 -0.0518\n\nResults are averaged over the levels of: sex \nResults are given on the logit (not the response) scale. \nConfidence level used: 0.95 \n```\n\n\n:::\n\n```{.r .cell-code}\ncontrast(lsm3, list(AveDose_vs_pbo = c(0.5, 0.5, -1)))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n contrast estimate SE df z.ratio p.value\n AveDose_vs_pbo -0.41 0.38 Inf -1.078 0.2813\n\nResults are averaged over the levels of: sex \nResults are given on the log odds ratio (not the response) scale. \n```\n\n\n:::\n\n```{.r .cell-code}\nconfint(contrast(lsm3, list(AveDose_vs_pbo = c(0.5, 0.5, -1))))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n contrast estimate SE df asymp.LCL asymp.UCL\n AveDose_vs_pbo -0.41 0.38 Inf -1.15 0.335\n\nResults are averaged over the levels of: sex \nResults are given on the log odds ratio (not the response) scale. 
\nConfidence level used: 0.95 \n```\n\n\n:::\n:::\n\n\nHere we found that on average there is -0.41 times the risk of weight gain on active vs placebo but that this is not statisticallly significantly different (95% CI -1.15 to 0.335, p-value = 0.2813).\n\nSee the emmeans vignette on creating bespoke contrasts [here](Comparisons%20and%20contrasts%20in%20emmeans).\n\n## {gmodels}\n\n{gmodels} is an alternative package to create contrasts instead of \\, you can use the `fit.contrast()` function from the `gmodels` package. The same result is obtained as \\.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlung2 <- lung |>\n mutate(dose_id2 = as.factor(lung$dose_id))\n\nm3 <- stats::glm(\n wt_catn ~ dose_id2 + age + sex + ph.ecog + meal.cal,\n data = lung2,\n family = binomial(link = \"logit\"),\n contrasts = list(dose_id2 = \"contr.treatment\")\n)\n\ngmodels::fit.contrast(m3, 'dose_id2', c(0.5, 0.5, -1), conf.int = 0.95)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n Estimate Std. Error z value Pr(>|z|) lower CI\ndose_id2 c=( 0.5 0.5 -1 ) -0.4096323 0.3801683 -1.077502 0.2812558 -1.160322\n upper CI\ndose_id2 c=( 0.5 0.5 -1 ) 0.3410574\n```\n\n\n:::\n:::\n\n\n# Reference\n\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-23\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P cli 3.6.5 2025-04-23 [?] RSPM\n P dplyr * 1.2.0 2026-02-03 [?] RSPM\n P emmeans * 2.0.1 2025-12-16 [?] 
RSPM\n P estimability 1.5.1 2024-05-12 [?] RSPM\n P gdata 3.0.1 2024-10-22 [?] RSPM\n P generics 0.1.4 2025-05-09 [?] RSPM\n P glue 1.8.0 2024-09-30 [?] RSPM\n P gmodels * 2.19.1 2024-03-06 [?] RSPM\n P gtools 3.9.5 2023-11-20 [?] RSPM\n P lifecycle 1.0.5 2026-01-08 [?] RSPM\n P magrittr 2.0.4 2025-09-12 [?] RSPM\n MASS 7.3-65 2025-02-28 [2] CRAN (R 4.5.2)\n P mvtnorm 1.3-3 2025-01-10 [?] RSPM\n numDeriv 2016.8-1.1 2019-06-06 [1] RSPM\n P pillar 1.11.1 2025-09-17 [?] RSPM\n P pkgconfig 2.0.3 2019-09-22 [?] RSPM\n P R6 2.6.1 2025-02-15 [?] RSPM\n P rlang 1.1.7 2026-01-09 [?] RSPM\n P tibble 3.3.1 2026-01-11 [?] RSPM\n P tidyselect 1.2.1 2024-03-11 [?] RSPM\n utf8 1.2.6 2025-06-08 [1] RSPM\n P vctrs 0.7.1 2026-01-23 [?] RSPM\n withr 3.0.2 2024-10-28 [1] RSPM\n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n\n * ── Packages attached to the search path.\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n", + "markdown": "---\ntitle: \"Logistic Regression in R\"\n---\n\n\n\nIn binary logistic regression, there is a single binary dependent variable, coded by an indicator variable. For example, if we represent a response as 1 and non-response as 0, then the corresponding probability of response, can be between 0 (certainly not a response) and 1 (certainly a response) - hence the labeling !\n\nThe logistic model models the log-odds of an event as a linear combination of one or more independent variables (explanatory variables). 
If we observed $(y_i, x_i),$ where $y_i$ is a Bernoulli variable and $x_i$ a vector of explanatory variables, the model for $\\pi_i = P(y_i=1)$ is\n\n$$\n\\text{logit}(\\pi_i)= \\log\\left\\{ \\frac{\\pi_i}{1-\\pi_i}\\right\\} = \\beta_0 + \\beta x_i, i = 1,\\ldots,n \n$$\n\nThe model is especially useful in case-control studies and leads to the effect of risk factors by odds ratios.\n\n# Example: Lung Cancer Data\n\n*Data source: Loprinzi CL. Laurie JA. Wieand HS. Krook JE. Novotny PJ. Kugler JW. Bartel J. Law M. Bateman M. Klatt NE. et al. Prospective evaluation of prognostic variables from patient-completed questionnaires. North Central Cancer Treatment Group. Journal of Clinical Oncology. 12(3):601-7, 1994.*\n\nwgt_catn consists of: 1= patients a weight loss of zero or less, 0= patients with a weight loss of more than zero\n\ntrt01pn consists of 1= active treatment, 0 = placebo\n\n# Model Fit\n\nWe analyze the event of weight gain (or staying the same weight) in lung cancer patients in dependency of treatment (active or placebo), age, sex, ECOG performance score and calories consumed at meals. One of the most important things to remember is to ensure you tell R what your event is and what treatment comparison you are doing Active / Placebo or Placebo/Active! The easiest way to do this is to have event (or non-reference treatment) as 1, and non-event (reference treatment) as 0.\n\nBelow we are using wt_catn (0,1) and trt01pn (1,2) and sex (1,2). 
Let's see what happens !\n\nhead(lung)\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlung <- read.csv(\"../data/lung_cancer.csv\")\nhead(lung)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n inst time status age sex ph.ecog ph.karno pat.karno meal.cal wt.loss trt01p\n1 3 306 2 74 1 1 90 100 1175 NA Active\n2 3 455 2 68 1 0 90 90 1225 15 Active\n3 3 1010 1 56 1 0 90 90 NA 15 Active\n4 5 210 2 57 1 1 90 60 1150 11 Active\n5 1 883 2 60 1 0 100 90 NA 0 Active\n6 12 1022 1 74 1 1 50 80 513 0 Active\n trt01pn dose_mg dose_id wt_cat wt_catn cnsr\n1 1 10 1 NA 0\n2 1 10 1 loss 0 0\n3 1 10 1 loss 0 1\n4 1 10 1 loss 0 0\n5 1 10 1 gain 1 0\n6 1 10 1 gain 1 1\n```\n\n\n:::\n\n```{.r .cell-code}\nm1 <- glm(\n wt_catn ~ trt01pn + age + sex + ph.ecog + meal.cal,\n data = lung,\n family = binomial(link = \"logit\")\n)\nsummary(m1)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\nCall:\nglm(formula = wt_catn ~ trt01pn + age + sex + ph.ecog + meal.cal, \n family = binomial(link = \"logit\"), data = lung)\n\nCoefficients:\n Estimate Std. Error z value Pr(>|z|) \n(Intercept) -3.8623998 1.7675776 -2.185 0.0289 *\ntrt01pn 0.3887667 0.3781566 1.028 0.3039 \nage 0.0122549 0.0211553 0.579 0.5624 \nsex 0.8321005 0.3743793 2.223 0.0262 *\nph.ecog -0.3763592 0.2638322 -1.427 0.1537 \nmeal.cal 0.0008500 0.0004486 1.895 0.0581 .\n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n(Dispersion parameter for binomial family taken to be 1)\n\n Null deviance: 202.36 on 169 degrees of freedom\nResidual deviance: 190.46 on 164 degrees of freedom\n (58 observations deleted due to missingness)\nAIC: 202.46\n\nNumber of Fisher Scoring iterations: 4\n```\n\n\n:::\n:::\n\n\nR by default sets the first category as the baseline category, hence trt01pn =1 and sex =1 are the baseline level, and other levels of the variable are contrasted to this level. This is using `contr.treatment` option (more information on this later!). 
The estimate for those variables (0.3887 and 0.8321) are the increase in the log-odds of being of receiving treatment 2 vs 1, and of being of sex=2 vs 1. The exponential of the estimate is the odds ratio. For example, exp(0.3887)=1.475, hence Treatment 2 is 1.475 times as likely to have weight gain compared to Treatment 1.\n\nThe intercept represents the baseline log odds of the outcome when all predictor variables are set to zero. In the above model, we have variables treatment and sex, each coded as 1 and 2. Currently R is not treating these as binary factors, instead R thinks they are a continuous variable, and hence the intercept is where treatment=0 and sex=0. In the context of our model, this doesn't make sense as you can't have a zero gender and zero treatment (but it may not matter as we rarely look at the intercept term anyway!)\n\nHowever, if you want to have an interpretable intercept term (and if you want to match SAS output for intercept!), then it's important to ensure any factors in your model as fitted as such in R. You can do this using: `lung$trt01pn<-as.factor(lung$trt01pn)` or by changing the variable to a 0 and 1. Note if you are using the same `contr.treatment` option, then this only affects the intercept estimate not the variable estimates.\n\n## Modelling factors correctly and Interpretation of the Intercept\n\nSo far we've learnt that it is good practice to always have binary and categorical variables set as factors, and that we should specify what method we want R to use for doing any contrasts (e.g. `contr.treatment`). You can specify different contrast methods for each variable by including them in a list in the model. It can sometimes be hard to see what contrast method R is using, so best to always specify this in your model (e.g. `contrasts = list(trt01pn = \"contr.treatment\", sex=\"contr.sum\")`. 
It is helpful to view what the contrasts look like before you select which to use.\n\nA factor with 2 levels (2 treatments) using `contr.treatment` would set the first level to be the reference (0) and contrast the second level to the baseline.\n\nA factor with 4 levels (4 treatments) using `contr.treatment` would set the first level to be the reference (0), you would see 3 parameters in the model with 3 estimates corresponding to the increase in log-odds attributable to contrasting the second, third or fourth level to the baseline.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncontr.treatment(2)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n 2\n1 0\n2 1\n```\n\n\n:::\n\n```{.r .cell-code}\ncontr.treatment(4)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n 2 3 4\n1 0 0 0\n2 1 0 0\n3 0 1 0\n4 0 0 1\n```\n\n\n:::\n:::\n\n\nBelow we apply `contr.treatment` to our trt01pn and sex variables. Note: how the variables trt01pn2 and sex2 are now shown in the output, this is indicating that these rows relates to treatment=2 and sex=2. See the next section for how to interpret the estimates and change this `contr.` option.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Good practice to have binary variables (eg. treatment) identified as a factor\n# And to specify what contrast option you are using ! more on this below\nlung$trt01pn <- as.factor(lung$trt01pn)\nlung$sex <- as.factor(lung$sex)\n\nm1 <- stats::glm(\n wt_catn ~ trt01pn + age + sex + ph.ecog + meal.cal,\n data = lung,\n family = binomial(link = \"logit\"),\n contrasts = list(trt01pn = \"contr.treatment\", sex = \"contr.treatment\")\n)\nsummary(m1)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\nCall:\nstats::glm(formula = wt_catn ~ trt01pn + age + sex + ph.ecog + \n meal.cal, family = binomial(link = \"logit\"), data = lung, \n contrasts = list(trt01pn = \"contr.treatment\", sex = \"contr.treatment\"))\n\nCoefficients:\n Estimate Std. 
Error z value Pr(>|z|) \n(Intercept) -2.6415326 1.5140191 -1.745 0.0810 .\ntrt01pn2 0.3887667 0.3781566 1.028 0.3039 \nage 0.0122549 0.0211553 0.579 0.5624 \nsex2 0.8321005 0.3743793 2.223 0.0262 *\nph.ecog -0.3763592 0.2638322 -1.427 0.1537 \nmeal.cal 0.0008500 0.0004486 1.895 0.0581 .\n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n(Dispersion parameter for binomial family taken to be 1)\n\n Null deviance: 202.36 on 169 degrees of freedom\nResidual deviance: 190.46 on 164 degrees of freedom\n (58 observations deleted due to missingness)\nAIC: 202.46\n\nNumber of Fisher Scoring iterations: 4\n```\n\n\n:::\n:::\n\n\n# Model parameter estimates\n\nThe model summary contains the parameter estimates $\\beta_j$ for each explanatory variable $x_j$, corresponding to the log-odds for the response variable to take the value $1$, conditional on all other explanatory variables remaining constant. For better interpretation, we can exponentiate these estimates, to obtain estimates for the odds instead and provide 95% confidence intervals.\n\n## How R parameterizes your variables in the model\n\n- contr.treatment - \\[default\\] sets the first level to be the reference and contrasts each other level with the baseline. For example, based on the model shown below. Log-odds for Treatment 2 = -2.64153 + 0.38876 =-2.25277 , Log-odds for Treatment 1=-2.64153\n\n- contr.sum - sets the last level to be the reference and compares the mean of the dependent variable for a given level to the overall mean of the dependent variable. For treatment 1, the intercept + trt01pn1 estimate. For Treatment 2, the intercept - trt01pn1 estimate.\\\n Log-odds for Treatment 2 = -2.44715 - - 0.19438 = -2.25277, Log-odds for Treatment 1=-2.44715 - 0.19438 = -2.64153\\\n use `contr.sum(2)` to show the contrast you are using and hence how to interpret your estimates.\n\n- exponential (ratio log-odds) = odds ratio. eg. 
-2.25277/ -2.64153 = 0.85283 Treatment 2 is 0.85 times as likely (eg. 15% less likely) to have weight gain compared to Treatment 1.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nma <- stats::glm(\n wt_catn ~ trt01pn + age + sex + ph.ecog + meal.cal,\n data = lung,\n family = binomial(link = \"logit\"),\n contrasts = list(trt01pn = \"contr.treatment\", sex = \"contr.treatment\")\n)\nsummary(ma)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\nCall:\nstats::glm(formula = wt_catn ~ trt01pn + age + sex + ph.ecog + \n meal.cal, family = binomial(link = \"logit\"), data = lung, \n contrasts = list(trt01pn = \"contr.treatment\", sex = \"contr.treatment\"))\n\nCoefficients:\n Estimate Std. Error z value Pr(>|z|) \n(Intercept) -2.6415326 1.5140191 -1.745 0.0810 .\ntrt01pn2 0.3887667 0.3781566 1.028 0.3039 \nage 0.0122549 0.0211553 0.579 0.5624 \nsex2 0.8321005 0.3743793 2.223 0.0262 *\nph.ecog -0.3763592 0.2638322 -1.427 0.1537 \nmeal.cal 0.0008500 0.0004486 1.895 0.0581 .\n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n(Dispersion parameter for binomial family taken to be 1)\n\n Null deviance: 202.36 on 169 degrees of freedom\nResidual deviance: 190.46 on 164 degrees of freedom\n (58 observations deleted due to missingness)\nAIC: 202.46\n\nNumber of Fisher Scoring iterations: 4\n```\n\n\n:::\n\n```{.r .cell-code}\ncontr.treatment(2)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n 2\n1 0\n2 1\n```\n\n\n:::\n\n```{.r .cell-code}\nmb <- stats::glm(\n wt_catn ~ trt01pn + age + sex + ph.ecog + meal.cal,\n data = lung,\n family = binomial(link = \"logit\"),\n contrasts = list(trt01pn = \"contr.sum\", sex = \"contr.treatment\")\n)\nsummary(mb)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\nCall:\nstats::glm(formula = wt_catn ~ trt01pn + age + sex + ph.ecog + \n meal.cal, family = binomial(link = \"logit\"), data = lung, \n contrasts = list(trt01pn = \"contr.sum\", sex = \"contr.treatment\"))\n\nCoefficients:\n Estimate Std. 
Error z value Pr(>|z|) \n(Intercept) -2.4471493 1.4929863 -1.639 0.1012 \ntrt01pn1 -0.1943833 0.1890783 -1.028 0.3039 \nage 0.0122549 0.0211553 0.579 0.5624 \nsex2 0.8321005 0.3743793 2.223 0.0262 *\nph.ecog -0.3763592 0.2638322 -1.427 0.1537 \nmeal.cal 0.0008500 0.0004486 1.895 0.0581 .\n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n(Dispersion parameter for binomial family taken to be 1)\n\n Null deviance: 202.36 on 169 degrees of freedom\nResidual deviance: 190.46 on 164 degrees of freedom\n (58 observations deleted due to missingness)\nAIC: 202.46\n\nNumber of Fisher Scoring iterations: 4\n```\n\n\n:::\n\n```{.r .cell-code}\ncontr.sum(2)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n [,1]\n1 1\n2 -1\n```\n\n\n:::\n:::\n\n\n## Calculation of confidence intervals\n\nUsing the above model, you can output the estimates and confidence intervals using coef() and confint() and exponential back transforming using exp(). NOTE: that there are two types of confidence intervals that you can calculate. Function `confint.default` gives the Wald confidence limits, which is the default option in SAS `PROC LOGISTIC` procedure; whereas `confint` gives the profile-likelihood limits.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# model coefficients summary\nsummary(m1)$coefficients\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n Estimate Std. 
Error z value Pr(>|z|)\n(Intercept) -2.6415326252 1.5140190574 -1.7447156 0.08103439\ntrt01pn2 0.3887666677 0.3781565596 1.0280574 0.30392281\nage 0.0122549015 0.0211552875 0.5792831 0.56239813\nsex2 0.8321005169 0.3743792762 2.2226137 0.02624186\nph.ecog -0.3763592487 0.2638321918 -1.4265100 0.15372119\nmeal.cal 0.0008499918 0.0004486401 1.8945961 0.05814593\n```\n\n\n:::\n\n```{.r .cell-code}\n# Wald confidence limits\ncbind(est = exp(coef(m1)), exp(confint.default(m1)))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n est 2.5 % 97.5 %\n(Intercept) 0.07125198 0.003664896 1.385263\ntrt01pn2 1.47516031 0.702994248 3.095470\nage 1.01233030 0.971213751 1.055188\nsex2 2.29814096 1.103327506 4.786840\nph.ecog 0.68635572 0.409236994 1.151128\nmeal.cal 1.00085035 0.999970674 1.001731\n```\n\n\n:::\n\n```{.r .cell-code}\n# profile-likelihood limits\ncbind(est = exp(coef(m1)), exp(confint(m1)))\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWaiting for profiling to be done...\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stdout}\n\n```\n est 2.5 % 97.5 %\n(Intercept) 0.07125198 0.003312288 1.302587\ntrt01pn2 1.47516031 0.696210092 3.085089\nage 1.01233030 0.971670916 1.056194\nsex2 2.29814096 1.107651762 4.836770\nph.ecog 0.68635572 0.405659156 1.147452\nmeal.cal 1.00085035 0.999978126 1.001761\n```\n\n\n:::\n:::\n\n\n# Comparing 2 models\n\nTo compare two logistic models, the `residual deviances` (-2 \\* log likelihoods) are compared against a $\\chi^2$-distribution with degrees of freedom calculated using the difference in the two models' parameters. Below, the only difference is the inclusion/exclusion of age in the model, hence we test using $\\chi^2$ with 1 df. 
Here testing at the 5% level.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nm2 <- stats::glm(\n wt_catn ~ trt01pn + sex + ph.ecog + meal.cal,\n data = lung,\n family = binomial(link = \"logit\"),\n contrasts = list(trt01pn = \"contr.treatment\")\n)\nsummary(m2)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\nCall:\nstats::glm(formula = wt_catn ~ trt01pn + sex + ph.ecog + meal.cal, \n family = binomial(link = \"logit\"), data = lung, contrasts = list(trt01pn = \"contr.treatment\"))\n\nCoefficients:\n Estimate Std. Error z value Pr(>|z|) \n(Intercept) -1.8350766 0.5810727 -3.158 0.00159 **\ntrt01pn2 0.3681563 0.3761976 0.979 0.32777 \nsex2 0.7919227 0.3674936 2.155 0.03117 * \nph.ecog -0.3312031 0.2527586 -1.310 0.19008 \nmeal.cal 0.0007896 0.0004378 1.803 0.07131 . \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n(Dispersion parameter for binomial family taken to be 1)\n\n Null deviance: 202.36 on 169 degrees of freedom\nResidual deviance: 190.80 on 165 degrees of freedom\n (58 observations deleted due to missingness)\nAIC: 200.8\n\nNumber of Fisher Scoring iterations: 4\n```\n\n\n:::\n\n```{.r .cell-code}\nanova(m1, m2, test = \"LRT\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nAnalysis of Deviance Table\n\nModel 1: wt_catn ~ trt01pn + age + sex + ph.ecog + meal.cal\nModel 2: wt_catn ~ trt01pn + sex + ph.ecog + meal.cal\n Resid. Df Resid. Dev Df Deviance Pr(>Chi)\n1 164 190.46 \n2 165 190.80 -1 -0.33867 0.5606\n```\n\n\n:::\n:::\n\n\nStackexchange [here](https://stats.stackexchange.com/questions/59879/logistic-regression-anova-chi-square-test-vs-significance-of-coefficients-ano) has a good article describing this method and the difference between comparing 2 models using the likelihood ratio tests versus using wald tests and Pr\\>chisq (from the maximum likelihood estimate). 
Note: `anova(m1, m2, test = \"Chisq\")` and using `test=\"LRT\"` as above are synonymous in this context.\n\n# Predicting likelihood of response for new patients\n\nPredictions from the model for the log-odds of a patient with new data to experience a weight loss are derived using `predict()`:\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# new female, symptomatic but completely ambulatory patient consuming 2500 calories\nnew_pt <- data.frame(\n trt01pn = 1,\n age = 48,\n sex = 2,\n ph.ecog = 1,\n meal.cal = 2500\n)\nnew_pt$trt01pn <- as.factor(new_pt$trt01pn)\nnew_pt$sex <- as.factor(new_pt$sex)\npredict(m1, new_pt, type = \"response\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n 1 \n0.628882 \n```\n\n\n:::\n:::\n\n\n# Creating Treatment Contrasts for 2 treatments\n\n## {emmeans}\n\nHere we will use {emmeans} to output the log-odds of weight gain for treatment 1 and treatment 2.\n\nNOTE as per the output, these are on the logit scale, you need to exponentiate to get the odds (or use the type=\"response\" option).\n\nThe treatment comparison can also be output on the log-odds or back transformed scale as shown below.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nm3 <- stats::glm(\n wt_catn ~ trt01pn + age + sex + ph.ecog + meal.cal,\n data = lung,\n family = binomial(link = \"logit\"),\n contrasts = list(trt01pn = \"contr.treatment\")\n)\nsummary(m3)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\nCall:\nstats::glm(formula = wt_catn ~ trt01pn + age + sex + ph.ecog + \n meal.cal, family = binomial(link = \"logit\"), data = lung, \n contrasts = list(trt01pn = \"contr.treatment\"))\n\nCoefficients:\n Estimate Std. Error z value Pr(>|z|) \n(Intercept) -2.6415326 1.5140191 -1.745 0.0810 .\ntrt01pn2 0.3887667 0.3781566 1.028 0.3039 \nage 0.0122549 0.0211553 0.579 0.5624 \nsex2 0.8321005 0.3743793 2.223 0.0262 *\nph.ecog -0.3763592 0.2638322 -1.427 0.1537 \nmeal.cal 0.0008500 0.0004486 1.895 0.0581 .\n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n\n(Dispersion parameter for binomial family taken to be 1)\n\n Null deviance: 202.36 on 169 degrees of freedom\nResidual deviance: 190.46 on 164 degrees of freedom\n (58 observations deleted due to missingness)\nAIC: 202.46\n\nNumber of Fisher Scoring iterations: 4\n```\n\n\n:::\n\n```{.r .cell-code}\ncontr.treatment(2)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n 2\n1 0\n2 1\n```\n\n\n:::\n\n```{.r .cell-code}\n# log-odds for each treatment\nlsm <- emmeans::emmeans(m3, \"trt01pn\")\nlsm\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n trt01pn emmean SE df asymp.LCL asymp.UCL\n 1 -1.034 0.225 Inf -1.47 -0.5935\n 2 -0.645 0.302 Inf -1.24 -0.0529\n\nResults are averaged over the levels of: sex \nResults are given on the logit (not the response) scale. \nConfidence level used: 0.95 \n```\n\n\n:::\n\n```{.r .cell-code}\n# log-odds ratios (treatment comparison): This does all pairwise comparisons\n# However as seen below, this is TRT 1 - TRT 2\npairs(lsm)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n contrast estimate SE df z.ratio p.value\n trt01pn1 - trt01pn2 -0.389 0.378 Inf -1.028 0.3039\n\nResults are averaged over the levels of: sex \nResults are given on the log odds ratio (not the response) scale. 
\n```\n\n\n:::\n\n```{.r .cell-code}\n# the below creates tests and CI's prior to back transformation (ratios of geometric means)\npairs(lsm, type = \"response\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n contrast odds.ratio SE df null z.ratio p.value\n trt01pn1 / trt01pn2 0.678 0.256 Inf 1 -1.028 0.3039\n\nResults are averaged over the levels of: sex \nTests are performed on the log odds ratio scale \n```\n\n\n:::\n\n```{.r .cell-code}\n# see coefficients of the linear functions\ncoef(pairs(lsm))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n trt01pn c.1\n1 1 1\n2 2 -1\n```\n\n\n:::\n\n```{.r .cell-code}\n# Output treatment contrasts 2 vs 1 and 95% CIs, the type=\"response\" option back transforms the results\ntrtdiff <- contrast(lsm, \"poly\")\ntrtdiff\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n contrast estimate SE df z.ratio p.value\n linear 0.389 0.378 Inf 1.028 0.3039\n\nResults are averaged over the levels of: sex \nResults are given on the log odds ratio (not the response) scale. \n```\n\n\n:::\n\n```{.r .cell-code}\nconfint(trtdiff, type = \"response\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n contrast odds.ratio SE df asymp.LCL asymp.UCL\n linear 1.48 0.558 Inf 0.703 3.1\n\nResults are averaged over the levels of: sex \nConfidence level used: 0.95 \nIntervals are back-transformed from the log odds ratio scale \n```\n\n\n:::\n:::\n\n\nIn Summary: Treatment 2 is on average 1.48 times as likely to have weight gain compared to treatment 1, however this is not statistically significant (95% Confidence interval = 0.703-3.100, p-value= 0.3039).\n\n# Creating Treatment Contrasts for 2 or more treatments\n\n{emmeans} can also be used to do specific contrasts, instead of investigating treatment (active vs placebo), suppose we now want to look at 2 dose groups vs placebo.\n\nWe have a 3 level treatment variable (dose_id), where 1=10mg, 2=20mg doses for active treatment and 3= placebo which is 0mg. 
We want to test the null hypothesis that 0.5\\**dose10mg + 0.5*\\*dose20mg - placebo = 0. That there is not difference between the doses.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlung2 <- lung |>\n mutate(dose_id2 = as.factor(lung$dose_id))\n\nm3 <- stats::glm(\n wt_catn ~ dose_id2 + age + sex + ph.ecog + meal.cal,\n data = lung2,\n family = binomial(link = \"logit\"),\n contrasts = list(dose_id2 = \"contr.treatment\")\n)\n\n# log-odds for each treatment\nlsm3 <- emmeans::emmeans(m3, \"dose_id2\")\nlsm3\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n dose_id2 emmean SE df asymp.LCL asymp.UCL\n 1 -0.903 0.296 Inf -1.48 -0.3228\n 2 -1.201 0.348 Inf -1.88 -0.5201\n 3 -0.643 0.302 Inf -1.23 -0.0518\n\nResults are averaged over the levels of: sex \nResults are given on the logit (not the response) scale. \nConfidence level used: 0.95 \n```\n\n\n:::\n\n```{.r .cell-code}\ncontrast(lsm3, list(AveDose_vs_pbo = c(0.5, 0.5, -1)))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n contrast estimate SE df z.ratio p.value\n AveDose_vs_pbo -0.41 0.38 Inf -1.078 0.2813\n\nResults are averaged over the levels of: sex \nResults are given on the log odds ratio (not the response) scale. \n```\n\n\n:::\n\n```{.r .cell-code}\nconfint(contrast(lsm3, list(AveDose_vs_pbo = c(0.5, 0.5, -1))))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n contrast estimate SE df asymp.LCL asymp.UCL\n AveDose_vs_pbo -0.41 0.38 Inf -1.15 0.335\n\nResults are averaged over the levels of: sex \nResults are given on the log odds ratio (not the response) scale. 
\nConfidence level used: 0.95 \n```\n\n\n:::\n:::\n\n\nHere we found that on average there is -0.41 times the risk of weight gain on active vs placebo but that this is not statisticallly significantly different (95% CI -1.15 to 0.335, p-value = 0.2813).\n\nSee the emmeans vignette on creating bespoke contrasts [here](Comparisons%20and%20contrasts%20in%20emmeans).\n\n## {gmodels}\n\n{gmodels} is an alternative package to create contrasts instead of \\, you can use the `fit.contrast()` function from the `gmodels` package. The same result is obtained as \\.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlung2 <- lung |>\n mutate(dose_id2 = as.factor(lung$dose_id))\n\nm3 <- stats::glm(\n wt_catn ~ dose_id2 + age + sex + ph.ecog + meal.cal,\n data = lung2,\n family = binomial(link = \"logit\"),\n contrasts = list(dose_id2 = \"contr.treatment\")\n)\n\ngmodels::fit.contrast(m3, 'dose_id2', c(0.5, 0.5, -1), conf.int = 0.95)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n Estimate Std. Error z value Pr(>|z|) lower CI\ndose_id2 c=( 0.5 0.5 -1 ) -0.4096323 0.3801683 -1.077502 0.2812558 -1.160322\n upper CI\ndose_id2 c=( 0.5 0.5 -1 ) 0.3410574\n```\n\n\n:::\n:::\n\n\n# Reference\n\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os Ubuntu 24.04.4 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-26\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P cli 3.6.5 2025-04-23 [?] RSPM (R 4.5.0)\n P dplyr * 1.2.0 2026-02-03 [?] RSPM (R 4.5.0)\n P emmeans * 2.0.1 2025-12-16 [?] RSPM (R 4.5.0)\n P estimability 1.5.1 2024-05-12 [?] 
RSPM (R 4.5.0)\n P gdata 3.0.1 2024-10-22 [?] RSPM (R 4.5.0)\n P generics 0.1.4 2025-05-09 [?] RSPM (R 4.5.0)\n P glue 1.8.0 2024-09-30 [?] RSPM (R 4.5.0)\n P gmodels * 2.19.1 2024-03-06 [?] RSPM (R 4.5.0)\n P gtools 3.9.5 2023-11-20 [?] RSPM (R 4.5.0)\n P lifecycle 1.0.5 2026-01-08 [?] RSPM (R 4.5.0)\n P magrittr 2.0.4 2025-09-12 [?] RSPM (R 4.5.0)\n MASS 7.3-65 2025-02-28 [2] CRAN (R 4.5.2)\n P mvtnorm 1.3-3 2025-01-10 [?] RSPM (R 4.5.0)\n numDeriv 2016.8-1.1 2019-06-06 [1] RSPM (R 4.5.0)\n P pillar 1.11.1 2025-09-17 [?] RSPM (R 4.5.0)\n P pkgconfig 2.0.3 2019-09-22 [?] RSPM (R 4.5.0)\n P R6 2.6.1 2025-02-15 [?] RSPM (R 4.5.0)\n P rlang 1.1.7 2026-01-09 [?] RSPM (R 4.5.0)\n P tibble 3.3.1 2026-01-11 [?] RSPM (R 4.5.0)\n P tidyselect 1.2.1 2024-03-11 [?] RSPM (R 4.5.0)\n utf8 1.2.6 2025-06-08 [1] RSPM (R 4.5.0)\n P vctrs 0.7.1 2026-01-23 [?] RSPM (R 4.5.0)\n withr 3.0.2 2024-10-28 [1] RSPM (R 4.5.0)\n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.2/lib/R/library\n\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/R/marginal_homogeneity_tests/execute-results/html.json b/_freeze/R/marginal_homogeneity_tests/execute-results/html.json index ea82e2577..0edb3dac5 100644 --- a/_freeze/R/marginal_homogeneity_tests/execute-results/html.json +++ b/_freeze/R/marginal_homogeneity_tests/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "95381b7b1b29c8f4dfbee846e4825255", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Marginal Homogeneity Tests\"\n---\n\nThis page is solely based on **coin** package documentation including data samples which are generated inline.\n\n`coin::mh_test()` provides the McNemar test, the Cochran Q test, the Stuart(-Maxwell) test and the Madansky test of interchangeability. 
A general description of these methods is given by Agresti (2002).\n\nThe null hypothesis of marginal homogeneity is tested. If formula interface is used, the response variable and the measurement conditions are given by `y` and `x`, respectively, and `block` is a factor where each level corresponds to exactly one subject with repeated measurements: `coin::mh_test(y ~ x | block, data, subset = NULL, ...)`. We can also directly pass an object of class `\"table\"`.\n\n`coin::mh_test()` computes different tests depending on `x` and `y`:\n\n- McNemar test (McNemar, 1947) when both `y` and `x` are binary factors;\n\n- Cochran Q test (Cochran, 1950) when `y` is a binary factor and `x` is a factor with an arbitrary number of levels;\n\n- Stuart-Maxwell test (Stuart, 1955; Maxwell, 1970) when `y` is a factor with an arbitrary number of levels and `x` is a binary factor;\n\n- Madansky test of interchangeability (Madansky, 1963), which implies marginal homogeneity, when both `y` and `x` are factors with an arbitrary number of levels.\n\nThe conditional null distribution of the test statistic is used to obtain p-values and an asymptotic approximation of the exact distribution is used by default (`distribution = \"asymptotic\"`). Alternatively, the distribution can be approximated via Monte Carlo resampling or computed exactly for univariate two-sample problems (McNemar test) by setting distribution to `\"approximate\"` or `\"exact\"`, respectively.\n\n## McNemar test\n\nFor more information on the McNemar see the [McNemar’s test](https://psiaims.github.io/CAMIS/R/r_mcnemar.html) page.\n\n\n## Cochran Q test\n\n\n::: {.cell}\n\n```{.r .cell-code}\n## Effectiveness of different media for the growth of diphtheria\n## Cochran (1950, Tab. 
2)\ncases <- c(4, 2, 3, 1, 59)\nn <- sum(cases)\ncochran <- data.frame(\n diphtheria = factor(\n unlist(rep(\n list(\n c(1, 1, 1, 1),\n c(1, 1, 0, 1),\n c(0, 1, 1, 1),\n c(0, 1, 0, 1),\n c(0, 0, 0, 0)\n ),\n cases\n ))\n ),\n media = factor(rep(LETTERS[1:4], n)),\n case = factor(rep(seq_len(n), each = 4))\n)\n\nhead(cochran)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n diphtheria media case\n1 1 A 1\n2 1 B 1\n3 1 C 1\n4 1 D 1\n5 1 A 2\n6 1 B 2\n```\n\n\n:::\n\n```{.r .cell-code}\n## Asymptotic Cochran Q test (Cochran, 1950, p. 260)\ncoin::mh_test(\n diphtheria ~ media | case,\n data = cochran\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tAsymptotic Marginal Homogeneity Test\n\ndata: diphtheria by media (A, B, C, D) \n\t stratified by case\nchi-squared = 8.0526, df = 3, p-value = 0.04494\n```\n\n\n:::\n\n```{.r .cell-code}\n## Approximative Cochran Q test\nmt <- coin::mh_test(\n diphtheria ~ media | case,\n data = cochran,\n distribution = coin::approximate(nresample = 10000)\n)\ncoin::pvalue(mt) # standard p-value\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 0.0512\n99 percent confidence interval:\n 0.04568760 0.05714443 \n```\n\n\n:::\n\n```{.r .cell-code}\ncoin::midpvalue(mt) # mid-p-value\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 0.0429\n99 percent confidence interval:\n 0.03789480 0.04833997 \n```\n\n\n:::\n\n```{.r .cell-code}\ncoin::pvalue_interval(mt) # p-value interval\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n p_0 p_1 \n0.0346 0.0512 \n```\n\n\n:::\n\n```{.r .cell-code}\ncoin::size(mt, alpha = 0.05) # test size at alpha = 0.05 using the p-value\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 0.0346\n```\n\n\n:::\n:::\n\n\n## Stuart-Maxwell test\n\n\n::: {.cell}\n\n```{.r .cell-code}\n## Opinions on Pre- and Extramarital Sex\n## Agresti (2002, p. 
421)\nopinions <- c(\n \"Always wrong\",\n \"Almost always wrong\",\n \"Wrong only sometimes\",\n \"Not wrong at all\"\n)\n# fmt: skip\nPreExSex <- matrix(\n c(144, 33, 84, 126,\n 2, 4, 14, 29,\n 0, 2, 6, 25,\n 0, 0, 1, 5),\n nrow = 4,\n dimnames = list(\n \"Premarital Sex\" = opinions,\n \"Extramarital Sex\" = opinions\n )\n)\nPreExSex <- as.table(PreExSex)\n\nPreExSex\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n Extramarital Sex\nPremarital Sex Always wrong Almost always wrong Wrong only sometimes\n Always wrong 144 2 0\n Almost always wrong 33 4 2\n Wrong only sometimes 84 14 6\n Not wrong at all 126 29 25\n Extramarital Sex\nPremarital Sex Not wrong at all\n Always wrong 0\n Almost always wrong 0\n Wrong only sometimes 1\n Not wrong at all 5\n```\n\n\n:::\n\n```{.r .cell-code}\n## Asymptotic Stuart test\ncoin::mh_test(PreExSex)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tAsymptotic Marginal Homogeneity Test\n\ndata: response by\n\t conditions (Premarital.Sex, Extramarital.Sex) \n\t stratified by block\nchi-squared = 271.92, df = 3, p-value < 2.2e-16\n```\n\n\n:::\n\n```{.r .cell-code}\n## Asymptotic Stuart-Birch test\n## Note: response as ordinal\ncoin::mh_test(\n PreExSex,\n scores = list(response = 1:length(opinions))\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tAsymptotic Marginal Homogeneity Test for Ordered Data\n\ndata: response (ordered) by\n\t conditions (Premarital.Sex, Extramarital.Sex) \n\t stratified by block\nZ = 16.454, p-value < 2.2e-16\nalternative hypothesis: two.sided\n```\n\n\n:::\n:::\n\n\n## Madansky test of interchangeability\n\n\n::: {.cell}\n\n```{.r .cell-code}\n## Vote intention\n## Madansky (1963, pp. 
107-108)\n# fmt: skip\nvote <- array(\n c(120, 1, 8, 2, 2, 1, 2, 1, 7,\n 6, 2, 1, 1, 103, 5, 1, 4, 8,\n 20, 3, 31, 1, 6, 30, 2, 1, 81),\n dim = c(3, 3, 3),\n dimnames = list(\n \"July\" = c(\"Republican\", \"Democratic\", \"Uncertain\"),\n \"August\" = c(\"Republican\", \"Democratic\", \"Uncertain\"),\n \"June\" = c(\"Republican\", \"Democratic\", \"Uncertain\")\n )\n)\nvote <- as.table(vote)\n\nvote\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n, , June = Republican\n\n August\nJuly Republican Democratic Uncertain\n Republican 120 2 2\n Democratic 1 2 1\n Uncertain 8 1 7\n\n, , June = Democratic\n\n August\nJuly Republican Democratic Uncertain\n Republican 6 1 1\n Democratic 2 103 4\n Uncertain 1 5 8\n\n, , June = Uncertain\n\n August\nJuly Republican Democratic Uncertain\n Republican 20 1 2\n Democratic 3 6 1\n Uncertain 31 30 81\n```\n\n\n:::\n\n```{.r .cell-code}\n## Asymptotic Madansky test (Q = 70.77)\ncoin::mh_test(vote)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tAsymptotic Marginal Homogeneity Test\n\ndata: response by\n\t conditions (July, August, June) \n\t stratified by block\nchi-squared = 70.763, df = 4, p-value = 1.565e-14\n```\n\n\n:::\n\n```{.r .cell-code}\n## Cross-over study\n## http://www.nesug.org/proceedings/nesug00/st/st9005.pdf (link is dead now)\n# fmt: skip\ndysmenorrhea <- array(\n c(6, 2, 1, 3, 1, 0, 1, 2, 1,\n 4, 3, 0, 13, 3, 0, 8, 1, 1,\n 5, 2, 2, 10, 1, 0, 14, 2, 0),\n dim = c(3, 3, 3),\n dimnames = list(\n \"Placebo\" = c(\"None\", \"Moderate\", \"Complete\"),\n \"Low dose\" = c(\"None\", \"Moderate\", \"Complete\"),\n \"High dose\" = c(\"None\", \"Moderate\", \"Complete\")\n )\n)\ndysmenorrhea <- as.table(dysmenorrhea)\n\ndysmenorrhea\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n, , High dose = None\n\n Low dose\nPlacebo None Moderate Complete\n None 6 3 1\n Moderate 2 1 2\n Complete 1 0 1\n\n, , High dose = Moderate\n\n Low dose\nPlacebo None Moderate Complete\n None 4 13 8\n Moderate 3 3 1\n 
Complete 0 0 1\n\n, , High dose = Complete\n\n Low dose\nPlacebo None Moderate Complete\n None 5 10 14\n Moderate 2 1 2\n Complete 2 0 0\n```\n\n\n:::\n\n```{.r .cell-code}\n## Asymptotic Madansky-Birch test (Q = 53.76)\n## Note: response as ordinal\ncoin::mh_test(\n dysmenorrhea,\n scores = list(response = 1:3)\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tAsymptotic Marginal Homogeneity Test for Ordered Data\n\ndata: response (ordered) by\n\t conditions (Placebo, Low.dose, High.dose) \n\t stratified by block\nchi-squared = 53.762, df = 2, p-value = 2.117e-12\n```\n\n\n:::\n\n```{.r .cell-code}\n## Asymptotic Madansky-Birch test (Q = 47.29)\n## Note: response and measurement conditions as ordinal\ncoin::mh_test(\n dysmenorrhea,\n scores = list(response = 1:3, conditions = 1:3)\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tAsymptotic Marginal Homogeneity Test for Ordered Data\n\ndata: response (ordered) by\n\t conditions (Placebo < Low.dose < High.dose) \n\t stratified by block\nZ = 6.8764, p-value = 6.138e-12\nalternative hypothesis: two.sided\n```\n\n\n:::\n:::\n\n\n## Reference\n\nHothorn T, Hornik K, van de Wiel MA, Zeileis A (2006). A Lego system for conditional inference. The American Statistician, 60 (3), 257-263. doi:10.1198/000313006X118430 \n\nAgresti, A. (2002). Categorical Data Analysis, Second Edition. Hoboken, New Jersey: John Wiley & Sons.\n\nBirch, M. W. (1965). The detection of partial association, II: The general case. Journal of the Royal Statistical Society B 27(1), 111–124. doi:10.1111/j.2517-6161.1965.tb00593.x\n\nCochran, W. G. (1950). The comparison of percentages in matched samples. Biometrika 37(3/4), 256–266. doi:10.1093/biomet/37.3-4.256\n\nMadansky, A. (1963). Tests of homogeneity for correlated samples. Journal of the American Statistical Association 58(301), 97–119. doi:10.1080/01621459.1963.10500835\n\nMaxwell, A. E. (1970). Comparing the classification of subjects by two independent judges. 
British Journal of Psychiatry 116(535), 651–655. doi:10.1192/bjp.116.535.651\n\nMcNemar, Q. (1947). Note on the sampling error of the difference between correlated proportions or percentages. Psychometrika 12(2), 153–157. doi:10.1007/BF02295996\n\nStuart, A. (1955). A test for homogeneity of the marginal distributions in a two-way classification. Biometrika 42(3/4), 412–416. doi:10.1093/biomet/42.3-4.412\n\nWhite, A. A., Landis, J. R. and Cooper, M. M. (1982). A note on the equivalence of several marginal homogeneity test criteria for categorical data. International Statistical Review 50(1), 27–34. doi:10.2307/1402457\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-23\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n codetools 0.2-20 2024-03-31 [2] CRAN (R 4.5.2)\n P coin 1.4-3 2023-09-27 [?] RSPM\n lattice 0.22-7 2025-04-02 [2] CRAN (R 4.5.2)\n P libcoin 1.0-10 2023-09-27 [?] RSPM\n MASS 7.3-65 2025-02-28 [2] CRAN (R 4.5.2)\n Matrix 1.7-4 2025-08-28 [2] CRAN (R 4.5.2)\n P matrixStats 1.5.0 2025-01-07 [?] RSPM\n P modeltools 0.2-24 2025-05-02 [?] RSPM\n P multcomp 1.4-29 2025-10-20 [?] RSPM\n P mvtnorm 1.3-3 2025-01-10 [?] RSPM\n P sandwich 3.1-1 2024-09-15 [?] RSPM\n survival 3.8-3 2024-12-17 [2] CRAN (R 4.5.2)\n P TH.data 1.1-5 2025-11-17 [?] RSPM\n P zoo 1.8-15 2025-12-15 [?] 
RSPM\n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n\n:::\n", + "markdown": "---\ntitle: \"Marginal Homogeneity Tests\"\n---\n\nThis page is solely based on **coin** package documentation including data samples which are generated inline.\n\n`coin::mh_test()` provides the McNemar test, the Cochran Q test, the Stuart(-Maxwell) test and the Madansky test of interchangeability. A general description of these methods is given by Agresti (2002).\n\nThe null hypothesis of marginal homogeneity is tested. If formula interface is used, the response variable and the measurement conditions are given by `y` and `x`, respectively, and `block` is a factor where each level corresponds to exactly one subject with repeated measurements: `coin::mh_test(y ~ x | block, data, subset = NULL, ...)`. We can also directly pass an object of class `\"table\"`.\n\n`coin::mh_test()` computes different tests depending on `x` and `y`:\n\n- McNemar test (McNemar, 1947) when both `y` and `x` are binary factors;\n\n- Cochran Q test (Cochran, 1950) when `y` is a binary factor and `x` is a factor with an arbitrary number of levels;\n\n- Stuart-Maxwell test (Stuart, 1955; Maxwell, 1970) when `y` is a factor with an arbitrary number of levels and `x` is a binary factor;\n\n- Madansky test of interchangeability (Madansky, 1963), which implies marginal homogeneity, when both `y` and `x` are factors with an arbitrary number of levels.\n\nThe conditional null distribution of the test statistic is used to obtain p-values and an asymptotic approximation of the exact distribution is used by default (`distribution = \"asymptotic\"`). 
Alternatively, the distribution can be approximated via Monte Carlo resampling or computed exactly for univariate two-sample problems (McNemar test) by setting distribution to `\"approximate\"` or `\"exact\"`, respectively.\n\n## McNemar test\n\nFor more information on the McNemar see the [McNemar’s test](https://psiaims.github.io/CAMIS/R/r_mcnemar.html) page.\n\n\n## Cochran Q test\n\n\n::: {.cell}\n\n```{.r .cell-code}\n## Effectiveness of different media for the growth of diphtheria\n## Cochran (1950, Tab. 2)\ncases <- c(4, 2, 3, 1, 59)\nn <- sum(cases)\ncochran <- data.frame(\n diphtheria = factor(\n unlist(rep(\n list(\n c(1, 1, 1, 1),\n c(1, 1, 0, 1),\n c(0, 1, 1, 1),\n c(0, 1, 0, 1),\n c(0, 0, 0, 0)\n ),\n cases\n ))\n ),\n media = factor(rep(LETTERS[1:4], n)),\n case = factor(rep(seq_len(n), each = 4))\n)\n\nhead(cochran)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n diphtheria media case\n1 1 A 1\n2 1 B 1\n3 1 C 1\n4 1 D 1\n5 1 A 2\n6 1 B 2\n```\n\n\n:::\n\n```{.r .cell-code}\n## Asymptotic Cochran Q test (Cochran, 1950, p. 
260)\ncoin::mh_test(\n diphtheria ~ media | case,\n data = cochran\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tAsymptotic Marginal Homogeneity Test\n\ndata: diphtheria by media (A, B, C, D) \n\t stratified by case\nchi-squared = 8.0526, df = 3, p-value = 0.04494\n```\n\n\n:::\n\n```{.r .cell-code}\n## Approximative Cochran Q test\nmt <- coin::mh_test(\n diphtheria ~ media | case,\n data = cochran,\n distribution = coin::approximate(nresample = 10000)\n)\ncoin::pvalue(mt) # standard p-value\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 0.0512\n99 percent confidence interval:\n 0.04568760 0.05714443 \n```\n\n\n:::\n\n```{.r .cell-code}\ncoin::midpvalue(mt) # mid-p-value\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 0.04215\n99 percent confidence interval:\n 0.03723595 0.04759948 \n```\n\n\n:::\n\n```{.r .cell-code}\ncoin::pvalue_interval(mt) # p-value interval\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n p_0 p_1 \n0.0331 0.0512 \n```\n\n\n:::\n\n```{.r .cell-code}\ncoin::size(mt, alpha = 0.05) # test size at alpha = 0.05 using the p-value\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 0.0331\n```\n\n\n:::\n:::\n\n\n## Stuart-Maxwell test\n\n\n::: {.cell}\n\n```{.r .cell-code}\n## Opinions on Pre- and Extramarital Sex\n## Agresti (2002, p. 
421)\nopinions <- c(\n \"Always wrong\",\n \"Almost always wrong\",\n \"Wrong only sometimes\",\n \"Not wrong at all\"\n)\n# fmt: skip\nPreExSex <- matrix(\n c(144, 33, 84, 126,\n 2, 4, 14, 29,\n 0, 2, 6, 25,\n 0, 0, 1, 5),\n nrow = 4,\n dimnames = list(\n \"Premarital Sex\" = opinions,\n \"Extramarital Sex\" = opinions\n )\n)\nPreExSex <- as.table(PreExSex)\n\nPreExSex\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n Extramarital Sex\nPremarital Sex Always wrong Almost always wrong Wrong only sometimes\n Always wrong 144 2 0\n Almost always wrong 33 4 2\n Wrong only sometimes 84 14 6\n Not wrong at all 126 29 25\n Extramarital Sex\nPremarital Sex Not wrong at all\n Always wrong 0\n Almost always wrong 0\n Wrong only sometimes 1\n Not wrong at all 5\n```\n\n\n:::\n\n```{.r .cell-code}\n## Asymptotic Stuart test\ncoin::mh_test(PreExSex)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tAsymptotic Marginal Homogeneity Test\n\ndata: response by\n\t conditions (Premarital.Sex, Extramarital.Sex) \n\t stratified by block\nchi-squared = 271.92, df = 3, p-value < 2.2e-16\n```\n\n\n:::\n\n```{.r .cell-code}\n## Asymptotic Stuart-Birch test\n## Note: response as ordinal\ncoin::mh_test(\n PreExSex,\n scores = list(response = 1:length(opinions))\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tAsymptotic Marginal Homogeneity Test for Ordered Data\n\ndata: response (ordered) by\n\t conditions (Premarital.Sex, Extramarital.Sex) \n\t stratified by block\nZ = 16.454, p-value < 2.2e-16\nalternative hypothesis: two.sided\n```\n\n\n:::\n:::\n\n\n## Madansky test of interchangeability\n\n\n::: {.cell}\n\n```{.r .cell-code}\n## Vote intention\n## Madansky (1963, pp. 
107-108)\n# fmt: skip\nvote <- array(\n c(120, 1, 8, 2, 2, 1, 2, 1, 7,\n 6, 2, 1, 1, 103, 5, 1, 4, 8,\n 20, 3, 31, 1, 6, 30, 2, 1, 81),\n dim = c(3, 3, 3),\n dimnames = list(\n \"July\" = c(\"Republican\", \"Democratic\", \"Uncertain\"),\n \"August\" = c(\"Republican\", \"Democratic\", \"Uncertain\"),\n \"June\" = c(\"Republican\", \"Democratic\", \"Uncertain\")\n )\n)\nvote <- as.table(vote)\n\nvote\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n, , June = Republican\n\n August\nJuly Republican Democratic Uncertain\n Republican 120 2 2\n Democratic 1 2 1\n Uncertain 8 1 7\n\n, , June = Democratic\n\n August\nJuly Republican Democratic Uncertain\n Republican 6 1 1\n Democratic 2 103 4\n Uncertain 1 5 8\n\n, , June = Uncertain\n\n August\nJuly Republican Democratic Uncertain\n Republican 20 1 2\n Democratic 3 6 1\n Uncertain 31 30 81\n```\n\n\n:::\n\n```{.r .cell-code}\n## Asymptotic Madansky test (Q = 70.77)\ncoin::mh_test(vote)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tAsymptotic Marginal Homogeneity Test\n\ndata: response by\n\t conditions (July, August, June) \n\t stratified by block\nchi-squared = 70.763, df = 4, p-value = 1.565e-14\n```\n\n\n:::\n\n```{.r .cell-code}\n## Cross-over study\n## http://www.nesug.org/proceedings/nesug00/st/st9005.pdf (link is dead now)\n# fmt: skip\ndysmenorrhea <- array(\n c(6, 2, 1, 3, 1, 0, 1, 2, 1,\n 4, 3, 0, 13, 3, 0, 8, 1, 1,\n 5, 2, 2, 10, 1, 0, 14, 2, 0),\n dim = c(3, 3, 3),\n dimnames = list(\n \"Placebo\" = c(\"None\", \"Moderate\", \"Complete\"),\n \"Low dose\" = c(\"None\", \"Moderate\", \"Complete\"),\n \"High dose\" = c(\"None\", \"Moderate\", \"Complete\")\n )\n)\ndysmenorrhea <- as.table(dysmenorrhea)\n\ndysmenorrhea\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n, , High dose = None\n\n Low dose\nPlacebo None Moderate Complete\n None 6 3 1\n Moderate 2 1 2\n Complete 1 0 1\n\n, , High dose = Moderate\n\n Low dose\nPlacebo None Moderate Complete\n None 4 13 8\n Moderate 3 3 1\n 
Complete 0 0 1\n\n, , High dose = Complete\n\n Low dose\nPlacebo None Moderate Complete\n None 5 10 14\n Moderate 2 1 2\n Complete 2 0 0\n```\n\n\n:::\n\n```{.r .cell-code}\n## Asymptotic Madansky-Birch test (Q = 53.76)\n## Note: response as ordinal\ncoin::mh_test(\n dysmenorrhea,\n scores = list(response = 1:3)\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tAsymptotic Marginal Homogeneity Test for Ordered Data\n\ndata: response (ordered) by\n\t conditions (Placebo, Low.dose, High.dose) \n\t stratified by block\nchi-squared = 53.762, df = 2, p-value = 2.117e-12\n```\n\n\n:::\n\n```{.r .cell-code}\n## Asymptotic Madansky-Birch test (Q = 47.29)\n## Note: response and measurement conditions as ordinal\ncoin::mh_test(\n dysmenorrhea,\n scores = list(response = 1:3, conditions = 1:3)\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tAsymptotic Marginal Homogeneity Test for Ordered Data\n\ndata: response (ordered) by\n\t conditions (Placebo < Low.dose < High.dose) \n\t stratified by block\nZ = 6.8764, p-value = 6.138e-12\nalternative hypothesis: two.sided\n```\n\n\n:::\n:::\n\n\n## Reference\n\nHothorn T, Hornik K, van de Wiel MA, Zeileis A (2006). A Lego system for conditional inference. The American Statistician, 60 (3), 257-263. doi:10.1198/000313006X118430 \n\nAgresti, A. (2002). Categorical Data Analysis, Second Edition. Hoboken, New Jersey: John Wiley & Sons.\n\nBirch, M. W. (1965). The detection of partial association, II: The general case. Journal of the Royal Statistical Society B 27(1), 111–124. doi:10.1111/j.2517-6161.1965.tb00593.x\n\nCochran, W. G. (1950). The comparison of percentages in matched samples. Biometrika 37(3/4), 256–266. doi:10.1093/biomet/37.3-4.256\n\nMadansky, A. (1963). Tests of homogeneity for correlated samples. Journal of the American Statistical Association 58(301), 97–119. doi:10.1080/01621459.1963.10500835\n\nMaxwell, A. E. (1970). Comparing the classification of subjects by two independent judges. 
British Journal of Psychiatry 116(535), 651–655. doi:10.1192/bjp.116.535.651\n\nMcNemar, Q. (1947). Note on the sampling error of the difference between correlated proportions or percentages. Psychometrika 12(2), 153–157. doi:10.1007/BF02295996\n\nStuart, A. (1955). A test for homogeneity of the marginal distributions in a two-way classification. Biometrika 42(3/4), 412–416. doi:10.1093/biomet/42.3-4.412\n\nWhite, A. A., Landis, J. R. and Cooper, M. M. (1982). A note on the equivalence of several marginal homogeneity test criteria for categorical data. International Statistical Review 50(1), 27–34. doi:10.2307/1402457\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os Ubuntu 24.04.3 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-17\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n codetools 0.2-20 2024-03-31 [2] CRAN (R 4.5.2)\n P coin 1.4-3 2023-09-27 [?] RSPM (R 4.5.0)\n lattice 0.22-7 2025-04-02 [2] CRAN (R 4.5.2)\n P libcoin 1.0-10 2023-09-27 [?] RSPM (R 4.5.0)\n MASS 7.3-65 2025-02-28 [2] CRAN (R 4.5.2)\n Matrix 1.7-4 2025-08-28 [2] CRAN (R 4.5.2)\n P matrixStats 1.5.0 2025-01-07 [?] RSPM (R 4.5.0)\n P modeltools 0.2-24 2025-05-02 [?] RSPM (R 4.5.0)\n P multcomp 1.4-29 2025-10-20 [?] RSPM (R 4.5.0)\n P mvtnorm 1.3-3 2025-01-10 [?] RSPM (R 4.5.0)\n P sandwich 3.1-1 2024-09-15 [?] RSPM (R 4.5.0)\n survival 3.8-3 2024-12-17 [2] CRAN (R 4.5.2)\n P TH.data 1.1-5 2025-11-17 [?] RSPM (R 4.5.0)\n P zoo 1.8-15 2025-12-15 [?] 
RSPM (R 4.5.0)\n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.2/lib/R/library\n\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n\n:::\n", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/R/mi_mar_predictive_mean_match/execute-results/html.json b/_freeze/R/mi_mar_predictive_mean_match/execute-results/html.json index ed871e8d2..d7c41f694 100644 --- a/_freeze/R/mi_mar_predictive_mean_match/execute-results/html.json +++ b/_freeze/R/mi_mar_predictive_mean_match/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "94208e05fedc3255c75da1948d31a700", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Multiple Imputation: Predictive Mean Matching\"\n---\n\n## Overview\n\nPredictive mean matching is a technique for missing value imputation. It calculates the predicted value of the missing variable based on a regression model from complete data, then selects one value (from the observed) that produces the closest prediction. PMM is robust to transformation, less vulnerable to model misspecification. More theoretical details for PMM can be found [here](https://stefvanbuuren.name/fimd/sec-pmm.html).\n\nAssumption for PMM: distribution of missing is the same aas obsereved data of the candidates that produce the closest values to the predicted value by the missing entry.\n\n\n## Available R package\n\n[mice](https://amices.org/mice/index.html) is a powerful R package developed by Stef van Buuren, Karin Groothuis-Oudshoorn and other contributors. 
\n\nImplementation of PMM in `mice`: \n\n* Predictive mean matching, [mice.impute.pmm](https://amices.org/mice/reference/mice.impute.pmm.html)\n* Weighted predictive mean matching, [mice.impute.midastouch](https://amices.org/mice/reference/mice.impute.midastouch.html)\n* Multivariate predictive mean matching, [mice.impute.mpmm](https://amices.org/mice/reference/mice.impute.mpmm.html)\n\n## Example\n\nWe use the small dataset `nhanes` included in `mice` package. It has 25 rows, and three out of four variables have missings. \n\nThe original NHANES data is a large national level survey, some are publicly available via R package `nhanes`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(mice)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\n\nAttaching package: 'mice'\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nThe following object is masked from 'package:stats':\n\n filter\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nThe following objects are masked from 'package:base':\n\n cbind, rbind\n```\n\n\n:::\n\n```{.r .cell-code}\n# load example dataset from mice\nhead(nhanes)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n age bmi hyp chl\n1 1 NA NA NA\n2 2 22.7 1 187\n3 1 NA 1 187\n4 3 NA NA NA\n5 1 20.4 1 113\n6 3 NA NA 184\n```\n\n\n:::\n\n```{.r .cell-code}\nsummary(nhanes)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n age bmi hyp chl \n Min. :1.00 Min. :20.40 Min. :1.000 Min. :113.0 \n 1st Qu.:1.00 1st Qu.:22.65 1st Qu.:1.000 1st Qu.:185.0 \n Median :2.00 Median :26.75 Median :1.000 Median :187.0 \n Mean :1.76 Mean :26.56 Mean :1.235 Mean :191.4 \n 3rd Qu.:2.00 3rd Qu.:28.93 3rd Qu.:1.000 3rd Qu.:212.0 \n Max. :3.00 Max. :35.30 Max. :2.000 Max. :284.0 \n NA's :9 NA's :8 NA's :10 \n```\n\n\n:::\n:::\n\n\n### Impute with PMM\n\nTo impute with PMM is straightforward: specify the method, `method = pmm`. 
\n\n\n::: {.cell}\n\n```{.r .cell-code}\nimp_pmm <- mice::mice(nhanes, method = 'pmm', m = 5, maxit = 10)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n iter imp variable\n 1 1 bmi hyp chl\n 1 2 bmi hyp chl\n 1 3 bmi hyp chl\n 1 4 bmi hyp chl\n 1 5 bmi hyp chl\n 2 1 bmi hyp chl\n 2 2 bmi hyp chl\n 2 3 bmi hyp chl\n 2 4 bmi hyp chl\n 2 5 bmi hyp chl\n 3 1 bmi hyp chl\n 3 2 bmi hyp chl\n 3 3 bmi hyp chl\n 3 4 bmi hyp chl\n 3 5 bmi hyp chl\n 4 1 bmi hyp chl\n 4 2 bmi hyp chl\n 4 3 bmi hyp chl\n 4 4 bmi hyp chl\n 4 5 bmi hyp chl\n 5 1 bmi hyp chl\n 5 2 bmi hyp chl\n 5 3 bmi hyp chl\n 5 4 bmi hyp chl\n 5 5 bmi hyp chl\n 6 1 bmi hyp chl\n 6 2 bmi hyp chl\n 6 3 bmi hyp chl\n 6 4 bmi hyp chl\n 6 5 bmi hyp chl\n 7 1 bmi hyp chl\n 7 2 bmi hyp chl\n 7 3 bmi hyp chl\n 7 4 bmi hyp chl\n 7 5 bmi hyp chl\n 8 1 bmi hyp chl\n 8 2 bmi hyp chl\n 8 3 bmi hyp chl\n 8 4 bmi hyp chl\n 8 5 bmi hyp chl\n 9 1 bmi hyp chl\n 9 2 bmi hyp chl\n 9 3 bmi hyp chl\n 9 4 bmi hyp chl\n 9 5 bmi hyp chl\n 10 1 bmi hyp chl\n 10 2 bmi hyp chl\n 10 3 bmi hyp chl\n 10 4 bmi hyp chl\n 10 5 bmi hyp chl\n```\n\n\n:::\n\n```{.r .cell-code}\nimp_pmm\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nClass: mids\nNumber of multiple imputations: 5 \nImputation methods:\n age bmi hyp chl \n \"\" \"pmm\" \"pmm\" \"pmm\" \nPredictorMatrix:\n age bmi hyp chl\nage 0 1 1 1\nbmi 1 0 1 1\nhyp 1 1 0 1\nchl 1 1 1 0\n```\n\n\n:::\n\n```{.r .cell-code}\n# imputations for bmi\nimp_pmm$imp$bmi\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n 1 2 3 4 5\n1 35.3 27.2 26.3 26.3 29.6\n3 27.2 29.6 29.6 26.3 27.2\n4 27.5 20.4 22.7 22.5 27.4\n6 24.9 22.5 21.7 25.5 20.4\n10 21.7 22.5 22.0 22.5 22.0\n11 29.6 27.2 22.0 28.7 30.1\n12 27.2 20.4 28.7 27.4 20.4\n16 27.2 30.1 26.3 30.1 22.0\n21 27.2 35.3 33.2 22.0 22.7\n```\n\n\n:::\n:::\n\n\nAn alternative to the standard PMM is `midastouch`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nimp_pmms <- mice::mice(nhanes, method = 'midastouch', m = 5, maxit = 10)\n```\n\n::: 
{.cell-output .cell-output-stdout}\n\n```\n\n iter imp variable\n 1 1 bmi hyp chl\n 1 2 bmi hyp chl\n 1 3 bmi hyp chl\n 1 4 bmi hyp chl\n 1 5 bmi hyp chl\n 2 1 bmi hyp chl\n 2 2 bmi hyp chl\n 2 3 bmi hyp chl\n 2 4 bmi hyp chl\n 2 5 bmi hyp chl\n 3 1 bmi hyp chl\n 3 2 bmi hyp chl\n 3 3 bmi hyp chl\n 3 4 bmi hyp chl\n 3 5 bmi hyp chl\n 4 1 bmi hyp chl\n 4 2 bmi hyp chl\n 4 3 bmi hyp chl\n 4 4 bmi hyp chl\n 4 5 bmi hyp chl\n 5 1 bmi hyp chl\n 5 2 bmi hyp chl\n 5 3 bmi hyp chl\n 5 4 bmi hyp chl\n 5 5 bmi hyp chl\n 6 1 bmi hyp chl\n 6 2 bmi hyp chl\n 6 3 bmi hyp chl\n 6 4 bmi hyp chl\n 6 5 bmi hyp chl\n 7 1 bmi hyp chl\n 7 2 bmi hyp chl\n 7 3 bmi hyp chl\n 7 4 bmi hyp chl\n 7 5 bmi hyp chl\n 8 1 bmi hyp chl\n 8 2 bmi hyp chl\n 8 3 bmi hyp chl\n 8 4 bmi hyp chl\n 8 5 bmi hyp chl\n 9 1 bmi hyp chl\n 9 2 bmi hyp chl\n 9 3 bmi hyp chl\n 9 4 bmi hyp chl\n 9 5 bmi hyp chl\n 10 1 bmi hyp chl\n 10 2 bmi hyp chl\n 10 3 bmi hyp chl\n 10 4 bmi hyp chl\n 10 5 bmi hyp chl\n```\n\n\n:::\n\n```{.r .cell-code}\nimp_pmm\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nClass: mids\nNumber of multiple imputations: 5 \nImputation methods:\n age bmi hyp chl \n \"\" \"pmm\" \"pmm\" \"pmm\" \nPredictorMatrix:\n age bmi hyp chl\nage 0 1 1 1\nbmi 1 0 1 1\nhyp 1 1 0 1\nchl 1 1 1 0\n```\n\n\n:::\n\n```{.r .cell-code}\nimp_pmms$imp$bmi\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n 1 2 3 4 5\n1 20.4 22.5 29.6 27.5 22.0\n3 30.1 29.6 29.6 30.1 30.1\n4 25.5 27.4 27.4 21.7 27.5\n6 25.5 21.7 22.7 25.5 27.5\n10 27.5 27.4 22.7 27.5 26.3\n11 30.1 29.6 29.6 29.6 30.1\n12 22.7 22.5 27.4 27.5 28.7\n16 20.4 30.1 29.6 29.6 30.1\n21 33.2 33.2 29.6 29.6 30.1\n```\n\n\n:::\n:::\n\n\n# Reference\n\nStef van Buuren, Karin Groothuis-Oudshoorn (2011). mice: Multivariate Imputation by Chained Equations in R. Journal of Statistical Software, 45(3), 1-67. 
DOI 10.18637/jss.v045.i03\n", + "markdown": "---\ntitle: \"Multiple Imputation: Predictive Mean Matching\"\n---\n\n## Overview\n\nPredictive mean matching is a technique for missing value imputation. It calculates the predicted value of the missing variable based on a regression model from complete data, then selects one value (from the observed) that produces the closest prediction. PMM is robust to transformation, less vulnerable to model misspecification. More theoretical details for PMM can be found [here](https://stefvanbuuren.name/fimd/sec-pmm.html).\n\nAssumption for PMM: distribution of missing is the same aas obsereved data of the candidates that produce the closest values to the predicted value by the missing entry.\n\n\n## Available R package\n\n[mice](https://amices.org/mice/index.html) is a powerful R package developed by Stef van Buuren, Karin Groothuis-Oudshoorn and other contributors. \n\nImplementation of PMM in `mice`: \n\n* Predictive mean matching, [mice.impute.pmm](https://amices.org/mice/reference/mice.impute.pmm.html)\n* Weighted predictive mean matching, [mice.impute.midastouch](https://amices.org/mice/reference/mice.impute.midastouch.html)\n* Multivariate predictive mean matching, [mice.impute.mpmm](https://amices.org/mice/reference/mice.impute.mpmm.html)\n\n## Example\n\nWe use the small dataset `nhanes` included in `mice` package. It has 25 rows, and three out of four variables have missings. 
\n\nThe original NHANES data is a large national level survey, some are publicly available via R package `nhanes`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(mice)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\n\nAttaching package: 'mice'\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nThe following object is masked from 'package:stats':\n\n filter\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nThe following objects are masked from 'package:base':\n\n cbind, rbind\n```\n\n\n:::\n\n```{.r .cell-code}\n# load example dataset from mice\nhead(nhanes)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n age bmi hyp chl\n1 1 NA NA NA\n2 2 22.7 1 187\n3 1 NA 1 187\n4 3 NA NA NA\n5 1 20.4 1 113\n6 3 NA NA 184\n```\n\n\n:::\n\n```{.r .cell-code}\nsummary(nhanes)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n age bmi hyp chl \n Min. :1.00 Min. :20.40 Min. :1.000 Min. :113.0 \n 1st Qu.:1.00 1st Qu.:22.65 1st Qu.:1.000 1st Qu.:185.0 \n Median :2.00 Median :26.75 Median :1.000 Median :187.0 \n Mean :1.76 Mean :26.56 Mean :1.235 Mean :191.4 \n 3rd Qu.:2.00 3rd Qu.:28.93 3rd Qu.:1.000 3rd Qu.:212.0 \n Max. :3.00 Max. :35.30 Max. :2.000 Max. :284.0 \n NA's :9 NA's :8 NA's :10 \n```\n\n\n:::\n:::\n\n\n### Impute with PMM\n\nTo impute with PMM is straightforward: specify the method, `method = pmm`. 
\n\n\n::: {.cell}\n\n```{.r .cell-code}\nimp_pmm <- mice::mice(nhanes, method = 'pmm', m = 5, maxit = 10)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n iter imp variable\n 1 1 bmi hyp chl\n 1 2 bmi hyp chl\n 1 3 bmi hyp chl\n 1 4 bmi hyp chl\n 1 5 bmi hyp chl\n 2 1 bmi hyp chl\n 2 2 bmi hyp chl\n 2 3 bmi hyp chl\n 2 4 bmi hyp chl\n 2 5 bmi hyp chl\n 3 1 bmi hyp chl\n 3 2 bmi hyp chl\n 3 3 bmi hyp chl\n 3 4 bmi hyp chl\n 3 5 bmi hyp chl\n 4 1 bmi hyp chl\n 4 2 bmi hyp chl\n 4 3 bmi hyp chl\n 4 4 bmi hyp chl\n 4 5 bmi hyp chl\n 5 1 bmi hyp chl\n 5 2 bmi hyp chl\n 5 3 bmi hyp chl\n 5 4 bmi hyp chl\n 5 5 bmi hyp chl\n 6 1 bmi hyp chl\n 6 2 bmi hyp chl\n 6 3 bmi hyp chl\n 6 4 bmi hyp chl\n 6 5 bmi hyp chl\n 7 1 bmi hyp chl\n 7 2 bmi hyp chl\n 7 3 bmi hyp chl\n 7 4 bmi hyp chl\n 7 5 bmi hyp chl\n 8 1 bmi hyp chl\n 8 2 bmi hyp chl\n 8 3 bmi hyp chl\n 8 4 bmi hyp chl\n 8 5 bmi hyp chl\n 9 1 bmi hyp chl\n 9 2 bmi hyp chl\n 9 3 bmi hyp chl\n 9 4 bmi hyp chl\n 9 5 bmi hyp chl\n 10 1 bmi hyp chl\n 10 2 bmi hyp chl\n 10 3 bmi hyp chl\n 10 4 bmi hyp chl\n 10 5 bmi hyp chl\n```\n\n\n:::\n\n```{.r .cell-code}\nimp_pmm\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nClass: mids\nNumber of multiple imputations: 5 \nImputation methods:\n age bmi hyp chl \n \"\" \"pmm\" \"pmm\" \"pmm\" \nPredictorMatrix:\n age bmi hyp chl\nage 0 1 1 1\nbmi 1 0 1 1\nhyp 1 1 0 1\nchl 1 1 1 0\n```\n\n\n:::\n\n```{.r .cell-code}\n# imputations for bmi\nimp_pmm$imp$bmi\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n 1 2 3 4 5\n1 26.3 28.7 27.2 27.2 26.3\n3 33.2 22.0 30.1 30.1 27.2\n4 22.5 25.5 25.5 20.4 27.5\n6 25.5 24.9 22.5 25.5 22.5\n10 25.5 26.3 27.4 27.4 27.4\n11 30.1 29.6 35.3 20.4 27.4\n12 27.2 27.5 20.4 20.4 22.0\n16 35.3 35.3 30.1 33.2 27.2\n21 30.1 29.6 20.4 30.1 30.1\n```\n\n\n:::\n:::\n\n\nAn alternative to the standard PMM is `midastouch`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nimp_pmms <- mice::mice(nhanes, method = 'midastouch', m = 5, maxit = 10)\n```\n\n::: 
{.cell-output .cell-output-stdout}\n\n```\n\n iter imp variable\n 1 1 bmi hyp chl\n 1 2 bmi hyp chl\n 1 3 bmi hyp chl\n 1 4 bmi hyp chl\n 1 5 bmi hyp chl\n 2 1 bmi hyp chl\n 2 2 bmi hyp chl\n 2 3 bmi hyp chl\n 2 4 bmi hyp chl\n 2 5 bmi hyp chl\n 3 1 bmi hyp chl\n 3 2 bmi hyp chl\n 3 3 bmi hyp chl\n 3 4 bmi hyp chl\n 3 5 bmi hyp chl\n 4 1 bmi hyp chl\n 4 2 bmi hyp chl\n 4 3 bmi hyp chl\n 4 4 bmi hyp chl\n 4 5 bmi hyp chl\n 5 1 bmi hyp chl\n 5 2 bmi hyp chl\n 5 3 bmi hyp chl\n 5 4 bmi hyp chl\n 5 5 bmi hyp chl\n 6 1 bmi hyp chl\n 6 2 bmi hyp chl\n 6 3 bmi hyp chl\n 6 4 bmi hyp chl\n 6 5 bmi hyp chl\n 7 1 bmi hyp chl\n 7 2 bmi hyp chl\n 7 3 bmi hyp chl\n 7 4 bmi hyp chl\n 7 5 bmi hyp chl\n 8 1 bmi hyp chl\n 8 2 bmi hyp chl\n 8 3 bmi hyp chl\n 8 4 bmi hyp chl\n 8 5 bmi hyp chl\n 9 1 bmi hyp chl\n 9 2 bmi hyp chl\n 9 3 bmi hyp chl\n 9 4 bmi hyp chl\n 9 5 bmi hyp chl\n 10 1 bmi hyp chl\n 10 2 bmi hyp chl\n 10 3 bmi hyp chl\n 10 4 bmi hyp chl\n 10 5 bmi hyp chl\n```\n\n\n:::\n\n```{.r .cell-code}\nimp_pmm\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nClass: mids\nNumber of multiple imputations: 5 \nImputation methods:\n age bmi hyp chl \n \"\" \"pmm\" \"pmm\" \"pmm\" \nPredictorMatrix:\n age bmi hyp chl\nage 0 1 1 1\nbmi 1 0 1 1\nhyp 1 1 0 1\nchl 1 1 1 0\n```\n\n\n:::\n\n```{.r .cell-code}\nimp_pmms$imp$bmi\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n 1 2 3 4 5\n1 35.3 29.6 27.2 30.1 29.6\n3 30.1 29.6 22.0 30.1 29.6\n4 21.7 21.7 25.5 27.2 21.7\n6 21.7 21.7 25.5 24.9 21.7\n10 24.9 22.7 22.0 30.1 27.4\n11 22.5 30.1 33.2 30.1 29.6\n12 30.1 28.7 22.0 30.1 22.7\n16 22.5 30.1 22.0 30.1 29.6\n21 35.3 30.1 33.2 30.1 29.6\n```\n\n\n:::\n:::\n\n\n# Reference\n\nStef van Buuren, Karin Groothuis-Oudshoorn (2011). mice: Multivariate Imputation by Chained Equations in R. Journal of Statistical Software, 45(3), 1-67. 
DOI 10.18637/jss.v045.i03\n", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/R/mi_mar_regression/figure-html/missing-pattern-1.png b/_freeze/R/mi_mar_regression/figure-html/missing-pattern-1.png index a66352f4a..2f1820bd4 100644 Binary files a/_freeze/R/mi_mar_regression/figure-html/missing-pattern-1.png and b/_freeze/R/mi_mar_regression/figure-html/missing-pattern-1.png differ diff --git a/_freeze/R/nonpara_wilcoxon_ranksum/execute-results/html.json b/_freeze/R/nonpara_wilcoxon_ranksum/execute-results/html.json index fd9eef2f6..7fe09484f 100644 --- a/_freeze/R/nonpara_wilcoxon_ranksum/execute-results/html.json +++ b/_freeze/R/nonpara_wilcoxon_ranksum/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "c72d4119c9f93c55e409825cd8c5d143", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Wilcoxon Rank Sum (Mann Whitney-U) in R\"\n---\n\n\n\n# Overview\n\nWilcoxon rank sum test, or equivalently, Mann-Whitney U-test is a rank based non-parametric method. The aim is to compare two independent groups of observations. 
Under certain scenarios, it can be thought of as a test for median differences, however this is only valid when: 1) both samples are independent and identically distributed (same dispersion, same shape, not necessarily normal) and 2) are symmetric around their medians.\n\nGenerally, with two samples of observations (A and B), the test uses the mean of each possible pair of observations in each group (including the pair of each value with itself) to test if the probability that (A\\>B) \\> probability (B\\>A).\n\nThe Wilcoxon rank sum test is often presented alongside a Hodges-Lehmann estimate of the pseudo-median (the median of the Walsh averages), and an associated confidence interval for the pseudo-median.\n\nA tie in the data exists when an observation in group A, has the same result as an observation in group B.\n\n## Available R packages\n\nThere are three main implementations of the Wilcoxon rank sum test in R.\n\n- [stats::wilcox.test()](https://www.rdocumentation.org/packages/stats/versions/3.6.2/topics/wilcox.test)\n\n- [coin::wilcox_test()](https://cran.r-project.org/web/packages/coin/coin.pdf)\n\n- [asht::wmwTest()](https://cran.r-project.org/web/packages/asht/asht.pdf)\n\nThe `stats` package implements various classic statistical tests, including Wilcoxon rank sum test. Although this is arguably the most commonly applied package, this one does not account for any ties in the data. To account for ties in the data, the `coin` or `asht` package should be used.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# x, y are two unpaired vectors. Do not necessary need to be of the same length.\nstats::wilcox.test(x, y, paired = FALSE)\n```\n:::\n\n\n## Example: Birth Weight\n\n*Data source: Table 30.4, Kirkwood BR. and Sterne JAC. Essentials of medical statistics. Second Edition. 
ISBN 978-0-86542-871-3*\n\nComparison of birth weights (kg) of children born to 15 non-smokers with those of children born to 14 heavy smokers.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# bw_ns: non smokers\n# bw_s: smokers\n# fmt: skip\nbw_ns <- c(3.99, 3.89, 3.6, 3.73, 3.31, \n 3.7, 4.08, 3.61, 3.83, 3.41, \n 4.13, 3.36, 3.54, 3.51, 2.71)\n# fmt: skip\nbw_s <- c(3.18, 2.74, 2.9, 3.27, 3.65, \n 3.42, 3.23, 2.86, 3.6, 3.65, \n 3.69, 3.53, 2.38, 2.34)\n```\n:::\n\n\nWe do note that there are ties present in the data. Can visualize the data on two histograms. Red lines indicate the location of medians.\n\n\n::: {.cell}\n\n```{.r .cell-code}\npar(mfrow = c(1, 2))\nhist(bw_ns, main = 'Birthweight: non-smokers')\nabline(v = median(bw_ns), col = 'red', lwd = 2)\nhist(bw_s, main = 'Birthweight: smokers')\nabline(v = median(bw_s), col = 'red', lwd = 2)\n```\n\n::: {.cell-output-display}\n![](nonpara_wilcoxon_ranksum_files/figure-html/unnamed-chunk-4-1.png){width=672}\n:::\n:::\n\n\nIt is possible to see that for non-smokers, the median birthweight is higher than those of smokers. Now we can formally test it with wilcoxon rank sum test.\n\n## stats::wilcox.test()\n\nIn `stats::wilcox.test()` the exact p-value is computed when there are less than 50 values and no ties otherwise the normal approximation is used. In our data case, because there are ties the normal approximation is used.\n\nThe default for the normal approximation is to use a continuity correction. 
One can add the argument `correct=FALSE` to not perform a continuity correction.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# default is two sided\nstats::wilcox.test(bw_s, bw_ns, paired = FALSE)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWarning in wilcox.test.default(bw_s, bw_ns, paired = FALSE): cannot compute\nexact p-value with ties\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tWilcoxon rank sum test with continuity correction\n\ndata: bw_s and bw_ns\nW = 45.5, p-value = 0.01001\nalternative hypothesis: true location shift is not equal to 0\n```\n\n\n:::\n\n```{.r .cell-code}\nstats::wilcox.test(bw_s, bw_ns, paired = FALSE, correct = FALSE)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWarning in wilcox.test.default(bw_s, bw_ns, paired = FALSE, correct = FALSE):\ncannot compute exact p-value with ties\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tWilcoxon rank sum test\n\ndata: bw_s and bw_ns\nW = 45.5, p-value = 0.009392\nalternative hypothesis: true location shift is not equal to 0\n```\n\n\n:::\n:::\n\n\nWe can also carry out a one-sided test, by specifying `alternative = \"less\"` (if the first group is expected to be smaller than the second group) or `alternative = \"greater\"`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# perform one-sided test\nstats::wilcox.test(bw_s, bw_ns, paired = FALSE, alternative = \"less\")\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWarning in wilcox.test.default(bw_s, bw_ns, paired = FALSE, alternative =\n\"less\"): cannot compute exact p-value with ties\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tWilcoxon rank sum test with continuity correction\n\ndata: bw_s and bw_ns\nW = 45.5, p-value = 0.005003\nalternative hypothesis: true location shift is less than 0\n```\n\n\n:::\n:::\n\n\nBy setting `conf.int=TRUE` a confidence interval of the location parameter (x-y) is computed. 
Note that in the two-sample case the estimator for the difference in location parameters does not estimate the difference in medians (a common misconception) but rather the median of the difference between a sample from x and a sample from y. Note that the algorithm used for the estimation of the location parameter and confidence interval is not discussed in the help of the function (in the source code of the `stats::wilcox.test()` it is only mentioned that \"Algorithm not published, thus better documented here.\").\n\nBy default a 95% confidence interval is provided. This can be changed by the argument `conf.level`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Add conf.int = TRUE\nstats::wilcox.test(bw_s, bw_ns, paired = FALSE, conf.int = TRUE)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWarning in wilcox.test.default(bw_s, bw_ns, paired = FALSE, conf.int = TRUE):\ncannot compute exact p-value with ties\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWarning in wilcox.test.default(bw_s, bw_ns, paired = FALSE, conf.int = TRUE):\ncannot compute exact confidence intervals with ties\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tWilcoxon rank sum test with continuity correction\n\ndata: bw_s and bw_ns\nW = 45.5, p-value = 0.01001\nalternative hypothesis: true location shift is not equal to 0\n95 percent confidence interval:\n -0.76995896 -0.09000999\nsample estimates:\ndifference in location \n -0.4261377 \n```\n\n\n:::\n:::\n\n\nThe argument `exact = TRUE` can be added to ask for an exact p-value to be computed. 
However, in our data case as there are ties this does not work.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# force exact, but does not work because we have ties\nstats::wilcox.test(bw_s, bw_ns, paired = FALSE, conf.int = TRUE, exact = TRUE)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWarning in wilcox.test.default(bw_s, bw_ns, paired = FALSE, conf.int = TRUE, :\ncannot compute exact p-value with ties\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWarning in wilcox.test.default(bw_s, bw_ns, paired = FALSE, conf.int = TRUE, :\ncannot compute exact confidence intervals with ties\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tWilcoxon rank sum test with continuity correction\n\ndata: bw_s and bw_ns\nW = 45.5, p-value = 0.01001\nalternative hypothesis: true location shift is not equal to 0\n95 percent confidence interval:\n -0.76995896 -0.09000999\nsample estimates:\ndifference in location \n -0.4261377 \n```\n\n\n:::\n:::\n\n\n## coin::wilcox_test()\n\nIn order to account for the ties, `wilcox_test` from the `coin` package should be used. For this function, the data needs to be inputted via a formula where the right hand side is a factor, so we need to create a dataset. In order to get results for *smokers - non-smokers* we need to relevel the factors.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsmk_data <- data.frame(\n value = c(bw_ns, bw_s),\n smoke = as.factor(rep(c(\"non\", \"smoke\"), c(length(bw_ns), length(bw_s))))\n)\nsmk_data$smoke <- forcats::fct_relevel(smk_data$smoke, \"smoke\")\nsmk_data$smoke\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n [1] non non non non non non non non non non non non \n[13] non non non smoke smoke smoke smoke smoke smoke smoke smoke smoke\n[25] smoke smoke smoke smoke smoke\nLevels: smoke non\n```\n\n\n:::\n:::\n\n\nNow the data is in the right shape we can run `wilcox_test`. By default, `coin::wilcox_test` does a normal approximation approach without continuity correction. 
One can add again `alternative=\"less\"` (or `alternative=\"greater\"`) for one-sided testing.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncoin::wilcox_test(value ~ smoke, data = smk_data)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tAsymptotic Wilcoxon-Mann-Whitney Test\n\ndata: value by smoke (smoke, non)\nZ = -2.5974, p-value = 0.009392\nalternative hypothesis: true mu is not equal to 0\n```\n\n\n:::\n:::\n\n\nWe do note that a normal approximation approach with continuity correction cannot be obtained with this function. One can add `correct=TRUE`, but note that no error is given and the results of a normal approximation approach without continuity correction is provided.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncoin::wilcox_test(value ~ smoke, data = smk_data, correct = TRUE)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tAsymptotic Wilcoxon-Mann-Whitney Test\n\ndata: value by smoke (smoke, non)\nZ = -2.5974, p-value = 0.009392\nalternative hypothesis: true mu is not equal to 0\n```\n\n\n:::\n:::\n\n\nBy including the `conf.int = TRUE` argument, confidence intervals for the difference in location are computed. According to the `coin` package documentation this is done according to Bauer (1972) \\[Bauer, D. F. (1972). Constructing confidence sets using rank statistics. Journal of the American Statistical Association 67(339), 687–690\\] and Hollander and Wolfe (1999) \\[Hollander, M. and Wolfe, D. A. (1999). Nonparametric Statistical Methods, Second Edition. New York: John Wiley & Sons.\\]. 
Note that the `conf.level` argument controls the confidence level, but must be used with `conf.int = TRUE` otherwise you won't get a confidence interval.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncoin::wilcox_test(value ~ smoke, data = smk_data, conf.int = TRUE)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tAsymptotic Wilcoxon-Mann-Whitney Test\n\ndata: value by smoke (smoke, non)\nZ = -2.5974, p-value = 0.009392\nalternative hypothesis: true mu is not equal to 0\n95 percent confidence interval:\n -0.76000001 -0.09999999\nsample estimates:\ndifference in location \n -0.4261403 \n```\n\n\n:::\n:::\n\n\nUsing `coin` one can calculate exact and Monte Carlo conditional p-values using the `distribution` argument. The exact p-value is best used in small sample sizes.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncoin::wilcox_test(\n value ~ smoke,\n data = smk_data,\n conf.int = TRUE,\n distribution = \"exact\"\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tExact Wilcoxon-Mann-Whitney Test\n\ndata: value by smoke (smoke, non)\nZ = -2.5974, p-value = 0.008181\nalternative hypothesis: true mu is not equal to 0\n95 percent confidence interval:\n -0.76 -0.10\nsample estimates:\ndifference in location \n -0.425 \n```\n\n\n:::\n:::\n\n\n\nFor doing an approximative (Monte Carlo) (with 500 and 500000 samples) the following code can be used.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncoin::wilcox_test(\n value ~ smoke,\n data = smk_data,\n conf.int = TRUE,\n distribution = approximate(nresample = 500)\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tApproximative Wilcoxon-Mann-Whitney Test\n\ndata: value by smoke (smoke, non)\nZ = -2.5974, p-value = 0.004\nalternative hypothesis: true mu is not equal to 0\n95 percent confidence interval:\n -0.77 -0.10\nsample estimates:\ndifference in location \n -0.425 \n```\n\n\n:::\n\n```{.r .cell-code}\ncoin::wilcox_test(\n value ~ smoke,\n data = smk_data,\n conf.int = TRUE,\n distribution = 
approximate(nresample = 500000)\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tApproximative Wilcoxon-Mann-Whitney Test\n\ndata: value by smoke (smoke, non)\nZ = -2.5974, p-value = 0.008208\nalternative hypothesis: true mu is not equal to 0\n95 percent confidence interval:\n -0.76 -0.10\nsample estimates:\ndifference in location \n -0.425 \n```\n\n\n:::\n:::\n\n\n\n\n\n## asht::wmwTest()\n\nThe `asht::wmwTest()` function calculates the Wilcoxon-Mann-Whitney test (normal approximation, exact complete enumeration, and exact Monte Carlo implementation) together with confidence intervals on the Mann-Whitney parameter, Pr[X5000$, where $m$ and $n$ are the sample sizes in the two groups, respectively. Otherwise by default (thus for small sample sizes), the exact Wilcoxon rank sum test is performed. The `correct` argument is available to turn-off the continuity correction. The `alternative` argument is available for one-sided testing. By default, the 95% confidence interval is calculated for the Mann-Whitney parameter (use argument `conf.int` and `conf.level` to change these defaults.). Details on the calculation of the confidence interval are provided in Newcombe (2006) \\[Newcombe, Robert G. (2006). Confidence intervals for an effect size measure based on the Mann-Whitney statistic. Part 2: asymptotic methods and evaluation. 
Statistics in medicine 25(4): 559-573\\].\n\n\n::: {.cell}\n\n```{.r .cell-code}\nasht::wmwTest(bw_s, bw_ns)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tWilcoxon-Mann-Whitney test with continuity correction (confidence\n\tinterval requires proportional odds assumption, but test does not)\n\ndata: bw_s and bw_ns\nMann-Whitney estimate = 0.78333, tie factor = 0.99951, p-value =\n0.01001\nalternative hypothesis: two distributions are not equal\n95 percent confidence interval:\n 0.5696522 0.9030435\nsample estimates:\nMann-Whitney estimate \n 0.7833333 \n```\n\n\n:::\n:::\n\n\nUsing the `method` argument one can change from normal approximation to exact complete enumeration (`method = \"exact.ce\"`), and exact Monte Carlo (`method = \"exact.mc\"`) implementation. When `method = \"exact.mc\"`, the test is implemented using complete enumeration of all permutations, and hence is only tractible for very small sample sizes (less than 10 in each group). Here, we show an example of `method = \"exact.mc\"`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nasht::wmwTest(\n bw_s,\n bw_ns,\n method = \"exact.mc\",\n control = asht::wmwControl(nMC = 100000)\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\texact Wilcoxon-Man-Whitney test (Monte Carlo with nMC=1e+05)\n\t(confidence interval requires proportional odds assumption, but test\n\tdoes not)\n\ndata: bw_s and bw_ns\nMann-Whitney estimate = 0.78333, p-value = 0.00788\nalternative hypothesis: two distributions are not equal\n95 percent confidence interval:\n 0.5796940 0.9292439\nsample estimates:\nMann-Whitney estimate \n 0.7833333 \n```\n\n\n:::\n:::\n\n\n\n## Useful References\n\n- [Methods and Formulae](https://support.minitab.com/en-us/minitab/help-and-how-to/statistics/nonparametrics/how-to/1-sample-wilcoxon/methods-and-formulas/methods-and-formulas)\n- [Mann Whitney is not about medians in 
general](https://github.com/adrianolszewski/Mann-Whitney-is-not-about-medians-in-general/blob/main/Mann_Whitney_Wilcoxon_fails_as_test_of_medians_literature.md)\n- [Relationship between walsh averages and WRS](https://stats.stackexchange.com/questions/215889/prove-the-relationship-between-walsh-averages-and-wilcoxon-signed-rank-test)\n- [Hodges Lehmann Problems](https://aakinshin.net/posts/r-hodges-lehmann-problems)\n", + "markdown": "---\ntitle: \"Wilcoxon Rank Sum (Mann Whitney-U) in R\"\n---\n\n\n\n# Overview\n\nWilcoxon rank sum test, or equivalently, Mann-Whitney U-test is a rank based non-parametric method. The aim is to compare two independent groups of observations. Under certain scenarios, it can be thought of as a test for median differences, however this is only valid when: 1) both samples are independent and identically distributed (same dispersion, same shape, not necessarily normal) and 2) are symmetric around their medians.\n\nGenerally, with two samples of observations (A and B), the test uses the mean of each possible pair of observations in each group (including the pair of each value with itself) to test if the probability that (A\\>B) \\> probability (B\\>A).\n\nThe Wilcoxon rank sum test is often presented alongside a Hodges-Lehmann estimate of the pseudo-median (the median of the Walsh averages), and an associated confidence interval for the pseudo-median.\n\nA tie in the data exists when an observation in group A, has the same result as an observation in group B.\n\n## Available R packages\n\nThere are three main implementations of the Wilcoxon rank sum test in R.\n\n- [stats::wilcox.test()](https://www.rdocumentation.org/packages/stats/versions/3.6.2/topics/wilcox.test)\n\n- [coin::wilcox_test()](https://cran.r-project.org/web/packages/coin/coin.pdf)\n\n- [asht::wmwTest()](https://cran.r-project.org/web/packages/asht/asht.pdf)\n\nThe `stats` package implements various classic statistical tests, including Wilcoxon rank sum test. 
Although this is arguably the most commonly applied package, this one does not account for any ties in the data. To account for ties in the data, the `coin` or `asht` package should be used.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# x, y are two unpaired vectors. Do not necessary need to be of the same length.\nstats::wilcox.test(x, y, paired = FALSE)\n```\n:::\n\n\n## Example: Birth Weight\n\n*Data source: Table 30.4, Kirkwood BR. and Sterne JAC. Essentials of medical statistics. Second Edition. ISBN 978-0-86542-871-3*\n\nComparison of birth weights (kg) of children born to 15 non-smokers with those of children born to 14 heavy smokers.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# bw_ns: non smokers\n# bw_s: smokers\n# fmt: skip\nbw_ns <- c(3.99, 3.89, 3.6, 3.73, 3.31, \n 3.7, 4.08, 3.61, 3.83, 3.41, \n 4.13, 3.36, 3.54, 3.51, 2.71)\n# fmt: skip\nbw_s <- c(3.18, 2.74, 2.9, 3.27, 3.65, \n 3.42, 3.23, 2.86, 3.6, 3.65, \n 3.69, 3.53, 2.38, 2.34)\n```\n:::\n\n\nWe do note that there are ties present in the data. Can visualize the data on two histograms. Red lines indicate the location of medians.\n\n\n::: {.cell}\n\n```{.r .cell-code}\npar(mfrow = c(1, 2))\nhist(bw_ns, main = 'Birthweight: non-smokers')\nabline(v = median(bw_ns), col = 'red', lwd = 2)\nhist(bw_s, main = 'Birthweight: smokers')\nabline(v = median(bw_s), col = 'red', lwd = 2)\n```\n\n::: {.cell-output-display}\n![](nonpara_wilcoxon_ranksum_files/figure-html/unnamed-chunk-4-1.png){width=672}\n:::\n:::\n\n\nIt is possible to see that for non-smokers, the median birthweight is higher than those of smokers. Now we can formally test it with wilcoxon rank sum test.\n\n## stats::wilcox.test()\n\nIn `stats::wilcox.test()` the exact p-value is computed when there are less than 50 values and no ties otherwise the normal approximation is used. In our data case, because there are ties the normal approximation is used.\n\nThe default for the normal approximation is to use a continuity correction. 
One can add the argument `correct=FALSE` to not perform a continuity correction.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# default is two sided\nstats::wilcox.test(bw_s, bw_ns, paired = FALSE)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWarning in wilcox.test.default(bw_s, bw_ns, paired = FALSE): cannot compute\nexact p-value with ties\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tWilcoxon rank sum test with continuity correction\n\ndata: bw_s and bw_ns\nW = 45.5, p-value = 0.01001\nalternative hypothesis: true location shift is not equal to 0\n```\n\n\n:::\n\n```{.r .cell-code}\nstats::wilcox.test(bw_s, bw_ns, paired = FALSE, correct = FALSE)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWarning in wilcox.test.default(bw_s, bw_ns, paired = FALSE, correct = FALSE):\ncannot compute exact p-value with ties\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tWilcoxon rank sum test\n\ndata: bw_s and bw_ns\nW = 45.5, p-value = 0.009392\nalternative hypothesis: true location shift is not equal to 0\n```\n\n\n:::\n:::\n\n\nWe can also carry out a one-sided test, by specifying `alternative = \"less\"` (if the first group is expected to be smaller than the second group) or `alternative = \"greater\"`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# perform one-sided test\nstats::wilcox.test(bw_s, bw_ns, paired = FALSE, alternative = \"less\")\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWarning in wilcox.test.default(bw_s, bw_ns, paired = FALSE, alternative =\n\"less\"): cannot compute exact p-value with ties\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tWilcoxon rank sum test with continuity correction\n\ndata: bw_s and bw_ns\nW = 45.5, p-value = 0.005003\nalternative hypothesis: true location shift is less than 0\n```\n\n\n:::\n:::\n\n\nBy setting `conf.int=TRUE` a confidence interval of the location parameter (x-y) is computed. 
Note that in the two-sample case the estimator for the difference in location parameters does not estimate the difference in medians (a common misconception) but rather the median of the difference between a sample from x and a sample from y. Note that the algorithm used for the estimation of the location parameter and confidence interval is not discussed in the help of the function (in the source code of the `stats::wilcox.test()` it is only mentioned that \"Algorithm not published, thus better documented here.\").\n\nBy default a 95% confidence interval is provided. This can be changed by the argument `conf.level`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Add conf.int = TRUE\nstats::wilcox.test(bw_s, bw_ns, paired = FALSE, conf.int = TRUE)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWarning in wilcox.test.default(bw_s, bw_ns, paired = FALSE, conf.int = TRUE):\ncannot compute exact p-value with ties\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWarning in wilcox.test.default(bw_s, bw_ns, paired = FALSE, conf.int = TRUE):\ncannot compute exact confidence intervals with ties\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tWilcoxon rank sum test with continuity correction\n\ndata: bw_s and bw_ns\nW = 45.5, p-value = 0.01001\nalternative hypothesis: true location shift is not equal to 0\n95 percent confidence interval:\n -0.76995896 -0.09000999\nsample estimates:\ndifference in location \n -0.4261377 \n```\n\n\n:::\n:::\n\n\nThe argument `exact = TRUE` can be added to ask for an exact p-value to be computed. 
However, in our data case as there are ties this does not work.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# force exact, but does not work because we have ties\nstats::wilcox.test(bw_s, bw_ns, paired = FALSE, conf.int = TRUE, exact = TRUE)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWarning in wilcox.test.default(bw_s, bw_ns, paired = FALSE, conf.int = TRUE, :\ncannot compute exact p-value with ties\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWarning in wilcox.test.default(bw_s, bw_ns, paired = FALSE, conf.int = TRUE, :\ncannot compute exact confidence intervals with ties\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tWilcoxon rank sum test with continuity correction\n\ndata: bw_s and bw_ns\nW = 45.5, p-value = 0.01001\nalternative hypothesis: true location shift is not equal to 0\n95 percent confidence interval:\n -0.76995896 -0.09000999\nsample estimates:\ndifference in location \n -0.4261377 \n```\n\n\n:::\n:::\n\n\n## coin::wilcox_test()\n\nIn order to account for the ties, `wilcox_test` from the `coin` package should be used. For this function, the data needs to be inputted via a formula where the right hand side is a factor, so we need to create a dataset. In order to get results for *smokers - non-smokers* we need to relevel the factors.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsmk_data <- data.frame(\n value = c(bw_ns, bw_s),\n smoke = as.factor(rep(c(\"non\", \"smoke\"), c(length(bw_ns), length(bw_s))))\n)\nsmk_data$smoke <- forcats::fct_relevel(smk_data$smoke, \"smoke\")\nsmk_data$smoke\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n [1] non non non non non non non non non non non non \n[13] non non non smoke smoke smoke smoke smoke smoke smoke smoke smoke\n[25] smoke smoke smoke smoke smoke\nLevels: smoke non\n```\n\n\n:::\n:::\n\n\nNow the data is in the right shape we can run `wilcox_test`. By default, `coin::wilcox_test` does a normal approximation approach without continuity correction. 
One can add again `alternative=\"less\"` (or `alternative=\"greater\"`) for one-sided testing.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncoin::wilcox_test(value ~ smoke, data = smk_data)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tAsymptotic Wilcoxon-Mann-Whitney Test\n\ndata: value by smoke (smoke, non)\nZ = -2.5974, p-value = 0.009392\nalternative hypothesis: true mu is not equal to 0\n```\n\n\n:::\n:::\n\n\nWe do note that a normal approximation approach with continuity correction cannot be obtained with this function. One can add `correct=TRUE`, but note that no error is given and the results of a normal approximation approach without continuity correction is provided.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncoin::wilcox_test(value ~ smoke, data = smk_data, correct = TRUE)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tAsymptotic Wilcoxon-Mann-Whitney Test\n\ndata: value by smoke (smoke, non)\nZ = -2.5974, p-value = 0.009392\nalternative hypothesis: true mu is not equal to 0\n```\n\n\n:::\n:::\n\n\nBy including the `conf.int = TRUE` argument, confidence intervals for the difference in location are computed. According to the `coin` package documentation this is done according to Bauer (1972) \\[Bauer, D. F. (1972). Constructing confidence sets using rank statistics. Journal of the American Statistical Association 67(339), 687–690\\] and Hollander and Wolfe (1999) \\[Hollander, M. and Wolfe, D. A. (1999). Nonparametric Statistical Methods, Second Edition. New York: John Wiley & Sons.\\]. 
Note that the `conf.level` argument controls the confidence level, but must be used with `conf.int = TRUE` otherwise you won't get a confidence interval.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncoin::wilcox_test(value ~ smoke, data = smk_data, conf.int = TRUE)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tAsymptotic Wilcoxon-Mann-Whitney Test\n\ndata: value by smoke (smoke, non)\nZ = -2.5974, p-value = 0.009392\nalternative hypothesis: true mu is not equal to 0\n95 percent confidence interval:\n -0.76000001 -0.09999999\nsample estimates:\ndifference in location \n -0.4261403 \n```\n\n\n:::\n:::\n\n\nUsing `coin` one can calculate exact and Monte Carlo conditional p-values using the `distribution` argument. The exact p-value is best used in small sample sizes.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncoin::wilcox_test(\n value ~ smoke,\n data = smk_data,\n conf.int = TRUE,\n distribution = \"exact\"\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tExact Wilcoxon-Mann-Whitney Test\n\ndata: value by smoke (smoke, non)\nZ = -2.5974, p-value = 0.008181\nalternative hypothesis: true mu is not equal to 0\n95 percent confidence interval:\n -0.76 -0.10\nsample estimates:\ndifference in location \n -0.425 \n```\n\n\n:::\n:::\n\n\n\nFor doing an approximative (Monte Carlo) (with 500 and 500000 samples) the following code can be used.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncoin::wilcox_test(\n value ~ smoke,\n data = smk_data,\n conf.int = TRUE,\n distribution = approximate(nresample = 500)\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tApproximative Wilcoxon-Mann-Whitney Test\n\ndata: value by smoke (smoke, non)\nZ = -2.5974, p-value = 0.012\nalternative hypothesis: true mu is not equal to 0\n95 percent confidence interval:\n -0.76 -0.09\nsample estimates:\ndifference in location \n -0.425 \n```\n\n\n:::\n\n```{.r .cell-code}\ncoin::wilcox_test(\n value ~ smoke,\n data = smk_data,\n conf.int = TRUE,\n distribution = 
approximate(nresample = 500000)\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tApproximative Wilcoxon-Mann-Whitney Test\n\ndata: value by smoke (smoke, non)\nZ = -2.5974, p-value = 0.008244\nalternative hypothesis: true mu is not equal to 0\n95 percent confidence interval:\n -0.77 -0.10\nsample estimates:\ndifference in location \n -0.425 \n```\n\n\n:::\n:::\n\n\n\n\n\n## asht::wmwTest()\n\nThe `asht::wmwTest()` function calculates the Wilcoxon-Mann-Whitney test (normal approximation, exact complete enumeration, and exact Monte Carlo implementation) together with confidence intervals on the Mann-Whitney parameter, Pr[X5000$, where $m$ and $n$ are the sample sizes in the two groups, respectively. Otherwise by default (thus for small sample sizes), the exact Wilcoxon rank sum test is performed. The `correct` argument is available to turn-off the continuity correction. The `alternative` argument is available for one-sided testing. By default, the 95% confidence interval is calculated for the Mann-Whitney parameter (use argument `conf.int` and `conf.level` to change these defaults.). Details on the calculation of the confidence interval are provided in Newcombe (2006) \\[Newcombe, Robert G. (2006). Confidence intervals for an effect size measure based on the Mann-Whitney statistic. Part 2: asymptotic methods and evaluation. 
Statistics in medicine 25(4): 559-573\\].\n\n\n::: {.cell}\n\n```{.r .cell-code}\nasht::wmwTest(bw_s, bw_ns)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tWilcoxon-Mann-Whitney test with continuity correction (confidence\n\tinterval requires proportional odds assumption, but test does not)\n\ndata: bw_s and bw_ns\nMann-Whitney estimate = 0.78333, tie factor = 0.99951, p-value =\n0.01001\nalternative hypothesis: two distributions are not equal\n95 percent confidence interval:\n 0.5696522 0.9030435\nsample estimates:\nMann-Whitney estimate \n 0.7833333 \n```\n\n\n:::\n:::\n\n\nUsing the `method` argument one can change from normal approximation to exact complete enumeration (`method = \"exact.ce\"`), and exact Monte Carlo (`method = \"exact.mc\"`) implementation. When `method = \"exact.mc\"`, the test is implemented using complete enumeration of all permutations, and hence is only tractible for very small sample sizes (less than 10 in each group). Here, we show an example of `method = \"exact.mc\"`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nasht::wmwTest(\n bw_s,\n bw_ns,\n method = \"exact.mc\",\n control = asht::wmwControl(nMC = 100000)\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\texact Wilcoxon-Man-Whitney test (Monte Carlo with nMC=1e+05)\n\t(confidence interval requires proportional odds assumption, but test\n\tdoes not)\n\ndata: bw_s and bw_ns\nMann-Whitney estimate = 0.78333, p-value = 0.00808\nalternative hypothesis: two distributions are not equal\n95 percent confidence interval:\n 0.5780199 0.9316202\nsample estimates:\nMann-Whitney estimate \n 0.7833333 \n```\n\n\n:::\n:::\n\n\n\n## Useful References\n\n- [Methods and Formulae](https://support.minitab.com/en-us/minitab/help-and-how-to/statistics/nonparametrics/how-to/1-sample-wilcoxon/methods-and-formulas/methods-and-formulas)\n- [Mann Whitney is not about medians in 
general](https://github.com/adrianolszewski/Mann-Whitney-is-not-about-medians-in-general/blob/main/Mann_Whitney_Wilcoxon_fails_as_test_of_medians_literature.md)\n- [Relationship between walsh averages and WRS](https://stats.stackexchange.com/questions/215889/prove-the-relationship-between-walsh-averages-and-wilcoxon-signed-rank-test)\n- [Hodges Lehmann Problems](https://aakinshin.net/posts/r-hodges-lehmann-problems)\n", "supporting": [ "nonpara_wilcoxon_ranksum_files" ], diff --git a/_freeze/R/nonpara_wilcoxon_ranksum/figure-html/unnamed-chunk-4-1.png b/_freeze/R/nonpara_wilcoxon_ranksum/figure-html/unnamed-chunk-4-1.png index dfe91e7ba..a3bf66ed2 100644 Binary files a/_freeze/R/nonpara_wilcoxon_ranksum/figure-html/unnamed-chunk-4-1.png and b/_freeze/R/nonpara_wilcoxon_ranksum/figure-html/unnamed-chunk-4-1.png differ diff --git a/_freeze/R/random_effects_models/execute-results/html.json b/_freeze/R/random_effects_models/execute-results/html.json new file mode 100644 index 000000000..ce8defcfe --- /dev/null +++ b/_freeze/R/random_effects_models/execute-results/html.json @@ -0,0 +1,15 @@ +{ + "hash": "b557ed28b5df30777e45c980c226a5d1", + "result": { + "engine": "knitr", + "markdown": "---\ntitle: \"Random Effects Models in R\"\n---\n\n## Fitting Random Effects Models in R\n\nIn a classical regression model, coefficients in the model are fixed across all observations and observations are assumed to be independent. Mixed effects models introduce random coefficients to the model, called random effects, which vary randomly between different groups of observations. 
The introduction of random effects leads to observations within a group being correlated.\n\n### Setting up the model\n\nThe **lme4** package is one of several packages that can be used to implement random effects models.\n\nAs an example, suppose that we want the intercept in the model to vary randomly between participants, in other words, a random constant is included in the model which is different for each participant. This is achieved by adding (1 | USUBJID) to the model formula.\n\n`lme4::lmer(AVAL ~ TRTP + ... + (1 | USUBJID))`\n\nWithin the output of the summary function the estimated variance of the random effect(s) can be found, along with the estimated fixed effect coefficients.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/random_effects_models/r_pict1.png){fig-align='center' width=50%}\n:::\n:::\n\n\nTo allow the coefficient of TRTP to also vary randomly by participant, use(1 + TRTP | USUBJID), or equivalently just (TRTP | USUBJID). Under this notation, the random intercept and random coefficient of TRTP are correlated. To assume that they are not correlated, use (1 | USUBJID) + (0 + TRTP | USUBJID) or equivalently (TRTP || USUBJID). Further details of the notation can be found in [Fitting Linear Mixed-Effects Models Using lme4](https://www.jstatsoft.org/index.php/jss/article/view/v067i01/946).\n\n### Inference on a single coefficient\n\nThe lme4 package does not calculate degrees of freedom or p values, but these can be calculated using the lmerTest package. Degrees of freedom and p-values for fixed effects can be found in the summary output after loading the lmerTest package, summary(model, ddf=\"Kenward-Roger\"). 
Either the Satterthwaite (ddf=\"Satterthwaite\") or Kenward-Roger (ddf=\"Kenward-Roger\") method can be used for calculating degrees of freedom.\nFor confidence intervals, the confint function will calculate confidence intervals using the Wald (method = “Wald”) or profile likelihood (method = “profile”) methods. The Wald method uses the normal distribution (infinite degrees of freedom) rather than the t-distribution. Confidence intervals using the t-distribution can either be calculated manually or by constructing a contrast (see below).\n\n### Inference on a contrast\n\nThe lmerTest::contest1D function can be used to calculate and test a contrast, for example contest1D(model, c(0, 1, 0, 0, 1, 0), ddf = \"Kenward-Roger\"). An alternative is to use the emmeans package, which can also calculate estimates marginal means (least square means). Confidence intervals for contrasts constructed using emmeans can be calculated via the confint function.\n\n", + "supporting": [], + "filters": [ + "rmarkdown/pagebreak.lua" + ], + "includes": {}, + "engineDependencies": {}, + "preserve": {}, + "postProcess": true + } +} \ No newline at end of file diff --git a/_freeze/R/rbmi_continuous_joint/execute-results/html.json b/_freeze/R/rbmi_continuous_joint/execute-results/html.json index 25c2983e4..e17700693 100644 --- a/_freeze/R/rbmi_continuous_joint/execute-results/html.json +++ b/_freeze/R/rbmi_continuous_joint/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "78c3df12dbad314967a5905b5d4100a6", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Reference-Based Multiple Imputation (joint modelling): Continuous Data\"\n---\n\n## Libraries\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# General\nlibrary(dplyr)\nlibrary(tidyr)\nlibrary(gt)\nlibrary(labelled)\n\n# Methodlolgy specific\nlibrary(mmrm)\nlibrary(emmeans)\nlibrary(rbmi)\nlibrary(mice) # only used md.pattern()\n```\n:::\n\n\n## Reference-based multiple imputation (rbmi)\n\n### Methodology introduction\nReference-based 
multiple imputation methods have become popular for handling missing data, as well as for conducting sensitivity analyses, in randomized clinical trials. In the context of a repeatedly measured continuous endpoint assuming a multivariate normal model, [Carpenter et al. (2013)](https://www.tandfonline.com/doi/full/10.1080/10543406.2013.834911) proposed a framework to extend the usual MAR-based MI approach by postulating assumptions about the joint distribution of pre- and post-deviation data. Under this framework, one makes qualitative assumptions about how individuals’ missing outcomes relate to those observed in relevant groups in the trial, based on plausible clinical scenarios. Statistical analysis then proceeds using the method of multiple imputation ([Rubin 1976](https://doi.org/10.1093/biomet/63.3.581), [Rubin 1987]((https://onlinelibrary.wiley.com/doi/book/10.1002/9780470316696))).\n\nIn general, multiple imputation of a repeatedly measured continuous outcome can be done via 2 computational routes ([Roger 2022](https://baselbiometrics.github.io/home/docs/talks/20221208/5_JamesRoger%2020121118.pdf)):\n\n 1. Stepwise: split problem into separate imputations of data at each visit\n \n + requires monotone missingness, such as missingness due to withdrawal\n \n + conditions on the imputed values at previous visit\n \n + Bayesian linear regression problem is much simpler with monotone missing, as one can sample directly using conjugate priors\n\n 2. One-step approach (joint modelling): Fit a Bayesian full multivariate normal repeated measures model using MCMC and then draw a sample.\n\nHere, we illustrate reference-based multiple imputation of a continuous outcome measured repeatedly via the so-called one-step approach.\n\n### rbmi package\nThe `rbmi` package [Gower-Page et al. (2022)](https://joss.theoj.org/papers/10.21105/joss.04251) will be used for the one-step approach of the reference-based multiple imputation using R. 
The package implements standard and reference based multiple imputation methods for continuous longitudinal endpoints . In particular, this package supports deterministic conditional mean imputation and jackknifing as described in [Wolbers et al. (2022)](https://onlinelibrary.wiley.com/doi/full/10.1002/pst.2234), convential MI based on Bayesian posterior draws as described in [Carpenter et al. (2013)](https://www.tandfonline.com/doi/full/10.1080/10543406.2013.834911), and bootstrapped maximum likelihood imputation as described in [von Hippel and Bartlett (2021)](https://doi.org/10.1214/20-STS793).\n\nThe following standard and reference-based multiple imputation approaches will be illustrated here: \n \n * MAR (Missing At Random)\n \n * CIR (Copy Increment from Reference)\n \n * J2R (Jump to Reference)\n \n * CR (Copy Reference)\n\n\n## Data used\nA publicly available example [dataset](https://r-packages.io/datasets/antidepressant_data) from an antidepressant clinical trial of an active drug versus placebo is used. Overall, data of 172 patients is available with 88 patients receiving placebo and 84 receiving active drug. This data is also used in the `rbmi` package [quickstart vignette](https://cran.r-project.org/web/packages/rbmi/vignettes/quickstart.html).\n\nThe relevant endpoint is the Hamilton 17-item depression rating scale (HAMD17) which was assessed at baseline and at weeks 1, 2, 4, and 6 (visits 4-7). Study drug discontinuation occurred in 24% (20/84) of subjects from the active drug and 26% (23/88) of subjects from placebo. All data after study drug discontinuation are missing. 
\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndata(\"antidepressant_data\")\ndat <- antidepressant_data |>\n dplyr::select(\n PATIENT,\n GENDER,\n THERAPY,\n RELDAYS,\n VISIT,\n BASVAL,\n HAMDTL17,\n CHANGE\n ) |>\n dplyr::mutate(THERAPY = factor(THERAPY, levels = c(\"PLACEBO\", \"DRUG\"))) |>\n labelled::remove_labels()\n\ngt(head(dat, n = 10))\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGE
1503FDRUG743221-11
1503FDRUG1453220-12
1503FDRUG2863219-13
1503FDRUG4273217-15
1507FPLACEBO741411-3
1507FPLACEBO15514140
1507FPLACEBO296149-5
1507FPLACEBO427145-9
1509FDRUG742120-1
1509FDRUG1452118-3
\n
\n```\n\n:::\n:::\n\n\nThe number of patients per visit and arm are:\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat |>\n group_by(VISIT, THERAPY) |>\n dplyr::summarise(N = n())\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\n`summarise()` has regrouped the output.\nℹ Summaries were computed grouped by VISIT and THERAPY.\nℹ Output is grouped by VISIT.\nℹ Use `summarise(.groups = \"drop_last\")` to silence this message.\nℹ Use `summarise(.by = c(VISIT, THERAPY))` for per-operation grouping\n (`?dplyr::dplyr_by`) instead.\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 8 × 3\n# Groups: VISIT [4]\n VISIT THERAPY N\n \n1 4 PLACEBO 88\n2 4 DRUG 84\n3 5 PLACEBO 81\n4 5 DRUG 77\n5 6 PLACEBO 76\n6 6 DRUG 73\n7 7 PLACEBO 65\n8 7 DRUG 64\n```\n\n\n:::\n:::\n\n\nThe mean change from baseline of the endpoint (Hamilton 17-item depression rating scale, HAMD17) per visit per treatment group using only the complete cases are:\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat |>\n group_by(VISIT, THERAPY) |>\n dplyr::summarise(N = n(), MEAN = mean(CHANGE))\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\n`summarise()` has regrouped the output.\nℹ Summaries were computed grouped by VISIT and THERAPY.\nℹ Output is grouped by VISIT.\nℹ Use `summarise(.groups = \"drop_last\")` to silence this message.\nℹ Use `summarise(.by = c(VISIT, THERAPY))` for per-operation grouping\n (`?dplyr::dplyr_by`) instead.\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 8 × 4\n# Groups: VISIT [4]\n VISIT THERAPY N MEAN\n \n1 4 PLACEBO 88 -1.51\n2 4 DRUG 84 -1.82\n3 5 PLACEBO 81 -2.70\n4 5 DRUG 77 -4.71\n5 6 PLACEBO 76 -4.07\n6 6 DRUG 73 -6.79\n7 7 PLACEBO 65 -5.14\n8 7 DRUG 64 -8.34\n```\n\n\n:::\n:::\n\n\nThe missingness pattern is show below (1=observed data point (blue), 0=missing data point (red)). The incomplete data is primarily monotone in nature. 128 patients have complete data for all visits (all 1's at each visit). 
20, 10 and 13 patients have 1, 2 or 3 monotone missing data, respectively. Further, there is a single additional intermittent missing observation (patient 3618).\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat_wide = dat |>\n dplyr::select(PATIENT, VISIT, CHANGE) |>\n pivot_wider(\n id_cols = PATIENT,\n names_from = VISIT,\n names_prefix = \"VISIT_\",\n values_from = CHANGE\n )\n\ndat_wide |>\n dplyr::select(starts_with(\"VISIT_\")) |>\n mice::md.pattern(plot = TRUE, rotate.names = TRUE)\n```\n\n::: {.cell-output-display}\n![](rbmi_continuous_joint_files/figure-html/explore data 2-1.png){width=672}\n:::\n\n::: {.cell-output .cell-output-stdout}\n\n```\n VISIT_4 VISIT_5 VISIT_6 VISIT_7 \n128 1 1 1 1 0\n20 1 1 1 0 1\n10 1 1 0 0 2\n1 1 0 1 1 1\n13 1 0 0 0 3\n 0 14 23 43 80\n```\n\n\n:::\n:::\n\n\n## Complete case analysis\n\nA complete case analysis is performed using mixed model for repeated measures (MMRM) with covariates: treatment [THERAPY], gender [GENDER], visit [VISIT] as factors; baseline score [BASVAL] as continuous; and visit-by-treatment [THERAPY * VISIT] interaction, and visit-by-baseline [BASVAL * VISIT] interaction. An unstructured covariance matrix is used.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmmrm_fit = mmrm::mmrm(\n CHANGE ~\n 1 +\n THERAPY +\n GENDER +\n VISIT +\n BASVAL +\n THERAPY * VISIT +\n BASVAL * VISIT +\n us(VISIT | PATIENT),\n data = dat,\n reml = TRUE\n)\nsummary(mmrm_fit)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nmmrm fit\n\nFormula: \nCHANGE ~ 1 + THERAPY + GENDER + VISIT + BASVAL + THERAPY * VISIT + \n BASVAL * VISIT + us(VISIT | PATIENT)\nData: dat (used 608 observations from 172 subjects with maximum 4 \ntimepoints)\nCovariance: unstructured (10 variance parameters)\nMethod: Satterthwaite\nVcov Method: Asymptotic\nInference: REML\n\nModel selection criteria:\n AIC BIC logLik deviance \n 3512.9 3544.4 -1746.5 3492.9 \n\nCoefficients: \n Estimate Std. 
Error df t value Pr(>|t|) \n(Intercept) 3.16355 1.20260 168.64000 2.631 0.00931 ** \nTHERAPYDRUG 0.06603 0.68662 168.11000 0.096 0.92350 \nGENDERM 0.31961 0.68216 168.46000 0.469 0.64001 \nVISIT5 -0.50646 1.22706 157.16000 -0.413 0.68036 \nVISIT6 -0.39390 1.41983 149.35000 -0.277 0.78184 \nVISIT7 -2.29237 1.62198 142.91000 -1.413 0.15974 \nBASVAL -0.27866 0.06222 168.05000 -4.479 1.38e-05 ***\nTHERAPYDRUG:VISIT5 -1.49495 0.73342 156.86000 -2.038 0.04320 * \nTHERAPYDRUG:VISIT6 -2.31710 0.85860 151.23000 -2.699 0.00775 ** \nTHERAPYDRUG:VISIT7 -2.89468 0.96582 139.86000 -2.997 0.00323 ** \nVISIT5:BASVAL -0.03429 0.06567 157.48000 -0.522 0.60231 \nVISIT6:BASVAL -0.11482 0.07646 150.73000 -1.502 0.13527 \nVISIT7:BASVAL -0.04656 0.08679 142.04000 -0.537 0.59244 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\nCovariance estimate:\n 4 5 6 7\n4 19.7877 16.6237 15.4265 16.4578\n5 16.6237 34.3231 25.4682 26.2897\n6 15.4265 25.4682 38.4094 33.9331\n7 16.4578 26.2897 33.9331 45.3625\n```\n\n\n:::\n:::\n\n\nUsing the `emmeans` package/function least square means and contrast can be obtained.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nem = emmeans::emmeans(\n mmrm_fit,\n specs = trt.vs.ctrl ~ THERAPY * VISIT,\n at = list(VISIT = \"7\"),\n level = 0.95,\n adjust = \"none\",\n mode = \"df.error\"\n)\n\nem_contrast = broom::tidy(em$contrasts, conf.int = TRUE, conf.level = 0.95)\nem_contrast |>\n gt() |>\n fmt_number(decimals = 3)\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n\n \n \n
termcontrastnull.valueestimatestd.errordfconf.lowconf.highstatisticp.value
THERAPY*VISITDRUG VISIT7 - PLACEBO VISIT70.000−2.8291.117150.711−5.035−0.622−2.5330.012
\n
\n```\n\n:::\n:::\n\n\nThe treatment difference at visit 7 is of interest, and is estimated to be -2.829 (se=1.117) with 95% CI of [-5.035 to -0.622] (p=0.0123).\n\n\n## rbmi: MAR approach\nThe code presented here is based on the `rbmi` package [quickstart vignette](https://cran.r-project.org/web/packages/rbmi/vignettes/quickstart.html).\n\n### Create needed datasets and specify imputation strategy\n`rbmi` expects its input dataset to be complete; that is, there must be one row per subject for each visit (note: in clinical trials ADAMs typically do not have this required complete data structure). Missing outcome values should be coded as `NA`, while missing covariate values are not allowed. If the dataset is incomplete, then the `expand_locf()` function can be used to add any missing rows, using LOCF imputation to carry forward the observed baseline covariate values to visits with missing outcomes.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat_expand <- rbmi::expand_locf(\n dat,\n PATIENT = levels(dat$PATIENT), # expand by PATIENT and VISIT\n VISIT = levels(dat$VISIT),\n vars = c(\"BASVAL\", \"THERAPY\", \"GENDER\"), # complete covariates using LOCF\n group = c(\"PATIENT\"),\n order = c(\"PATIENT\", \"VISIT\") # sort\n)\n```\n:::\n\n\nFor example, the data of patient 1513 in the original data and expanded data are:\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat |>\n dplyr::filter(PATIENT == \"1513\") |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGE
1513MDRUG7419245
\n
\n```\n\n:::\n\n```{.r .cell-code}\ndat_expand |>\n dplyr::filter(PATIENT == \"1513\") |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGE
1513MDRUG7419245
1513MDRUGNA519NANA
1513MDRUGNA619NANA
1513MDRUGNA719NANA
\n
\n```\n\n:::\n:::\n\n\nNext, a dataset must be created specifying which data points should be imputed with the specified imputation strategy. The dataset `dat_ice` is created which specifies the first visit affected by an intercurrent event (ICE) and the imputation strategy for handling missing outcome data after the ICE. At most one ICE which is to be imputed is allowed per subject. In the example, the subject’s first visit affected by the ICE “study drug discontinuation” corresponds to the first terminal missing observation\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat_ice <- dat_expand |>\n arrange(PATIENT, VISIT) |>\n filter(is.na(CHANGE)) |>\n group_by(PATIENT) |>\n slice(1) |>\n ungroup() |>\n select(PATIENT, VISIT) |>\n mutate(strategy = \"MAR\")\n\ngt(head(dat_ice))\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n\n\n \n\n\n \n\n\n \n\n\n \n\n\n \n\n\n \n \n
PATIENTVISITstrategy
15135MAR
15145MAR
15175MAR
18047MAR
21047MAR
21185MAR
\n
\n```\n\n:::\n:::\n\n\nIn this dataset, subject 3618 has an intermittent missing values which does not correspond to a study drug discontinuation. We therefore remove this subject from `dat_ice`. In the later imputation step, it will automatically be imputed under the default MAR assumption.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat_ice <- dat_ice[-which(dat_ice$PATIENT == 3618), ]\n```\n:::\n\n\n### Fit imputation model and draw posterior parameters\nThe `vars` object using using `set_vars()` defines the names of key variables in the dataset and the covariates included in the imputation model. If you wish to include interaction terms these need to be added in the covariates input.\n\nThe `method` object specifies the statistical method used to fit the imputation models and to create imputed datasets.\n\nThe `draws()` function fits the imputation model and stores the corresponding parameter estimates and Bayesian posterior parameter draws.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nvars <- rbmi::set_vars(\n outcome = \"CHANGE\",\n visit = \"VISIT\",\n subjid = \"PATIENT\",\n group = \"THERAPY\",\n covariates = c(\"GENDER\", \"BASVAL*VISIT\", \"THERAPY*VISIT\")\n)\n\nmethod <- rbmi::method_bayes(\n n_samples = 500,\n control = rbmi::control_bayes(warmup = 500, thin = 10)\n)\n\nset.seed(12345)\ndrawObj <- draws(\n data = dat_expand,\n data_ice = dat_ice,\n vars = vars,\n method = method,\n quiet = TRUE\n)\n\ndrawObj\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\nDraws Object\n------------\nNumber of Samples: 500\nNumber of Failed Samples: 0\nModel Formula: CHANGE ~ 1 + THERAPY + VISIT + GENDER + BASVAL * VISIT + THERAPY * VISIT\nImputation Type: random\nMethod:\n name: Bayes\n covariance: us\n same_cov: TRUE\n n_samples: 500\n prior_cov: default\nControls:\n warmup: 500\n thin: 10\n chains: 1\n init: mmrm\n seed: 2245663\n```\n\n\n:::\n:::\n\n\n### Generate imputed datasets\n\nThe next step is to use the parameters from the imputation model to generate the 
imputed datasets. This is done via the `impute()` function. The function only has two key inputs: the imputation model output from `draws()` and the `references` groups relevant to reference-based imputation methods. Since we are using the MAR approach here, we can set it to NULL.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nimputeObj <- rbmi::impute(draws = drawObj, references = NULL)\n```\n:::\n\n\nIn case we would like to access the imputed datasets, we can use the `extract_imputed_dfs()` function. For example, the imputed values in the 10th imputed dataset for patient 1513 are:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nimputed_dfs = rbmi::extract_imputed_dfs(imputeObj)\nMI_10 = imputed_dfs[[10]]\nMI_10$PATIENT_ID = dat_expand$PATIENT\n\nMI_10 |>\n dplyr::filter(PATIENT_ID == \"1513\") |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGEPATIENT_ID
new_pt_5MDRUG7419245.0000001513
new_pt_5MDRUGNA519NA4.1156421513
new_pt_5MDRUGNA619NA-8.2959281513
new_pt_5MDRUGNA719NA-8.7947781513
\n
\n```\n\n:::\n:::\n\n\n### Analyse imputed datasets\nThe next step is to run the analysis model on each imputed dataset. This is done by defining an analysis function and then calling the `analyse()` function to apply this function to each imputed dataset. The `ancova()` function provided by the `rbmi` package which fits a separate ANCOVA model for the outcomes from each visit is used.\n\nThe `ancova()` function uses the `set_vars()` function which determines the names of the key variables within the data and the covariates (in addition to the treatment group) for which the analysis model will be adjusted.\n\nNote: In Appendix 1 below we show how you can easily use a different analysis method (e.g., mmrm).\n\n\n::: {.cell}\n\n```{.r .cell-code}\nvars_analyse <- rbmi::set_vars(\n outcome = \"CHANGE\",\n visit = \"VISIT\",\n subjid = \"PATIENT\",\n group = \"THERAPY\",\n covariates = c(\"BASVAL\", \"GENDER\")\n)\n\nanaObj <- rbmi::analyse(\n imputations = imputeObj,\n fun = ancova,\n vars = vars_analyse\n)\n```\n:::\n\n\n### Pool results\n\nFinally, the `pool()` function can be used to summarise the analysis results across multiple imputed datasets to provide an overall statistic with a standard error, confidence intervals and a p-value for the hypothesis test of the null hypothesis that the effect is equal to 0. Since we used `method_bayes()`, pooling and inference are based on Rubin’s rules.\n\nHere, the treatment difference at visit 7 is of interest. Since we set PLACEBO as the first factor in the variable `THERAPY` this corresponds to `ref`, whereas DRUG corresponds to `alt`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\npoolObj <- rbmi::pool(anaObj, conf.level = 0.95, alternative = \"two.sided\")\n\npoolObj |>\n data.frame() |>\n dplyr::filter(grepl(\"7\", parameter)) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n \n
parameterestselciucipval
trt_7-2.8158561.1230680-5.036010-0.5957011.329608e-02
lsm_ref_7-4.8233090.7823422-6.369977-3.2766417.022688e-09
lsm_alt_7-7.6391650.7931535-9.206925-6.0714043.025813e-17
\n
\n```\n\n:::\n:::\n\n\n\n## rbmi: MNAR CR approach\nThe following changes need to be made in the code above to apply the Copy Reference (CR) approach in `rbmi`. For `dat_ice` the strategy need to be changed to CR. In the `impute()` step the `references` need to be specified. Here we set the reference for the DRUG group to PLACEBO.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat_ice <- dat_expand |>\n arrange(PATIENT, VISIT) |>\n filter(is.na(CHANGE)) |>\n group_by(PATIENT) |>\n slice(1) |>\n ungroup() |>\n select(PATIENT, VISIT) |>\n mutate(strategy = \"CR\")\n\nimputeObj <- rbmi::impute(\n drawObj,\n references = c(\"PLACEBO\" = \"PLACEBO\", \"DRUG\" = \"PLACEBO\")\n)\n```\n:::\n\n\n\n\nThe results for M=500 imputed datasets using the MNAR CR approach are:\n\n\n::: {.cell}\n\n```{.r .cell-code}\npoolObj |>\n data.frame() |>\n dplyr::filter(grepl(\"7\", parameter)) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n \n
parameterestselciucipval
trt_7-2.3897801.1101922-4.583669-0.19589113.297369e-02
lsm_ref_7-4.8298340.7876150-6.386855-3.27281348.189424e-09
lsm_alt_7-7.2196140.7942274-8.789278-5.64995046.533123e-16
\n
\n```\n\n:::\n:::\n\n\n## rbmi: MNAR JR approach\nThe following changes need to be made in the code above to apply the Jump to Reference (JR) approach in `rbmi`. For `dat_ice` the strategy need to be changed to JR. In the `impute()` step the `references` need to be specified. Here we set the reference for the DRUG group to PLACEBO.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat_ice <- dat_expand |>\n arrange(PATIENT, VISIT) |>\n filter(is.na(CHANGE)) |>\n group_by(PATIENT) |>\n slice(1) |>\n ungroup() |>\n select(PATIENT, VISIT) |>\n mutate(strategy = \"JR\")\n\nimputeObj <- rbmi::impute(\n drawObj,\n references = c(\"PLACEBO\" = \"PLACEBO\", \"DRUG\" = \"PLACEBO\")\n)\n```\n:::\n\n\n\n\nThe results for M=500 imputed datasets using the MNAR JR approach are:\n\n\n::: {.cell}\n\n```{.r .cell-code}\npoolObj |>\n data.frame() |>\n dplyr::filter(grepl(\"7\", parameter)) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n \n
parameterestselciucipval
trt_7-2.1420011.1307406-4.3769240.092921146.017815e-02
lsm_ref_7-4.8269990.7859242-6.380406-3.273592807.507619e-09
lsm_alt_7-6.9690010.8224064-8.595170-5.342831803.220372e-14
\n
\n```\n\n:::\n:::\n\n\n\n\n## rbmi: MNAR CIR approach\nThe following changes need to be made in the code above to apply the Copy Increments in Reference (CIR) approach in `rbmi`. For `dat_ice` the strategy need to be changed to CIR. In the `impute()` step the `references` need to be specified. Here we set the reference for the DRUG group to PLACEBO.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat_ice <- dat_expand |>\n arrange(PATIENT, VISIT) |>\n filter(is.na(CHANGE)) |>\n group_by(PATIENT) |>\n slice(1) |>\n ungroup() |>\n select(PATIENT, VISIT) |>\n mutate(strategy = \"CIR\")\n\nimputeObj <- rbmi::impute(\n drawObj,\n references = c(\"PLACEBO\" = \"PLACEBO\", \"DRUG\" = \"PLACEBO\")\n)\n```\n:::\n\n\n\n\nThe results for M=500 imputed datasets using the MNAR CIR approach are:\n\n\n::: {.cell}\n\n```{.r .cell-code}\npoolObj |>\n data.frame() |>\n dplyr::filter(grepl(\"7\", parameter)) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n \n
parameterestselciucipval
trt_7-2.4733641.1062706-4.659453-0.2872742.686046e-02
lsm_ref_7-4.8198110.7792744-6.360108-3.2795146.080274e-09
lsm_alt_7-7.2931740.7932565-8.860947-5.7254023.597819e-16
\n
\n```\n\n:::\n:::\n\n\n\n## Summary of results\nIn the table we present the results of the different imputation strategies (and with varying number, *M*, of multiple imputation draws). Note that the results can be (slightly) different from the results above due to a possible different seed. The table show the contrast at Visit 7 between DRUG and PLACEBO [DRUG - PLACEBO]:\n\n| Method | Estimate | SE | 95% CI | p-value |\n|-------------------|----------|-------|------------------|---------|\n| Complete Case | -2.829 | 1.117 | -5.035 to -0.622 | 0.0123 |\n| MI - MAR (M=500) | -2.833 | 1.120 | -5.046 to -0.620 | 0.0125 |\n| MI - MAR (M=2000) | -2.837 | 1.118 | -5.047 to -0.627 | 0.0122 |\n| MI - MAR (M=5000) | -2.830 | 1.123 | -5.040 to -0.610 | 0.0128 |\n| MI - MNAR CR (M=500) | -2.377 | 1.119 | -4.588 to -0.167 | 0.0352 |\n| MI - MNAR CR (M=2000) | -2.391 | 1.110 | -4.585 to -0.198 | 0.0328 |\n| MI - MNAR CR (M=5000) | -2.394 | 1.112 | -4.592 to -0.197 | 0.0329 |\n| MI - MNAR JR (M=500) | -2.169 | 1.134 | -4.411 to 0.072 | 0.0577 |\n| MI - MNAR JR (M=2000) | -2.146 | 1.135 | -4.389 to 0.097 | 0.0606 |\n| MI - MNAR JR (M=5000) | -2.148 | 1.135 | -4.390 to 0.095 | 0.0603 |\n| MI - MNAR CIR (M=500) | -2.495 | 1.113 | -4.695 to -0.295 | 0.0265 |\n| MI - MNAR CIR (M=2000) | -2.469 | 1.116 | -4.674 to -0.263 | 0.0285 |\n| MI - MNAR CIR (M=5000) | -2.479 | 1.112 | -4.676 to -0.282 | 0.0273 |\n\n## Approximate Bayesian\n\nIn the `draws()` function it is possible to specify other methods. For example, the approximate Bayesian MI `method_approxbayes()` which is based on bootstrapping. 
`draws()` returns the draws from the posterior distribution of the parameters using an approximate Bayesian approach, where the sampling from the posterior distribution is simulated by fitting the MMRM model on bootstrap samples of the original dataset.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmethod <- rbmi::method_approxbayes(\n covariance = \"us\",\n threshold = 0.01,\n REML = TRUE,\n n_samples = 500\n)\n```\n:::\n\n\nIn the table we present the results of the approximate Bayesian approach for a CR imputation strategy. The table show the contrast at Visit 7 between DRUG and PLACEBO [DRUG - PLACEBO]:\n\n| Method | Estimate | SE | 95% CI | p-value |\n|-------------------|----------|-------|------------------|---------|\n| MI - MNAR CR (M=500) | -2.415 | 1.109 | -4.617 to -0.210 | 0.0320 |\n| MI - MNAR CR (M=2000) | -2.403 | 1.112 | -4.600 to -0.205 | 0.0323 |\n\n## Discussion\n\nA note on computational time: The total running time (including data loading, setting up data sets, MCMC run, imputing data and analysis MI data) for M=500 was about 26 seconds on a personal laptop. It increased to about 92 seconds for M=2000. Computational time was similar across different imputation strategies.\n\nWith a small number of `n_samples` in `method_bayes()` a warning could pop-up \"The largest R-hat is 1.08, indicating chains have not mixed. Running the chains for more iterations may help\". Increasing the number of `n_samples` will mostly solve this warning. For example, for this data example, this message is received when setting `n_samples` equal to a number below 100.\n\n## Appendix 1: mmrm as analysis model\n\nIn the `analyse()` function (at the moment of writing) the only available analysis function is `ancova`. However, the user is able to specify its own analysis function. See the `analyse()` function for more details.\n\nAnother possibility (although, not the most efficient) is to implement a for loop in which the model is fit on each imputed dataset. 
The obtained results could then be pooled using Rubin's rule. For example, suppose an MMRM should be fit on each imputed dataset:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmmrm_analyse_mi_function <- function(Impute_Obj) {\n # create all imputed datasets\n imputed_dfs = rbmi::extract_imputed_dfs(Impute_Obj)\n\n # create empty vectors to store mmrm analysis results\n est_vec = sd_vec = df_vec = NULL\n\n # for loop to save estimates per imputation\n for (k in 1:length(imputed_dfs)) {\n temp_dat = imputed_dfs[[k]]\n mmrm_fit_temp = mmrm::mmrm(\n CHANGE ~\n 1 +\n THERAPY +\n VISIT +\n BASVAL * VISIT +\n THERAPY * VISIT +\n GENDER +\n us(VISIT | PATIENT),\n data = temp_dat,\n reml = TRUE\n )\n em = emmeans::emmeans(\n mmrm_fit_temp,\n specs = trt.vs.ctrl ~ THERAPY * VISIT,\n at = list(VISIT = \"7\"),\n level = 0.95,\n adjust = \"none\",\n mode = \"df.error\"\n )\n est_vec[k] = summary(em$contrasts)$estimate\n sd_vec[k] = summary(em$contrasts)$SE\n df_vec[k] = summary(em$contrasts)$df\n }\n\n # summarize results using rubin's rule\n rr = rbmi:::rubin_rules(ests = est_vec, ses = sd_vec, v_com = mean(df_vec))\n rr$se_t = sqrt(rr$var_t)\n rr$t.stat = rr$est_point / sqrt(rr$var_t)\n rr$p_value = 2 * pt(q = rr$t.stat, df = rr$df, lower.tail = TRUE)\n\n return(rr = rr)\n}\n```\n:::\n\n\nThe following code then performs the analysis and pooling\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmmrm_analyse_mi_function(Impute_Obj = imputeObj)\n```\n:::\n\n\nIn the table we present the results of the Bayesian approach for a CR imputation strategy with an MMRM analysis model. 
The table show the contrast at Visit 7 between DRUG and PLACEBO [DRUG - PLACEBO]:\n\n| Method | Estimate | SE | 95% CI | p-value |\n|-------------------|----------|-------|------------------|---------|\n| MI - MNAR CR (M=500) | -2.415 | 1.109 | -4.607 to -0.223 | 0.0310 |\n| MI - MNAR CR (M=2000) | -2.388 | 1.111 | -4.584 to -0.193 | 0.0332 | \n\n\n## Reference\n\n[Carpenter JR, Roger JH & Kenward MG (2013)](https://doi.org/10.1080/10543406.2013.834911). Analysis of Longitudinal Trials with Protocol Deviation: A Framework for Relevant, Accessible Assumptions, and Inference via MI. *Journal of Biopharmaceutical Statistics* 23: 1352-1371.\n\n[Gower-Page C, Noci A & Wolbers M (2022)](https://doi.org/10.21105/joss.04251). rbmi: A R package for standard and reference-based multiple imputation methods. *Journal of Open Source Software* 7(74): 4251. \n\n[rbmi: Reference Based Multiple Imputation](https://cran.r-project.org/web/packages/rbmi/index.html)\n\n[rbmi: Quickstart](https://cran.r-project.org/web/packages/rbmi/vignettes/quickstart.html)\n\n[Roger J (2022, Dec 8)](https://baselbiometrics.github.io/home/docs/talks/20221208/5_JamesRoger%2020121118.pdf). Other statistical software for continuous longitudinal endpoints: SAS macros for multiple imputation. *Addressing intercurrent events: Treatment policy and hypothetical strategies*. Joint EFSPI and BBS virtual event.\n\n[Rubin DB (1976)](https://doi.org/10.1093/biomet/63.3.581). Inference and Missing Data. *Biometrika* 63: 581–592.\n\n[Rubin DB (1987)](https://onlinelibrary.wiley.com/doi/book/10.1002/9780470316696). *Multiple Imputation for Nonresponse in Surveys*. New York: John Wiley & Sons.\n\n[von Hippel PT & Bartlett JW (2021)](https://doi.org/10.1214/20-STS793). Maximum likelihood multiple imputation: Faster imputations and consistent standard errors without posterior draws. *Statistical Science* 36(3): 400–420. 
\n\n[Wolbers M, Noci A, Delmar P, Gower-Page C, Yiu S & Bartlett JW (2022)](https://onlinelibrary.wiley.com/doi/full/10.1002/pst.2234). Standard and reference-based conditional mean imputation. *Pharmaceutical Statistics* 21(6): 1246-1257.\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-23\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P assertthat 0.2.1 2019-03-21 [?] RSPM\n P backports 1.5.0 2024-05-23 [?] RSPM\n boot 1.3-32 2025-08-29 [2] CRAN (R 4.5.2)\n P broom 1.0.12 2026-01-27 [?] RSPM\n P callr 3.7.6 2024-03-25 [?] RSPM\n P checkmate 2.3.4 2026-02-03 [?] RSPM\n P cli 3.6.5 2025-04-23 [?] RSPM\n codetools 0.2-20 2024-03-31 [2] CRAN (R 4.5.2)\n P curl 7.0.0 2025-08-19 [?] RSPM\n P digest 0.6.39 2025-11-19 [?] RSPM\n P dplyr * 1.2.0 2026-02-03 [?] RSPM\n P emmeans * 2.0.1 2025-12-16 [?] RSPM\n P estimability 1.5.1 2024-05-12 [?] RSPM\n P evaluate 1.0.5 2025-08-27 [?] RSPM\n P farver 2.1.2 2024-05-13 [?] RSPM\n P fastmap 1.2.0 2024-05-15 [?] RSPM\n P forcats 1.0.1 2025-09-25 [?] RSPM\n P foreach 1.5.2 2022-02-02 [?] RSPM\n P fs 1.6.6 2025-04-12 [?] RSPM\n P generics 0.1.4 2025-05-09 [?] RSPM\n P ggplot2 4.0.2 2026-02-03 [?] RSPM\n P glmnet 4.1-10 2025-07-17 [?] RSPM\n P glue 1.8.0 2024-09-30 [?] RSPM\n P gridExtra 2.3 2017-09-09 [?] RSPM\n P gt * 1.3.0 2026-01-22 [?] RSPM\n P gtable 0.3.6 2024-10-25 [?] 
RSPM\n P haven 2.5.5 2025-05-30 [?] RSPM\n P hms 1.1.4 2025-10-17 [?] RSPM\n P htmltools 0.5.9 2025-12-04 [?] RSPM\n P htmlwidgets 1.6.4 2023-12-06 [?] RSPM\n P inline 0.3.21 2025-01-09 [?] RSPM\n P iterators 1.0.14 2022-02-05 [?] RSPM\n P jinjar 0.3.2 2025-03-13 [?] RSPM\n P jomo 2.7-6 2023-04-15 [?] RSPM\n P jsonlite 2.0.0 2025-03-27 [?] RSPM\n P knitr 1.51 2025-12-20 [?] RSPM\n P labelled * 2.16.0 2025-10-22 [?] RSPM\n lattice 0.22-7 2025-04-02 [2] CRAN (R 4.5.2)\n P lifecycle 1.0.5 2026-01-08 [?] RSPM\n P lme4 1.1-38 2025-12-02 [?] RSPM\n P loo 2.9.0 2025-12-23 [?] RSPM\n P magrittr 2.0.4 2025-09-12 [?] RSPM\n MASS 7.3-65 2025-02-28 [2] CRAN (R 4.5.2)\n Matrix 1.7-4 2025-08-28 [2] CRAN (R 4.5.2)\n P matrixStats 1.5.0 2025-01-07 [?] RSPM\n P mice * 3.19.0 2025-12-10 [?] RSPM\n P minqa 1.2.8 2024-08-17 [?] RSPM\n P mitml 0.4-5 2023-03-08 [?] RSPM\n P mmrm * 0.3.17 2026-01-08 [?] RSPM\n P multcomp 1.4-29 2025-10-20 [?] RSPM\n P mvtnorm 1.3-3 2025-01-10 [?] RSPM\n nlme 3.1-168 2025-03-31 [2] CRAN (R 4.5.2)\n P nloptr 2.2.1 2025-03-17 [?] RSPM\n nnet 7.3-20 2025-01-01 [2] CRAN (R 4.5.2)\n P otel 0.2.0 2025-08-29 [?] RSPM\n P pan 1.9 2023-12-07 [?] RSPM\n P pillar 1.11.1 2025-09-17 [?] RSPM\n P pkgbuild 1.4.8 2025-05-26 [?] RSPM\n P pkgconfig 2.0.3 2019-09-22 [?] RSPM\n P processx 3.8.6 2025-02-21 [?] RSPM\n P ps 1.9.1 2025-04-12 [?] RSPM\n P purrr 1.2.1 2026-01-09 [?] RSPM\n P QuickJSR 1.9.0 2026-01-25 [?] RSPM\n P R6 2.6.1 2025-02-15 [?] RSPM\n P rbibutils 2.4.1 2026-01-21 [?] RSPM\n P rbmi * 1.6.0 2026-01-23 [?] RSPM\n P RColorBrewer 1.1-3 2022-04-03 [?] RSPM\n P Rcpp 1.1.1 2026-01-10 [?] RSPM\n P RcppParallel 5.1.11-1 2025-08-27 [?] RSPM\n P Rdpack 2.6.6 2026-02-08 [?] RSPM\n P reformulas 0.4.4 2026-02-02 [?] RSPM\n renv 1.0.10 2024-10-05 [1] RSPM (R 4.5.2)\n P rlang 1.1.7 2026-01-09 [?] RSPM\n P rmarkdown 2.30 2025-09-28 [?] RSPM\n rpart 4.1.24 2025-01-07 [2] CRAN (R 4.5.2)\n P rstan 2.32.7 2025-03-10 [?] RSPM\n P S7 0.2.1 2025-11-14 [?] 
RSPM\n P sandwich 3.1-1 2024-09-15 [?] RSPM\n P sass 0.4.10 2025-04-11 [?] RSPM\n P scales 1.4.0 2025-04-24 [?] RSPM\n P sessioninfo 1.2.3 2025-02-05 [?] RSPM\n P shape 1.4.6.1 2024-02-23 [?] RSPM\n P StanHeaders 2.32.10 2024-07-15 [?] RSPM\n P stringi 1.8.7 2025-03-27 [?] RSPM\n P stringr 1.6.0 2025-11-04 [?] RSPM\n survival 3.8-3 2024-12-17 [2] CRAN (R 4.5.2)\n P TH.data 1.1-5 2025-11-17 [?] RSPM\n P tibble 3.3.1 2026-01-11 [?] RSPM\n P tidyr * 1.3.2 2025-12-19 [?] RSPM\n P tidyselect 1.2.1 2024-03-11 [?] RSPM\n P TMB 1.9.19 2025-12-15 [?] RSPM\n P utf8 1.2.6 2025-06-08 [?] RSPM\n P V8 8.0.1 2025-10-10 [?] RSPM\n P vctrs 0.7.1 2026-01-23 [?] RSPM\n P withr 3.0.2 2024-10-28 [?] RSPM\n P xfun 0.56 2026-01-18 [?] RSPM\n P xml2 1.5.2 2026-01-17 [?] RSPM\n P xtable 1.8-4 2019-04-21 [?] RSPM\n P yaml 2.3.12 2025-12-10 [?] RSPM\n P zoo 1.8-15 2025-12-15 [?] RSPM\n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n\n * ── Packages attached to the search path.\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n\n:::\n\n", + "markdown": "---\ntitle: \"Reference-Based Multiple Imputation (joint modelling): Continuous Data\"\n---\n\n## Libraries\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# General\nlibrary(dplyr)\nlibrary(tidyr)\nlibrary(gt)\nlibrary(labelled)\n\n# Methodlolgy specific\nlibrary(mmrm)\nlibrary(emmeans)\nlibrary(rbmi)\nlibrary(mice) # only used md.pattern()\n```\n:::\n\n\n## Reference-based multiple imputation (rbmi)\n\n### Methodology introduction\nReference-based multiple imputation methods have become popular for handling missing data, as well as for conducting sensitivity analyses, in randomized clinical trials. In the context of a repeatedly measured continuous endpoint assuming a multivariate normal model, [Carpenter et al. 
(2013)](https://www.tandfonline.com/doi/full/10.1080/10543406.2013.834911) proposed a framework to extend the usual MAR-based MI approach by postulating assumptions about the joint distribution of pre- and post-deviation data. Under this framework, one makes qualitative assumptions about how individuals’ missing outcomes relate to those observed in relevant groups in the trial, based on plausible clinical scenarios. Statistical analysis then proceeds using the method of multiple imputation ([Rubin 1976](https://doi.org/10.1093/biomet/63.3.581), [Rubin 1987]((https://onlinelibrary.wiley.com/doi/book/10.1002/9780470316696))).\n\nIn general, multiple imputation of a repeatedly measured continuous outcome can be done via 2 computational routes ([Roger 2022](https://baselbiometrics.github.io/home/docs/talks/20221208/5_JamesRoger%2020121118.pdf)):\n\n 1. Stepwise: split problem into separate imputations of data at each visit\n \n + requires monotone missingness, such as missingness due to withdrawal\n \n + conditions on the imputed values at previous visit\n \n + Bayesian linear regression problem is much simpler with monotone missing, as one can sample directly using conjugate priors\n\n 2. One-step approach (joint modelling): Fit a Bayesian full multivariate normal repeated measures model using MCMC and then draw a sample.\n\nHere, we illustrate reference-based multiple imputation of a continuous outcome measured repeatedly via the so-called one-step approach.\n\n### rbmi package\nThe `rbmi` package [Gower-Page et al. (2022)](https://joss.theoj.org/papers/10.21105/joss.04251) will be used for the one-step approach of the reference-based multiple imputation using R. The package implements standard and reference based multiple imputation methods for continuous longitudinal endpoints . In particular, this package supports deterministic conditional mean imputation and jackknifing as described in [Wolbers et al. 
(2022)](https://onlinelibrary.wiley.com/doi/full/10.1002/pst.2234), convential MI based on Bayesian posterior draws as described in [Carpenter et al. (2013)](https://www.tandfonline.com/doi/full/10.1080/10543406.2013.834911), and bootstrapped maximum likelihood imputation as described in [von Hippel and Bartlett (2021)](https://doi.org/10.1214/20-STS793).\n\nThe following standard and reference-based multiple imputation approaches will be illustrated here: \n \n * MAR (Missing At Random)\n \n * CIR (Copy Increment from Reference)\n \n * J2R (Jump to Reference)\n \n * CR (Copy Reference)\n\n\n## Data used\nA publicly available example [dataset](https://r-packages.io/datasets/antidepressant_data) from an antidepressant clinical trial of an active drug versus placebo is used. Overall, data of 172 patients is available with 88 patients receiving placebo and 84 receiving active drug. This data is also used in the `rbmi` package [quickstart vignette](https://cran.r-project.org/web/packages/rbmi/vignettes/quickstart.html).\n\nThe relevant endpoint is the Hamilton 17-item depression rating scale (HAMD17) which was assessed at baseline and at weeks 1, 2, 4, and 6 (visits 4-7). Study drug discontinuation occurred in 24% (20/84) of subjects from the active drug and 26% (23/88) of subjects from placebo. All data after study drug discontinuation are missing. \n\n\n::: {.cell}\n\n```{.r .cell-code}\ndata(\"antidepressant_data\")\ndat <- antidepressant_data |>\n dplyr::select(\n PATIENT,\n GENDER,\n THERAPY,\n RELDAYS,\n VISIT,\n BASVAL,\n HAMDTL17,\n CHANGE\n ) |>\n dplyr::mutate(THERAPY = factor(THERAPY, levels = c(\"PLACEBO\", \"DRUG\"))) |>\n labelled::remove_labels()\n\ngt(head(dat, n = 10))\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGE
1503FDRUG743221-11
1503FDRUG1453220-12
1503FDRUG2863219-13
1503FDRUG4273217-15
1507FPLACEBO741411-3
1507FPLACEBO15514140
1507FPLACEBO296149-5
1507FPLACEBO427145-9
1509FDRUG742120-1
1509FDRUG1452118-3
\n
\n```\n\n:::\n:::\n\n\nThe number of patients per visit and arm are:\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat |>\n group_by(VISIT, THERAPY) |>\n dplyr::summarise(N = n())\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\n`summarise()` has regrouped the output.\nℹ Summaries were computed grouped by VISIT and THERAPY.\nℹ Output is grouped by VISIT.\nℹ Use `summarise(.groups = \"drop_last\")` to silence this message.\nℹ Use `summarise(.by = c(VISIT, THERAPY))` for per-operation grouping\n (`?dplyr::dplyr_by`) instead.\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 8 × 3\n# Groups: VISIT [4]\n VISIT THERAPY N\n \n1 4 PLACEBO 88\n2 4 DRUG 84\n3 5 PLACEBO 81\n4 5 DRUG 77\n5 6 PLACEBO 76\n6 6 DRUG 73\n7 7 PLACEBO 65\n8 7 DRUG 64\n```\n\n\n:::\n:::\n\n\nThe mean change from baseline of the endpoint (Hamilton 17-item depression rating scale, HAMD17) per visit per treatment group using only the complete cases are:\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat |>\n group_by(VISIT, THERAPY) |>\n dplyr::summarise(N = n(), MEAN = mean(CHANGE))\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\n`summarise()` has regrouped the output.\nℹ Summaries were computed grouped by VISIT and THERAPY.\nℹ Output is grouped by VISIT.\nℹ Use `summarise(.groups = \"drop_last\")` to silence this message.\nℹ Use `summarise(.by = c(VISIT, THERAPY))` for per-operation grouping\n (`?dplyr::dplyr_by`) instead.\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 8 × 4\n# Groups: VISIT [4]\n VISIT THERAPY N MEAN\n \n1 4 PLACEBO 88 -1.51\n2 4 DRUG 84 -1.82\n3 5 PLACEBO 81 -2.70\n4 5 DRUG 77 -4.71\n5 6 PLACEBO 76 -4.07\n6 6 DRUG 73 -6.79\n7 7 PLACEBO 65 -5.14\n8 7 DRUG 64 -8.34\n```\n\n\n:::\n:::\n\n\nThe missingness pattern is show below (1=observed data point (blue), 0=missing data point (red)). The incomplete data is primarily monotone in nature. 128 patients have complete data for all visits (all 1's at each visit). 
20, 10 and 13 patients have 1, 2 or 3 monotone missing data, respectively. Further, there is a single additional intermittent missing observation (patient 3618).\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat_wide = dat |>\n dplyr::select(PATIENT, VISIT, CHANGE) |>\n pivot_wider(\n id_cols = PATIENT,\n names_from = VISIT,\n names_prefix = \"VISIT_\",\n values_from = CHANGE\n )\n\ndat_wide |>\n dplyr::select(starts_with(\"VISIT_\")) |>\n mice::md.pattern(plot = TRUE, rotate.names = TRUE)\n```\n\n::: {.cell-output-display}\n![](rbmi_continuous_joint_files/figure-html/explore data 2-1.png){width=672}\n:::\n\n::: {.cell-output .cell-output-stdout}\n\n```\n VISIT_4 VISIT_5 VISIT_6 VISIT_7 \n128 1 1 1 1 0\n20 1 1 1 0 1\n10 1 1 0 0 2\n1 1 0 1 1 1\n13 1 0 0 0 3\n 0 14 23 43 80\n```\n\n\n:::\n:::\n\n\n## Complete case analysis\n\nA complete case analysis is performed using mixed model for repeated measures (MMRM) with covariates: treatment [THERAPY], gender [GENDER], visit [VISIT] as factors; baseline score [BASVAL] as continuous; and visit-by-treatment [THERAPY * VISIT] interaction, and visit-by-baseline [BASVAL * VISIT] interaction. An unstructured covariance matrix is used.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmmrm_fit = mmrm::mmrm(\n CHANGE ~\n 1 +\n THERAPY +\n GENDER +\n VISIT +\n BASVAL +\n THERAPY * VISIT +\n BASVAL * VISIT +\n us(VISIT | PATIENT),\n data = dat,\n reml = TRUE\n)\nsummary(mmrm_fit)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nmmrm fit\n\nFormula: \nCHANGE ~ 1 + THERAPY + GENDER + VISIT + BASVAL + THERAPY * VISIT + \n BASVAL * VISIT + us(VISIT | PATIENT)\nData: dat (used 608 observations from 172 subjects with maximum 4 \ntimepoints)\nCovariance: unstructured (10 variance parameters)\nMethod: Satterthwaite\nVcov Method: Asymptotic\nInference: REML\n\nModel selection criteria:\n AIC BIC logLik deviance \n 3512.9 3544.4 -1746.5 3492.9 \n\nCoefficients: \n Estimate Std. 
Error df t value Pr(>|t|) \n(Intercept) 3.16355 1.20260 168.64000 2.631 0.00931 ** \nTHERAPYDRUG 0.06603 0.68662 168.11000 0.096 0.92350 \nGENDERM 0.31961 0.68216 168.46000 0.469 0.64001 \nVISIT5 -0.50646 1.22706 157.16000 -0.413 0.68036 \nVISIT6 -0.39390 1.41983 149.35000 -0.277 0.78184 \nVISIT7 -2.29237 1.62198 142.91000 -1.413 0.15974 \nBASVAL -0.27866 0.06222 168.05000 -4.479 1.38e-05 ***\nTHERAPYDRUG:VISIT5 -1.49495 0.73342 156.86000 -2.038 0.04320 * \nTHERAPYDRUG:VISIT6 -2.31710 0.85860 151.23000 -2.699 0.00775 ** \nTHERAPYDRUG:VISIT7 -2.89468 0.96582 139.86000 -2.997 0.00323 ** \nVISIT5:BASVAL -0.03429 0.06567 157.48000 -0.522 0.60231 \nVISIT6:BASVAL -0.11482 0.07646 150.73000 -1.502 0.13527 \nVISIT7:BASVAL -0.04656 0.08679 142.04000 -0.537 0.59244 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\nCovariance estimate:\n 4 5 6 7\n4 19.7877 16.6237 15.4265 16.4578\n5 16.6237 34.3231 25.4682 26.2897\n6 15.4265 25.4682 38.4094 33.9331\n7 16.4578 26.2897 33.9331 45.3625\n```\n\n\n:::\n:::\n\n\nUsing the `emmeans` package/function least square means and contrast can be obtained.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nem = emmeans::emmeans(\n mmrm_fit,\n specs = trt.vs.ctrl ~ THERAPY * VISIT,\n at = list(VISIT = \"7\"),\n level = 0.95,\n adjust = \"none\",\n mode = \"df.error\"\n)\n\nem_contrast = broom::tidy(em$contrasts, conf.int = TRUE, conf.level = 0.95)\nem_contrast |>\n gt() |>\n fmt_number(decimals = 3)\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n\n \n \n
termcontrastnull.valueestimatestd.errordfconf.lowconf.highstatisticp.value
THERAPY*VISITDRUG VISIT7 - PLACEBO VISIT70.000−2.8291.117150.711−5.035−0.622−2.5330.012
\n
\n```\n\n:::\n:::\n\n\nThe treatment difference at visit 7 is of interest, and is estimated to be -2.829 (se=1.117) with 95% CI of [-5.035 to -0.622] (p=0.0123).\n\n\n## rbmi: MAR approach\nThe code presented here is based on the `rbmi` package [quickstart vignette](https://cran.r-project.org/web/packages/rbmi/vignettes/quickstart.html).\n\n### Create needed datasets and specify imputation strategy\n`rbmi` expects its input dataset to be complete; that is, there must be one row per subject for each visit (note: in clinical trials ADAMs typically do not have this required complete data structure). Missing outcome values should be coded as `NA`, while missing covariate values are not allowed. If the dataset is incomplete, then the `expand_locf()` function can be used to add any missing rows, using LOCF imputation to carry forward the observed baseline covariate values to visits with missing outcomes.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat_expand <- rbmi::expand_locf(\n dat,\n PATIENT = levels(dat$PATIENT), # expand by PATIENT and VISIT\n VISIT = levels(dat$VISIT),\n vars = c(\"BASVAL\", \"THERAPY\", \"GENDER\"), # complete covariates using LOCF\n group = c(\"PATIENT\"),\n order = c(\"PATIENT\", \"VISIT\") # sort\n)\n```\n:::\n\n\nFor example, the data of patient 1513 in the original data and expanded data are:\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat |>\n dplyr::filter(PATIENT == \"1513\") |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGE
1513MDRUG7419245
\n
\n```\n\n:::\n\n```{.r .cell-code}\ndat_expand |>\n dplyr::filter(PATIENT == \"1513\") |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGE
1513MDRUG7419245
1513MDRUGNA519NANA
1513MDRUGNA619NANA
1513MDRUGNA719NANA
\n
\n```\n\n:::\n:::\n\n\nNext, a dataset must be created specifying which data points should be imputed with the specified imputation strategy. The dataset `dat_ice` is created which specifies the first visit affected by an intercurrent event (ICE) and the imputation strategy for handling missing outcome data after the ICE. At most one ICE which is to be imputed is allowed per subject. In the example, the subject’s first visit affected by the ICE “study drug discontinuation” corresponds to the first terminal missing observation\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat_ice <- dat_expand |>\n arrange(PATIENT, VISIT) |>\n filter(is.na(CHANGE)) |>\n group_by(PATIENT) |>\n slice(1) |>\n ungroup() |>\n select(PATIENT, VISIT) |>\n mutate(strategy = \"MAR\")\n\ngt(head(dat_ice))\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n\n\n \n\n\n \n\n\n \n\n\n \n\n\n \n\n\n \n \n
PATIENTVISITstrategy
15135MAR
15145MAR
15175MAR
18047MAR
21047MAR
21185MAR
\n
\n```\n\n:::\n:::\n\n\nIn this dataset, subject 3618 has an intermittent missing values which does not correspond to a study drug discontinuation. We therefore remove this subject from `dat_ice`. In the later imputation step, it will automatically be imputed under the default MAR assumption.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat_ice <- dat_ice[-which(dat_ice$PATIENT == 3618), ]\n```\n:::\n\n\n### Fit imputation model and draw posterior parameters\nThe `vars` object using using `set_vars()` defines the names of key variables in the dataset and the covariates included in the imputation model. If you wish to include interaction terms these need to be added in the covariates input.\n\nThe `method` object specifies the statistical method used to fit the imputation models and to create imputed datasets.\n\nThe `draws()` function fits the imputation model and stores the corresponding parameter estimates and Bayesian posterior parameter draws.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nvars <- rbmi::set_vars(\n outcome = \"CHANGE\",\n visit = \"VISIT\",\n subjid = \"PATIENT\",\n group = \"THERAPY\",\n covariates = c(\"GENDER\", \"BASVAL*VISIT\", \"THERAPY*VISIT\")\n)\n\nmethod <- rbmi::method_bayes(\n n_samples = 500,\n control = rbmi::control_bayes(warmup = 500, thin = 10)\n)\n\nset.seed(12345)\ndrawObj <- draws(\n data = dat_expand,\n data_ice = dat_ice,\n vars = vars,\n method = method,\n quiet = TRUE\n)\n\ndrawObj\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\nDraws Object\n------------\nNumber of Samples: 500\nNumber of Failed Samples: 0\nModel Formula: CHANGE ~ 1 + THERAPY + VISIT + GENDER + BASVAL * VISIT + THERAPY * VISIT\nImputation Type: random\nMethod:\n name: Bayes\n covariance: us\n same_cov: TRUE\n n_samples: 500\n prior_cov: default\nControls:\n warmup: 500\n thin: 10\n chains: 1\n init: mmrm\n seed: 824960212\n```\n\n\n:::\n:::\n\n\n### Generate imputed datasets\n\nThe next step is to use the parameters from the imputation model to generate the 
imputed datasets. This is done via the `impute()` function. The function only has two key inputs: the imputation model output from `draws()` and the `references` groups relevant to reference-based imputation methods. Since we are using the MAR approach here, we can set it to NULL.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nimputeObj <- rbmi::impute(draws = drawObj, references = NULL)\n```\n:::\n\n\nIn case we would like to access the imputed datasets, we can use the `extract_imputed_dfs()` function. For example, the imputed values in the 10th imputed dataset for patient 1513 are:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nimputed_dfs = rbmi::extract_imputed_dfs(imputeObj)\nMI_10 = imputed_dfs[[10]]\nMI_10$PATIENT_ID = dat_expand$PATIENT\n\nMI_10 |>\n dplyr::filter(PATIENT_ID == \"1513\") |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGEPATIENT_ID
new_pt_5MDRUG7419245.0000001513
new_pt_5MDRUGNA519NA4.1523211513
new_pt_5MDRUGNA619NA-11.1876461513
new_pt_5MDRUGNA719NA-10.2870141513
\n
\n```\n\n:::\n:::\n\n\n### Analyse imputed datasets\nThe next step is to run the analysis model on each imputed dataset. This is done by defining an analysis function and then calling the `analyse()` function to apply this function to each imputed dataset. The `ancova()` function provided by the `rbmi` package which fits a separate ANCOVA model for the outcomes from each visit is used.\n\nThe `ancova()` function uses the `set_vars()` function which determines the names of the key variables within the data and the covariates (in addition to the treatment group) for which the analysis model will be adjusted.\n\nNote: In Appendix 1 below we show how you can easily use a different analysis method (e.g., mmrm).\n\n\n::: {.cell}\n\n```{.r .cell-code}\nvars_analyse <- rbmi::set_vars(\n outcome = \"CHANGE\",\n visit = \"VISIT\",\n subjid = \"PATIENT\",\n group = \"THERAPY\",\n covariates = c(\"BASVAL\", \"GENDER\")\n)\n\nanaObj <- rbmi::analyse(\n imputations = imputeObj,\n fun = ancova,\n vars = vars_analyse\n)\n```\n:::\n\n\n### Pool results\n\nFinally, the `pool()` function can be used to summarise the analysis results across multiple imputed datasets to provide an overall statistic with a standard error, confidence intervals and a p-value for the hypothesis test of the null hypothesis that the effect is equal to 0. Since we used `method_bayes()`, pooling and inference are based on Rubin’s rules.\n\nHere, the treatment difference at visit 7 is of interest. Since we set PLACEBO as the first factor in the variable `THERAPY` this corresponds to `ref`, whereas DRUG corresponds to `alt`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\npoolObj <- rbmi::pool(anaObj, conf.level = 0.95, alternative = \"two.sided\")\n\npoolObj |>\n data.frame() |>\n dplyr::filter(grepl(\"7\", parameter)) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n \n
parameterestselciucipval
trt_7-2.8168901.1167760-5.024381-0.60939751.275097e-02
lsm_ref_7-4.8150830.7780929-6.353197-3.27696876.118720e-09
lsm_alt_7-7.6319720.7908552-9.195110-6.06883502.580413e-17
\n
\n```\n\n:::\n:::\n\n\n\n## rbmi: MNAR CR approach\nThe following changes need to be made in the code above to apply the Copy Reference (CR) approach in `rbmi`. For `dat_ice` the strategy need to be changed to CR. In the `impute()` step the `references` need to be specified. Here we set the reference for the DRUG group to PLACEBO.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat_ice <- dat_expand |>\n arrange(PATIENT, VISIT) |>\n filter(is.na(CHANGE)) |>\n group_by(PATIENT) |>\n slice(1) |>\n ungroup() |>\n select(PATIENT, VISIT) |>\n mutate(strategy = \"CR\")\n\nimputeObj <- rbmi::impute(\n drawObj,\n references = c(\"PLACEBO\" = \"PLACEBO\", \"DRUG\" = \"PLACEBO\")\n)\n```\n:::\n\n\n\n\nThe results for M=500 imputed datasets using the MNAR CR approach are:\n\n\n::: {.cell}\n\n```{.r .cell-code}\npoolObj |>\n data.frame() |>\n dplyr::filter(grepl(\"7\", parameter)) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n \n
parameterestselciucipval
trt_7-2.3933181.1092180-4.585247-0.20138973.256562e-02
lsm_ref_7-4.8176610.7796982-6.358748-3.27657406.224847e-09
lsm_alt_7-7.2109790.7955835-8.783369-5.63858917.801168e-16
\n
\n```\n\n:::\n:::\n\n\n## rbmi: MNAR JR approach\nThe following changes need to be made in the code above to apply the Jump to Reference (JR) approach in `rbmi`. For `dat_ice` the strategy need to be changed to JR. In the `impute()` step the `references` need to be specified. Here we set the reference for the DRUG group to PLACEBO.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat_ice <- dat_expand |>\n arrange(PATIENT, VISIT) |>\n filter(is.na(CHANGE)) |>\n group_by(PATIENT) |>\n slice(1) |>\n ungroup() |>\n select(PATIENT, VISIT) |>\n mutate(strategy = \"JR\")\n\nimputeObj <- rbmi::impute(\n drawObj,\n references = c(\"PLACEBO\" = \"PLACEBO\", \"DRUG\" = \"PLACEBO\")\n)\n```\n:::\n\n\n\n\nThe results for M=500 imputed datasets using the MNAR JR approach are:\n\n\n::: {.cell}\n\n```{.r .cell-code}\npoolObj |>\n data.frame() |>\n dplyr::filter(grepl(\"7\", parameter)) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n \n
parameterestselciucipval
trt_7-2.1434371.1269939-4.3708220.083947795.915888e-02
lsm_ref_7-4.8217020.7905712-6.384460-3.258944779.511139e-09
lsm_alt_7-6.9651390.8185475-8.583534-5.346744322.496563e-14
\n
\n```\n\n:::\n:::\n\n\n\n\n## rbmi: MNAR CIR approach\nThe following changes need to be made in the code above to apply the Copy Increments in Reference (CIR) approach in `rbmi`. For `dat_ice` the strategy need to be changed to CIR. In the `impute()` step the `references` need to be specified. Here we set the reference for the DRUG group to PLACEBO.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat_ice <- dat_expand |>\n arrange(PATIENT, VISIT) |>\n filter(is.na(CHANGE)) |>\n group_by(PATIENT) |>\n slice(1) |>\n ungroup() |>\n select(PATIENT, VISIT) |>\n mutate(strategy = \"CIR\")\n\nimputeObj <- rbmi::impute(\n drawObj,\n references = c(\"PLACEBO\" = \"PLACEBO\", \"DRUG\" = \"PLACEBO\")\n)\n```\n:::\n\n\n\n\nThe results for M=500 imputed datasets using the MNAR CIR approach are:\n\n\n::: {.cell}\n\n```{.r .cell-code}\npoolObj |>\n data.frame() |>\n dplyr::filter(grepl(\"7\", parameter)) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n \n
parameterestselciucipval
trt_7-2.4743821.1080177-4.663971-0.28479182.703976e-02
lsm_ref_7-4.8145500.7842418-6.364835-3.26426567.837117e-09
lsm_alt_7-7.2889320.7952036-8.860613-5.71725164.368826e-16
\n
\n```\n\n:::\n:::\n\n\n\n## Summary of results\nIn the table we present the results of the different imputation strategies (and with varying number, *M*, of multiple imputation draws). Note that the results can be (slightly) different from the results above due to a possible different seed. The table show the contrast at Visit 7 between DRUG and PLACEBO [DRUG - PLACEBO]:\n\n| Method | Estimate | SE | 95% CI | p-value |\n|-------------------|----------|-------|------------------|---------|\n| Complete Case | -2.829 | 1.117 | -5.035 to -0.622 | 0.0123 |\n| MI - MAR (M=500) | -2.833 | 1.120 | -5.046 to -0.620 | 0.0125 |\n| MI - MAR (M=2000) | -2.837 | 1.118 | -5.047 to -0.627 | 0.0122 |\n| MI - MAR (M=5000) | -2.830 | 1.123 | -5.040 to -0.610 | 0.0128 |\n| MI - MNAR CR (M=500) | -2.377 | 1.119 | -4.588 to -0.167 | 0.0352 |\n| MI - MNAR CR (M=2000) | -2.391 | 1.110 | -4.585 to -0.198 | 0.0328 |\n| MI - MNAR CR (M=5000) | -2.394 | 1.112 | -4.592 to -0.197 | 0.0329 |\n| MI - MNAR JR (M=500) | -2.169 | 1.134 | -4.411 to 0.072 | 0.0577 |\n| MI - MNAR JR (M=2000) | -2.146 | 1.135 | -4.389 to 0.097 | 0.0606 |\n| MI - MNAR JR (M=5000) | -2.148 | 1.135 | -4.390 to 0.095 | 0.0603 |\n| MI - MNAR CIR (M=500) | -2.495 | 1.113 | -4.695 to -0.295 | 0.0265 |\n| MI - MNAR CIR (M=2000) | -2.469 | 1.116 | -4.674 to -0.263 | 0.0285 |\n| MI - MNAR CIR (M=5000) | -2.479 | 1.112 | -4.676 to -0.282 | 0.0273 |\n\n## Approximate Bayesian\n\nIn the `draws()` function it is possible to specify other methods. For example, the approximate Bayesian MI `method_approxbayes()` which is based on bootstrapping. 
`draws()` returns the draws from the posterior distribution of the parameters using an approximate Bayesian approach, where the sampling from the posterior distribution is simulated by fitting the MMRM model on bootstrap samples of the original dataset.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmethod <- rbmi::method_approxbayes(\n covariance = \"us\",\n threshold = 0.01,\n REML = TRUE,\n n_samples = 500\n)\n```\n:::\n\n\nIn the table we present the results of the approximate Bayesian approach for a CR imputation strategy. The table show the contrast at Visit 7 between DRUG and PLACEBO [DRUG - PLACEBO]:\n\n| Method | Estimate | SE | 95% CI | p-value |\n|-------------------|----------|-------|------------------|---------|\n| MI - MNAR CR (M=500) | -2.415 | 1.109 | -4.617 to -0.210 | 0.0320 |\n| MI - MNAR CR (M=2000) | -2.403 | 1.112 | -4.600 to -0.205 | 0.0323 |\n\n## Discussion\n\nA note on computational time: The total running time (including data loading, setting up data sets, MCMC run, imputing data and analysis MI data) for M=500 was about 26 seconds on a personal laptop. It increased to about 92 seconds for M=2000. Computational time was similar across different imputation strategies.\n\nWith a small number of `n_samples` in `method_bayes()` a warning could pop-up \"The largest R-hat is 1.08, indicating chains have not mixed. Running the chains for more iterations may help\". Increasing the number of `n_samples` will mostly solve this warning. For example, for this data example, this message is received when setting `n_samples` equal to a number below 100.\n\n## Appendix 1: mmrm as analysis model\n\nIn the `analyse()` function (at the moment of writing) the only available analysis function is `ancova`. However, the user is able to specify its own analysis function. See the `analyse()` function for more details.\n\nAnother possibility (although, not the most efficient) is to implement a for loop in which the model is fit on each imputed dataset. 
The obtained results could then be pooled using Rubin's rule. For example, suppose an MMRM should be fit on each imputed dataset:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmmrm_analyse_mi_function <- function(Impute_Obj) {\n # create all imputed datasets\n imputed_dfs = rbmi::extract_imputed_dfs(Impute_Obj)\n\n # create empty vectors to store mmrm analysis results\n est_vec = sd_vec = df_vec = NULL\n\n # for loop to save estimates per imputation\n for (k in 1:length(imputed_dfs)) {\n temp_dat = imputed_dfs[[k]]\n mmrm_fit_temp = mmrm::mmrm(\n CHANGE ~\n 1 +\n THERAPY +\n VISIT +\n BASVAL * VISIT +\n THERAPY * VISIT +\n GENDER +\n us(VISIT | PATIENT),\n data = temp_dat,\n reml = TRUE\n )\n em = emmeans::emmeans(\n mmrm_fit_temp,\n specs = trt.vs.ctrl ~ THERAPY * VISIT,\n at = list(VISIT = \"7\"),\n level = 0.95,\n adjust = \"none\",\n mode = \"df.error\"\n )\n est_vec[k] = summary(em$contrasts)$estimate\n sd_vec[k] = summary(em$contrasts)$SE\n df_vec[k] = summary(em$contrasts)$df\n }\n\n # summarize results using rubin's rule\n rr = rbmi:::rubin_rules(ests = est_vec, ses = sd_vec, v_com = mean(df_vec))\n rr$se_t = sqrt(rr$var_t)\n rr$t.stat = rr$est_point / sqrt(rr$var_t)\n rr$p_value = 2 * pt(q = rr$t.stat, df = rr$df, lower.tail = TRUE)\n\n return(rr = rr)\n}\n```\n:::\n\n\nThe following code then performs the analysis and pooling\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmmrm_analyse_mi_function(Impute_Obj = imputeObj)\n```\n:::\n\n\nIn the table we present the results of the Bayesian approach for a CR imputation strategy with an MMRM analysis model. 
The table show the contrast at Visit 7 between DRUG and PLACEBO [DRUG - PLACEBO]:\n\n| Method | Estimate | SE | 95% CI | p-value |\n|-------------------|----------|-------|------------------|---------|\n| MI - MNAR CR (M=500) | -2.415 | 1.109 | -4.607 to -0.223 | 0.0310 |\n| MI - MNAR CR (M=2000) | -2.388 | 1.111 | -4.584 to -0.193 | 0.0332 | \n\n\n## Reference\n\n[Carpenter JR, Roger JH & Kenward MG (2013)](https://doi.org/10.1080/10543406.2013.834911). Analysis of Longitudinal Trials with Protocol Deviation: A Framework for Relevant, Accessible Assumptions, and Inference via MI. *Journal of Biopharmaceutical Statistics* 23: 1352-1371.\n\n[Gower-Page C, Noci A & Wolbers M (2022)](https://doi.org/10.21105/joss.04251). rbmi: A R package for standard and reference-based multiple imputation methods. *Journal of Open Source Software* 7(74): 4251. \n\n[rbmi: Reference Based Multiple Imputation](https://cran.r-project.org/web/packages/rbmi/index.html)\n\n[rbmi: Quickstart](https://cran.r-project.org/web/packages/rbmi/vignettes/quickstart.html)\n\n[Roger J (2022, Dec 8)](https://baselbiometrics.github.io/home/docs/talks/20221208/5_JamesRoger%2020121118.pdf). Other statistical software for continuous longitudinal endpoints: SAS macros for multiple imputation. *Addressing intercurrent events: Treatment policy and hypothetical strategies*. Joint EFSPI and BBS virtual event.\n\n[Rubin DB (1976)](https://doi.org/10.1093/biomet/63.3.581). Inference and Missing Data. *Biometrika* 63: 581–592.\n\n[Rubin DB (1987)](https://onlinelibrary.wiley.com/doi/book/10.1002/9780470316696). *Multiple Imputation for Nonresponse in Surveys*. New York: John Wiley & Sons.\n\n[von Hippel PT & Bartlett JW (2021)](https://doi.org/10.1214/20-STS793). Maximum likelihood multiple imputation: Faster imputations and consistent standard errors without posterior draws. *Statistical Science* 36(3): 400–420. 
\n\n[Wolbers M, Noci A, Delmar P, Gower-Page C, Yiu S & Bartlett JW (2022)](https://onlinelibrary.wiley.com/doi/full/10.1002/pst.2234). Standard and reference-based conditional mean imputation. *Pharmaceutical Statistics* 21(6): 1246-1257.\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os Ubuntu 24.04.3 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-17\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P assertthat 0.2.1 2019-03-21 [?] RSPM (R 4.5.0)\n P backports 1.5.0 2024-05-23 [?] RSPM (R 4.5.0)\n boot 1.3-32 2025-08-29 [2] CRAN (R 4.5.2)\n P broom 1.0.12 2026-01-27 [?] RSPM (R 4.5.0)\n P callr 3.7.6 2024-03-25 [?] RSPM (R 4.5.0)\n P checkmate 2.3.4 2026-02-03 [?] RSPM (R 4.5.0)\n P cli 3.6.5 2025-04-23 [?] RSPM (R 4.5.0)\n codetools 0.2-20 2024-03-31 [2] CRAN (R 4.5.2)\n P curl 7.0.0 2025-08-19 [?] RSPM (R 4.5.0)\n P digest 0.6.39 2025-11-19 [?] RSPM (R 4.5.0)\n P dplyr * 1.2.0 2026-02-03 [?] RSPM (R 4.5.0)\n P emmeans * 2.0.1 2025-12-16 [?] RSPM (R 4.5.0)\n P estimability 1.5.1 2024-05-12 [?] RSPM (R 4.5.0)\n P evaluate 1.0.5 2025-08-27 [?] RSPM (R 4.5.0)\n P farver 2.1.2 2024-05-13 [?] RSPM (R 4.5.0)\n P fastmap 1.2.0 2024-05-15 [?] RSPM (R 4.5.0)\n P forcats 1.0.1 2025-09-25 [?] RSPM (R 4.5.0)\n P foreach 1.5.2 2022-02-02 [?] RSPM (R 4.5.0)\n P fs 1.6.6 2025-04-12 [?] RSPM (R 4.5.0)\n P generics 0.1.4 2025-05-09 [?] RSPM (R 4.5.0)\n P ggplot2 4.0.2 2026-02-03 [?] RSPM (R 4.5.0)\n P glmnet 4.1-10 2025-07-17 [?] RSPM (R 4.5.0)\n P glue 1.8.0 2024-09-30 [?] 
RSPM (R 4.5.0)\n P gridExtra 2.3 2017-09-09 [?] RSPM (R 4.5.0)\n P gt * 1.3.0 2026-01-22 [?] RSPM (R 4.5.0)\n P gtable 0.3.6 2024-10-25 [?] RSPM (R 4.5.0)\n P haven 2.5.5 2025-05-30 [?] RSPM (R 4.5.0)\n P hms 1.1.4 2025-10-17 [?] RSPM (R 4.5.0)\n P htmltools 0.5.9 2025-12-04 [?] RSPM (R 4.5.0)\n P htmlwidgets 1.6.4 2023-12-06 [?] RSPM (R 4.5.0)\n P inline 0.3.21 2025-01-09 [?] RSPM (R 4.5.0)\n P iterators 1.0.14 2022-02-05 [?] RSPM (R 4.5.0)\n P jinjar 0.3.2 2025-03-13 [?] RSPM (R 4.5.0)\n P jomo 2.7-6 2023-04-15 [?] RSPM (R 4.5.0)\n P jsonlite 2.0.0 2025-03-27 [?] RSPM (R 4.5.0)\n P knitr 1.51 2025-12-20 [?] RSPM (R 4.5.0)\n P labelled * 2.16.0 2025-10-22 [?] RSPM (R 4.5.0)\n lattice 0.22-7 2025-04-02 [2] CRAN (R 4.5.2)\n P lifecycle 1.0.5 2026-01-08 [?] RSPM (R 4.5.0)\n P lme4 1.1-38 2025-12-02 [?] RSPM (R 4.5.0)\n P loo 2.9.0 2025-12-23 [?] RSPM (R 4.5.0)\n P magrittr 2.0.4 2025-09-12 [?] RSPM (R 4.5.0)\n MASS 7.3-65 2025-02-28 [2] CRAN (R 4.5.2)\n Matrix 1.7-4 2025-08-28 [2] CRAN (R 4.5.2)\n P matrixStats 1.5.0 2025-01-07 [?] RSPM (R 4.5.0)\n P mice * 3.19.0 2025-12-10 [?] RSPM (R 4.5.0)\n P minqa 1.2.8 2024-08-17 [?] RSPM (R 4.5.0)\n P mitml 0.4-5 2023-03-08 [?] RSPM (R 4.5.0)\n P mmrm * 0.3.17 2026-01-08 [?] RSPM (R 4.5.0)\n P multcomp 1.4-29 2025-10-20 [?] RSPM (R 4.5.0)\n P mvtnorm 1.3-3 2025-01-10 [?] RSPM (R 4.5.0)\n nlme 3.1-168 2025-03-31 [2] CRAN (R 4.5.2)\n P nloptr 2.2.1 2025-03-17 [?] RSPM (R 4.5.0)\n nnet 7.3-20 2025-01-01 [2] CRAN (R 4.5.2)\n P otel 0.2.0 2025-08-29 [?] RSPM (R 4.5.0)\n P pan 1.9 2023-12-07 [?] RSPM (R 4.5.0)\n P pillar 1.11.1 2025-09-17 [?] RSPM (R 4.5.0)\n P pkgbuild 1.4.8 2025-05-26 [?] RSPM (R 4.5.0)\n P pkgconfig 2.0.3 2019-09-22 [?] RSPM (R 4.5.0)\n P processx 3.8.6 2025-02-21 [?] RSPM (R 4.5.0)\n P ps 1.9.1 2025-04-12 [?] RSPM (R 4.5.0)\n P purrr 1.2.1 2026-01-09 [?] RSPM (R 4.5.0)\n P QuickJSR 1.9.0 2026-01-25 [?] RSPM (R 4.5.0)\n P R6 2.6.1 2025-02-15 [?] RSPM (R 4.5.0)\n P rbibutils 2.4.1 2026-01-21 [?] 
RSPM (R 4.5.0)\n P rbmi * 1.6.0 2026-01-23 [?] RSPM (R 4.5.0)\n P RColorBrewer 1.1-3 2022-04-03 [?] RSPM (R 4.5.0)\n P Rcpp 1.1.1 2026-01-10 [?] RSPM (R 4.5.0)\n P RcppParallel 5.1.11-1 2025-08-27 [?] RSPM (R 4.5.0)\n P Rdpack 2.6.6 2026-02-08 [?] RSPM (R 4.5.0)\n P reformulas 0.4.4 2026-02-02 [?] RSPM (R 4.5.0)\n renv 1.0.10 2024-10-05 [1] RSPM (R 4.5.2)\n P rlang 1.1.7 2026-01-09 [?] RSPM (R 4.5.0)\n P rmarkdown 2.30 2025-09-28 [?] RSPM (R 4.5.0)\n rpart 4.1.24 2025-01-07 [2] CRAN (R 4.5.2)\n P rstan 2.32.7 2025-03-10 [?] RSPM (R 4.5.0)\n P S7 0.2.1 2025-11-14 [?] RSPM (R 4.5.0)\n P sandwich 3.1-1 2024-09-15 [?] RSPM (R 4.5.0)\n P sass 0.4.10 2025-04-11 [?] RSPM (R 4.5.0)\n P scales 1.4.0 2025-04-24 [?] RSPM (R 4.5.0)\n P sessioninfo 1.2.2 2021-12-06 [?] RSPM (R 4.5.0)\n P shape 1.4.6.1 2024-02-23 [?] RSPM (R 4.5.0)\n P StanHeaders 2.32.10 2024-07-15 [?] RSPM (R 4.5.0)\n P stringi 1.8.7 2025-03-27 [?] RSPM (R 4.5.0)\n P stringr 1.6.0 2025-11-04 [?] RSPM (R 4.5.0)\n survival 3.8-3 2024-12-17 [2] CRAN (R 4.5.2)\n P TH.data 1.1-5 2025-11-17 [?] RSPM (R 4.5.0)\n P tibble 3.3.1 2026-01-11 [?] RSPM (R 4.5.0)\n P tidyr * 1.3.2 2025-12-19 [?] RSPM (R 4.5.0)\n P tidyselect 1.2.1 2024-03-11 [?] RSPM (R 4.5.0)\n P TMB 1.9.19 2025-12-15 [?] RSPM (R 4.5.0)\n P utf8 1.2.6 2025-06-08 [?] RSPM (R 4.5.0)\n P V8 8.0.1 2025-10-10 [?] RSPM (R 4.5.0)\n P vctrs 0.7.1 2026-01-23 [?] RSPM (R 4.5.0)\n P withr 3.0.2 2024-10-28 [?] RSPM (R 4.5.0)\n P xfun 0.56 2026-01-18 [?] RSPM (R 4.5.0)\n P xml2 1.5.2 2026-01-17 [?] RSPM (R 4.5.0)\n P xtable 1.8-4 2019-04-21 [?] RSPM (R 4.5.0)\n P yaml 2.3.12 2025-12-10 [?] RSPM (R 4.5.0)\n P zoo 1.8-15 2025-12-15 [?] 
RSPM (R 4.5.0)\n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.2/lib/R/library\n\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n\n:::\n\n", "supporting": [ "rbmi_continuous_joint_files" ], diff --git a/_freeze/R/rbmi_continuous_joint/figure-html/explore data 2-1.png b/_freeze/R/rbmi_continuous_joint/figure-html/explore data 2-1.png index 43f1472a2..89dc44f52 100644 Binary files a/_freeze/R/rbmi_continuous_joint/figure-html/explore data 2-1.png and b/_freeze/R/rbmi_continuous_joint/figure-html/explore data 2-1.png differ diff --git a/_freeze/R/recurrent_events/execute-results/html.json b/_freeze/R/recurrent_events/execute-results/html.json index 34997c7a5..d7fdae788 100644 --- a/_freeze/R/recurrent_events/execute-results/html.json +++ b/_freeze/R/recurrent_events/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "1fcc34f117daac3f5f215bf211c5e432", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"R Recurrent Events\"\n---\n\n\n\n# Recurrent event models\n\n## Setup\n\n### General libraries\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(dplyr)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\n\nAttaching package: 'dplyr'\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nThe following objects are masked from 'package:stats':\n\n filter, lag\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nThe following objects are masked from 'package:base':\n\n intersect, setdiff, setequal, union\n```\n\n\n:::\n\n```{.r .cell-code}\nlibrary(gt)\n```\n:::\n\n\n### Methodology specific libraries\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(survival)\n```\n:::\n\n\n## Modelling recurrent events\n\n### Methodology introduction\n\nTraditionally, survival analysis focuses on the time to a *single* first event. 
While there are many applications for such time-to-event analysis in clinical trials, this approach falls short when events of interest can occur multiple times within the same subject. Recurrent event models extend the traditional Cox proportional hazards framework to account for *multiple* events per subject ([Ozga et al. 2018](https://pubmed.ncbi.nlm.nih.gov/29301487/), [Amorim & Cai 2015](https://pubmed.ncbi.nlm.nih.gov/25501468/)).\n\nIn this tutorial, we will demonstrate how to implement different recurrent event models in R, specifically the Andersen-Gill, proportional means/rates, Prentice-Williams-Peterson, and Wei-Lin-Weissfeld models, using the well-known [`survival` package](https://cran.r-project.org/web/packages/survival/vignettes/survival.pdf). The R code follows the layout of [Amor 2023](https://www.lexjansen.com/phuse/2023/as/PAP_AS05.pdf), with additional insights taken from [Lu et al. 2018](https://www.lexjansen.com/pharmasug/2018/EP/PharmaSUG-2018-EP02.pdf).\n\nRecurrent event models can roughly be divided in three categories: counting process models, conditional models and marginal models. In the section below, we will explore the difference between each of these approaches. 
In addition, important aspects of data structure will be discussed by means of two fictional subjects, one with 4 events and 0 censored observations (events at time 6, 9, 56 and 88), and another with 2 events and 1 censored observation (events at time 42, 57, and censored at time 91).\n\nDefine the following:\n\n::: callout-note\n$\\lambda_i(t)$: hazard function for the $i$th subject at time $t$\n\n$\\lambda_{ij}(t)$: hazard function for the $j$th event of the $i$th subject at time $t$\n\n$\\lambda_0(t)$: common baseline hazard for all events\n\n$\\lambda_{0j}(t)$: event-specific baseline hazard for the $j$th event at time $t$\n\n$\\beta$: common parameter vector\n\n$\\beta_j$: event-specific parameter vector for the $j$th event\n\n$X_{ij}$: covariate vector for the $j$th event of the $i$th subject\n:::\n\n### Counting process models\n\n#### Andersen-Gill model ([Andersen & Gill 1982](https://projecteuclid.org/journals/annals-of-statistics/volume-10/issue-4/Coxs-Regression-Model-for-Counting-Processes--A-Large-Sample/10.1214/aos/1176345976.full))\n\n$$\n\\lambda_i(t) = \\lambda_0(t) \\exp \\left( \\beta X_{ij}(t) \\right) \\\n$$\n\n- Counting process approach: treats each subject as a multiple events counting process\n\n- Common baseline hazard $\\lambda_0(t)$\n\n- Common regression coefficients $\\beta$\n\n- Unrestricted risk set: a subject contributes to the risk set for an event as long as the subject is under observation, i.e. it can be at risk for a subsequent event even though the previous event did not yet occur\n\n- Order of events is not important\n\nAn essential assumption of the Andersen-Gill model is that of **independent events** within subjects. This, however, is often not realistic in clinical trial data. For example, let's say that we are modelling myocardial infarction (MI). If a patient has already experienced one MI, their risk of subsequent events may increase due to underlying cardiovascular damage or presence of other risk factors. 
Thus, the more events a patient has, the more likely they are to experience future events, indicating dependence rather than independence. To accurately model this within-subject correlation, extensions like time-varying covariates, a robust sandwich covariance estimator or frailty terms may be needed. In this tutorial, we will discuss the sandwich correction.\n\n**Lin-Wei-Yang-Ying (LWYY) model or proportional means/rates model ([Lin, Wei, Yang & Ying 2000](https://www.jstor.org/stable/2680616))**\n\nLin, Wei, Yang, and Ying introduced an improved version of the Andersen-Gill model in 2000 (often referred to as proportional means/rates model), featuring a robust sandwich estimator that explicitly accounts for individual subject clusters. These robust standard errors yield wider confidence intervals and provide asymptotically valid inference even when the independence assumption does not hold ([Lee et al. 2025](https://pubmed.ncbi.nlm.nih.gov/40490702/)). The original and improved Andersen-Gill model often appear interchangeable in the literature, and while they produce identical estimates, their robust standard errors can differ substantially, which may impact the conclusions drawn from statistical inference.\n\nFor both versions of the Andersen-Gill model, the data must be structured as follows:\n\n| Subject | Time interval | Event | Stratum |\n|---------|---------------|-------|---------|\n| 1 | (0, 6\\] | 1 | 1 |\n| 1 | (6, 9\\] | 1 | 1 |\n| 1 | (9, 56\\] | 1 | 1 |\n| 1 | (56, 88\\] | 1 | 1 |\n| 2 | (0, 42\\] | 1 | 1 |\n| 2 | (42, 87\\] | 1 | 1 |\n| 2 | (87, 91\\] | 0 | 1 |\n\nThis can be visually represented:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/AG_lineplot.png){fig-align='center' width=80%}\n:::\n:::\n\n\nIn both versions of the Andersen-Gill model, each new time interval starts where the previous one ends.\n\n### Conditional models\n\n#### Prentice-Williams-Peterson model ([Prentice, Williams & 
Peterson 1981](https://academic.oup.com/biomet/article-abstract/68/2/373/260402?redirectedFrom=fulltext))\n\n- Conditional approach: incorporates conditional strata to account for ordering/dependence of events\n\n- Stratified baseline hazard $\\lambda_{0j}(t)$\n\n- Stratified regression coefficients $\\beta_j$: can be pooled ($\\beta$) or kept as event-specific ($\\beta_j$) in the output\n\n- Restricted risk set: contributions to the risk set for a subsequent event are restricted to only consider subjects that already experienced the previous event\n\n- Order of events is important\n\nThe Prentice-Williams-Peterson model can incorporate both overall and event-specific effects $\\beta_j$ for each covariate. An often made assumption is to set $\\beta_1 = \\beta_2 = ... = \\beta_k = \\beta$ to estimate a common parameter $\\beta$.\n\nDepending on the outcome of interest, Prentice, Williams and Peterson suggested two distinct models:\n\n1. **Total time model**\n\n$$\n\\lambda_{ij}(t) = \\lambda_{0j}(t) \\exp \\left( \\beta_j X_{ij}(t) \\right) \\\n$$\n\nThe total time variant of the Prentice-Williams-Peterson model uses the same time intervals as the counting process approach (Andersen-Gill model), which is useful for modelling the full time course ($t$) of the recurrent event process, i.e. the hazard of *any* recurrence.\n\nFor the total time model, the data must be structured as follows:\n\n| Subject | Time interval | Event | Stratum |\n|---------|---------------|-------|---------|\n| 1 | (0, 6\\] | 1 | 1 |\n| 1 | (6, 9\\] | 1 | 2 |\n| 1 | (9, 56\\] | 1 | 3 |\n| 1 | (56, 88\\] | 1 | 4 |\n| 2 | (0, 42\\] | 1 | 1 |\n| 2 | (42, 87\\] | 1 | 2 |\n| 2 | (87, 91\\] | 0 | 3 |\n\nThis can be visually represented:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/PWPtt_lineplot.png){fig-align='center' width=80%}\n:::\n:::\n\n\nAgain, in the total time model, each new time intervals starts where the previous one ends.\n\n2. 
**Gap time model**\n\n$$\n\\lambda_{ij}(t) = \\lambda_{0j}(t - t_{j-1}) \\exp \\left( \\beta_j X_{ij}(t) \\right) \\\n$$\n\nThe gap time variant of the Prentice-Williams-Peterson model uses time intervals that start at zero and end at the length of time until the next event, which is useful for modelling the time between each of the recurring events ($t - t_{j-1}$), i.e. the hazard of recurrence *after the previous event*.\n\nFor the gap time model, the data must be structured as follows:\n\n| Subject | Time interval | Event | Stratum |\n|---------|---------------|-------|---------|\n| 1 | (0, 6\\] | 1 | 1 |\n| 1 | (0, 3\\] | 1 | 2 |\n| 1 | (0, 47\\] | 1 | 3 |\n| 1 | (0, 32\\] | 1 | 4 |\n| 2 | (0, 42\\] | 1 | 1 |\n| 2 | (0, 45\\] | 1 | 2 |\n| 2 | (0, 3\\] | 0 | 3 |\n\nThis can be visually represented:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/PWPgt_lineplot.png){fig-align='center' width=80%}\n:::\n:::\n\n\nIn the gap time model, each time interval starts at zero and has a length equal to the gap time between two neighboring events.\n\n### Marginal models\n\n#### Wei-Lin-Weissfeld model ([Wei, Lin & Weissfeld 1989](https://www.jstor.org/stable/2290084))\n\n$$\n\\lambda_{ij}(t) = \\lambda_{0j}(t) \\exp \\left( \\beta_j X_{ij}(t) \\right) \\\n$$\n\n- Marginal approach: treats each (recurrent) event as having a separate, marginal process\n\n- Stratified baseline hazard $\\lambda_{0j}(t)$\n\n- Stratified regression coefficients $\\beta_j$: can be pooled ($\\beta$) or kept as event-specific ($\\beta_j$) in the output\n\n- Semi-restricted risk set: all subjects contribute follow-up times to all *potential* events, i.e. each subject is at risk for all *potential* events, regardless of how many events that subject actually experiences\n\n- Order of events is not important\n\nAlthough the Wei-Lin-Weissfeld model has it roots in competing risks analysis, it conveniently lends itself to model recurrent events as well. 
Like the Andersen-Gill model, the Wei-Lin-Weissfeld model also assumes **independence** of events, which is often not feasible in practice. In addition, it is assumed there is no specific order among the events or that the events are different types of events, and not necessarily *recurrent* events.\n\nLike the Prentice-Williams-Peterson models, the Wei-Lin-Weissfeld model can incorporate both overall and event-specific effects $\\beta_j$ for each covariate. An often made assumption is to set $\\beta_1 = \\beta_2 = ... = \\beta_k = \\beta$ to estimate a common parameter $\\beta$. Another approach is to combine event-specific effects $\\beta_j$ to get an estimator of the average effect, as described in [Wei, Lin & Weissfeld 1989](https://www.jstor.org/stable/2290084) (this is not discussed further here).\n\nFor Wei-Lin-Weissfeld models, the data must be structured as follows:\n\n| Subject | Time interval | Event | Stratum |\n|---------|---------------|-------|---------|\n| 1 | (0, 6\\] | 1 | 1 |\n| 1 | (0, 9\\] | 1 | 2 |\n| 1 | (0, 56\\] | 1 | 3 |\n| 1 | (0, 88\\] | 1 | 4 |\n| 2 | (0, 42\\] | 1 | 1 |\n| 2 | (0, 87\\] | 1 | 2 |\n| 2 | (0, 91\\] | 0 | 3 |\n| 2 | (0, 91\\] | 0 | 4 |\n\nThis can be visually represented:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/WLW_lineplot.png){fig-align='center' width=80%}\n:::\n:::\n\n\nIn the Wei-Lin-Weissfeld model, each time intervals starts at zero and ends at its respective event time.\n\n### Overview of all models\n\nIn summary, the selection of the model to use would depend on the type of events, the importance of the order of the events and the time intervals to be analyzed. 
We made an effort to summarize the similarities and differences between the models in the table below.\n\n| | **AG** | **PWPtt** | **PWPgt** | **WLW** |\n|---------------|---------------|---------------|---------------|---------------|\n| **Approach** | counting process | conditional | conditional | marginal |\n| **Baseline hazard** | common | stratified | stratified | stratified |\n| **Regression coefficients** | common | stratified possible | stratified possible | stratified possible |\n| **Risk set** | unrestricted | restricted | restricted | semi-restricted |\n| **Time interval** | total time | total time | gap time | total time |\n| **Order of events** | not important | important | important | not important |\n| **Hazard ratio (HR)** | risk of any recurrence | risk of any recurrence | risk of recurrence after previous event | risk of event of any type, not necessarily recurrent event |\n\nNote that, because the ordering of events is not important in the Andersen-Gill and Wei-Lin-Weissfeld model, these models come with the assumption of **independence** of events. In contrast, the Prentice-Williams-Peterson models overcome the need for this assumption by capturing the dependence structure between recurrent events in conditional strata. Consequently, events are assumed to be *conditionally* independent in the Prentice-Williams-Peterson models.\n\nA nice visual representation of the stratification and time interval structure of each model is given below. 
The correct data structure is pivotal when modelling recurrent events and depends on the methodology you want to use, as illustrated in the figure.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/combined_lineplot.png){fig-align='center' width=80%}\n:::\n:::\n\n\n## Modelling recurrent events using the `survival` package\n\n### Data\n\nFor this tutorial we will use the [`bladder` data](https://rdrr.io/cran/survival/man/bladder.html) from the [`survival` package](https://cran.r-project.org/web/packages/survival/survival.pdf), which captures recurrences of bladder cancer from a clinical trial for an oncolytic called thiotepa. The `bladder` data is regularly used by many statisticians to demonstrate methodology for recurrent event modelling. Somewhat confusingly, there are three versions of this data available:\n\n- `bladder1`: original data from the study on all subjects (294 records)\n\n- `bladder2`: data in Andersen-Gill format on subset of subjects with nonzero follow-up time (178 records)\n\n- `bladder`: data in Wei-Lin-Weissfeld format on subset of subjects with nonzero follow-up time (340 records)\n\nFor this tutorial, we will use `bladder2` to illustrate Andersen-Gill and Prentice-Williams-Peterson models, and `bladder` to illustrate the Wei-Lin-Weissfeld model.\n\nThe variables included in both datasets are:\n\n- **id**: patient id\n\n- **rx**: treatment group (1 = placebo, 2 = thiotepa)\n\n- **number**: initial number of tumors (8 = 8 or more)\n\n- **size**: size in cm of largest initial tumor\n\n- **start**: start of time interval; this variable is **not** present in `bladder`\n\n- **stop**: (recurrent) event or censoring time\n\n- **event**: event indicator (1 = event, 0 = censored)\n\n- **enum**: order of recurrence\n\nImportantly, both datasets collect the data in a **counting process** structure. 
This means that there is one record for each subject and time interval, where a time interval is defined as the time to its respective event (**event** = 1), or the time to follow-up if the event did not occur (**event** = 0).\n\nLet's look more closely at the `bladder2` and `bladder` data:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbladder2 <- survival::bladder2\ngt(head(bladder2, 6))\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n
idrxnumbersizestartstopeventenum
11130101
21210401
31110701
415101001
51410611
514161002
\n
\n```\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbladder2 %>%\n group_by(enum) %>% summarise(n = n()) %>% gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n\n \n\n \n\n \n\n \n \n
enumn
185
246
327
420
\n
\n```\n\n:::\n:::\n\n\nIn `bladder2`, in the Andersen-Gill format, each subject has a variable amount of records, depending on the amount of events that subject experienced.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbladder <- survival::bladder\ngt(head(bladder, 20))\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n \n
idrxnumbersizestopeventenum
1113101
1113102
1113103
1113104
2121401
2121402
2121403
2121404
3111701
3111702
3111703
3111704
41511001
41511002
41511003
41511004
5141611
51411002
51411003
51411004
\n
\n```\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbladder %>%\n group_by(enum) %>% summarise(n = n()) %>% gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n\n \n\n \n\n \n\n \n \n
enumn
185
285
385
485
\n
\n```\n\n:::\n:::\n\n\nIn `bladder`, in the Wei-Lin-Weissfeld format, each subject has four records, regardless of how many events that subject actually experienced. In addition, there is no `start` variable, as all time intervals start at zero.\n\n### Analysis\n\nIn the `survival` package, any survival analysis based on the Cox proportional hazard model can be conducted using the `coxph()` function. Hence, conveniently, when modelling time-to-event data with recurrent events, the same function can be used. The caveat here is that an adequate data structure is required, which must be in correspondence with the model you want to use.\n\nIn this section of the tutorial, we will explain how the arguments of the `coxph()` function and data structure must be defined to fit every type of recurrent event model correctly.\n\n#### Andersen-Gill model\n\n1. **Improved Andersen-Gill model (LWYY model or proportional means/rates model)**\n\nFor the improved version of the Andersen-Gill model you must specify:\n\n- `formula = Surv(start, stop, event) ~ 'predictors'`\n\n- `cluster = id`\n\nAnd the data structure must be:\n\n| Id | Time interval | Start | Stop | Event |\n|-----|---------------|-------|------|-------|\n| 1 | (0, 1\\] | 0 | 1 | 0 |\n| 2 | (0, 4\\] | 0 | 4 | 0 |\n| 3 | (0, 7\\] | 0 | 7 | 0 |\n| 4 | (0, 10\\] | 0 | 10 | 0 |\n| 5 | (0, 6\\] | 0 | 6 | 1 |\n| 5 | (6, 10\\] | 6 | 10 | 0 |\n\nWe will use the `bladder2` data for this.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nAG <- coxph(Surv(start, stop, event) ~ as.factor(rx) + number + size,\n ties = \"breslow\",\n cluster = id,\n data = bladder2)\nsummary(AG)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nCall:\ncoxph(formula = Surv(start, stop, event) ~ as.factor(rx) + number + \n size, data = bladder2, ties = \"breslow\", cluster = id)\n\n n= 178, number of events= 112 \n\n coef exp(coef) se(coef) robust se z Pr(>|z|) \nas.factor(rx)2 -0.45979 0.63142 0.19996 0.25801 -1.782 0.07474 . 
\nnumber 0.17164 1.18726 0.04733 0.06131 2.799 0.00512 **\nsize -0.04256 0.95833 0.06903 0.07555 -0.563 0.57317 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n exp(coef) exp(-coef) lower .95 upper .95\nas.factor(rx)2 0.6314 1.5837 0.3808 1.047\nnumber 1.1873 0.8423 1.0528 1.339\nsize 0.9583 1.0435 0.8264 1.111\n\nConcordance= 0.634 (se = 0.032 )\nLikelihood ratio test= 16.77 on 3 df, p=8e-04\nWald test = 11.76 on 3 df, p=0.008\nScore (logrank) test = 18.57 on 3 df, p=3e-04, Robust = 11.44 p=0.01\n\n (Note: the likelihood ratio and score tests assume independence of\n observations within a cluster, the Wald and robust score tests do not).\n```\n\n\n:::\n:::\n\n\nBy defining the `cluster` argument, `coxph()` will automatically set `robust = TRUE`, and compute a robust sandwich covariance. The summary function will then display both the non-robust (`se(coef)`) and robust (`robust se`) standard error estimates. Under the hood, the robust standard errors will consider all **id** clusters separately and ultimately sum up the score residuals for each distinct cluster.\n\n2. **Original Andersen-Gill model**\n\nTo our knowledge, the original Andersen-Gill model of 1989 can only be fitted in R by adding an artificial clustering variable with unique entries to the `bladder2` data, which we call `id2`. 
This artificial clustering variable will ignore any clustering that is actually present in the data.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbladder2 <- bladder2 %>%\n dplyr::mutate(id2 = row_number())\n```\n:::\n\n\nExcept for `cluster = id2`, the rest of the code remains the same.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nAG_original <- coxph(Surv(start, stop, event) ~ as.factor(rx) + number + size,\n ties = \"breslow\",\n cluster = id2,\n data = bladder2)\nsummary(AG_original)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nCall:\ncoxph(formula = Surv(start, stop, event) ~ as.factor(rx) + number + \n size, data = bladder2, ties = \"breslow\", cluster = id2)\n\n n= 178, number of events= 112 \n\n coef exp(coef) se(coef) robust se z Pr(>|z|) \nas.factor(rx)2 -0.45979 0.63142 0.19996 0.22902 -2.008 0.04468 * \nnumber 0.17164 1.18726 0.04733 0.06469 2.653 0.00797 **\nsize -0.04256 0.95833 0.06903 0.07325 -0.581 0.56119 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n exp(coef) exp(-coef) lower .95 upper .95\nas.factor(rx)2 0.6314 1.5837 0.4031 0.9891\nnumber 1.1873 0.8423 1.0459 1.3477\nsize 0.9583 1.0435 0.8302 1.1063\n\nConcordance= 0.634 (se = 0.032 )\nLikelihood ratio test= 16.77 on 3 df, p=8e-04\nWald test = 9.36 on 3 df, p=0.02\nScore (logrank) test = 18.57 on 3 df, p=3e-04, Robust = 14.27 p=0.003\n\n (Note: the likelihood ratio and score tests assume independence of\n observations within a cluster, the Wald and robust score tests do not).\n```\n\n\n:::\n:::\n\n\nAlthough the original Andersen-Gill model does not consider separate **id** clusters, it still computes robust standard errors using the sandwich estimator, as `robust = TRUE`. The resulting robust standard errors (`robust se`) differ from those provided for the improved Andersen-Gill model, while the estimated coefficients (`coef`) and non-robust standard errors (`se(coef)`) remain perfectly unchanged.\n\n#### Prentice-Williams-Peterson model\n\n1. 
**Total time model**\n\nFor the Prentice-Williams-Peterson total time model you must specify:\n\n- `formula = Surv(start, stop, event) ~ 'predictors' + strata(enum)`\n\n- `cluster = id`\n\nAnd the data structure must be:\n\n| Id | Time interval | Start | Stop | Event | Enum |\n|-----|---------------|-------|------|-------|------|\n| 1 | (0, 1\\] | 0 | 1 | 0 | 1 |\n| 2 | (0, 4\\] | 0 | 4 | 0 | 1 |\n| 3 | (0, 7\\] | 0 | 7 | 0 | 1 |\n| 4 | (0, 10\\] | 0 | 10 | 0 | 1 |\n| 5 | (0, 6\\] | 0 | 6 | 1 | 1 |\n| 5 | (6, 10\\] | 6 | 10 | 0 | 2 |\n\nWe will use the `bladder2` data for this.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nPWPtt <- coxph(Surv(start, stop, event) ~ as.factor(rx) + number + size + strata(enum),\n ties = \"breslow\",\n cluster = id,\n data = bladder2)\nsummary(PWPtt)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nCall:\ncoxph(formula = Surv(start, stop, event) ~ as.factor(rx) + number + \n size + strata(enum), data = bladder2, ties = \"breslow\", cluster = id)\n\n n= 178, number of events= 112 \n\n coef exp(coef) se(coef) robust se z Pr(>|z|) \nas.factor(rx)2 -0.334295 0.715842 0.216087 0.197064 -1.696 0.0898 .\nnumber 0.115653 1.122606 0.053681 0.049913 2.317 0.0205 *\nsize -0.008051 0.991982 0.072725 0.060124 -0.134 0.8935 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n\n exp(coef) exp(-coef) lower .95 upper .95\nas.factor(rx)2 0.7158 1.3970 0.4865 1.053\nnumber 1.1226 0.8908 1.0180 1.238\nsize 0.9920 1.0081 0.8817 1.116\n\nConcordance= 0.615 (se = 0.032 )\nLikelihood ratio test= 6.11 on 3 df, p=0.1\nWald test = 7.19 on 3 df, p=0.07\nScore (logrank) test = 6.45 on 3 df, p=0.09, Robust = 8.76 p=0.03\n\n (Note: the likelihood ratio and score tests assume independence of\n observations within a cluster, the Wald and robust score tests do not).\n```\n\n\n:::\n:::\n\n\nThe conditional strata of the Prentice-Williams-Peterson model are set by `strata(enum)` in the formula, where `enum` captures the ordering of recurrent events.\n\n2. **Gap time model**\n\nFor the Prentice-Williams-Peterson gap time model you must specify:\n\n- `formula = Surv(gtime, event) ~ 'predictors' + strata(enum)`\n\n- `cluster = id`\n\nAnd the data structure must be:\n\n| Id | Time interval | Start | Stop | Event | Enum |\n|-----|---------------|-------|------|-------|------|\n| 1 | (0, 1\\] | 0 | 1 | 0 | 1 |\n| 2 | (0, 4\\] | 0 | 4 | 0 | 1 |\n| 3 | (0, 7\\] | 0 | 7 | 0 | 1 |\n| 4 | (0, 10\\] | 0 | 10 | 0 | 1 |\n| 5 | (0, 6\\] | 0 | 6 | 1 | 1 |\n| 5 | (0, 4\\] | 0 | 4 | 0 | 2 |\n\nThis data structure can be achieved in `bladder2` by adding a `gtime` variable.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbladder2 <- bladder2 %>%\n dplyr::mutate(gtime = stop - start)\n \ngt(head(bladder2, 6))\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n \n
idrxnumbersizestartstopeventenumid2gtime
1113010111
2121040124
3111070137
415101001410
5141061156
51416100264
\n
\n```\n\n:::\n:::\n\n\nWe artificially set start = 0 for each gap time interval by including `gtime` instead of `start, stop` in the `Surv()` object.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nPWPgt <- coxph(Surv(gtime, event) ~ as.factor(rx) + number + size + strata(enum),\n ties = \"breslow\",\n cluster = id,\n data = bladder2)\nsummary(PWPgt)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nCall:\ncoxph(formula = Surv(gtime, event) ~ as.factor(rx) + number + \n size + strata(enum), data = bladder2, ties = \"breslow\", cluster = id)\n\n n= 178, number of events= 112 \n\n coef exp(coef) se(coef) robust se z Pr(>|z|) \nas.factor(rx)2 -0.26952 0.76375 0.20766 0.20808 -1.295 0.19522 \nnumber 0.15353 1.16595 0.05211 0.04889 3.140 0.00169 **\nsize 0.00684 1.00686 0.07001 0.06222 0.110 0.91246 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n exp(coef) exp(-coef) lower .95 upper .95\nas.factor(rx)2 0.7637 1.3093 0.5080 1.148\nnumber 1.1659 0.8577 1.0594 1.283\nsize 1.0069 0.9932 0.8913 1.137\n\nConcordance= 0.596 (se = 0.032 )\nLikelihood ratio test= 8.76 on 3 df, p=0.03\nWald test = 12.14 on 3 df, p=0.007\nScore (logrank) test = 9.6 on 3 df, p=0.02, Robust = 10.21 p=0.02\n\n (Note: the likelihood ratio and score tests assume independence of\n observations within a cluster, the Wald and robust score tests do not).\n```\n\n\n:::\n:::\n\n\n#### Wei-Lin-Weissfeld model\n\nFor the Wei-Lin-Weissfeld model you must specify:\n\n- `formula = Surv(stop, event) ~ 'predictors' + strata(enum)`\n\n- `cluster = id`\n\nAnd the data structure must be:\n\n| Id | Time interval | Start | Stop | Event | Enum |\n|-----|---------------|-------|------|-------|------|\n| 1 | (0, 1\\] | 0 | 1 | 0 | 1 |\n| 1 | (0, 1\\] | 0 | 1 | 0 | 2 |\n| 1 | (0, 1\\] | 0 | 1 | 0 | 3 |\n| 1 | (0, 1\\] | 0 | 1 | 0 | 4 |\n| 2 | (0, 4\\] | 0 | 4 | 0 | 1 |\n| 2 | (0, 4\\] | 0 | 4 | 0 | 2 |\n| 2 | (0, 4\\] | 0 | 4 | 0 | 3 |\n| 2 | (0, 4\\] | 0 | 4 | 0 | 4 |\n| 3 | (0, 7\\] | 0 | 7 | 0 | 1 
|\n| 3 | (0, 7\\] | 0 | 7 | 0 | 2 |\n| 3 | (0, 7\\] | 0 | 7 | 0 | 3 |\n| 3 | (0, 7\\] | 0 | 7 | 0 | 4 |\n| 4 | (0, 10\\] | 0 | 10 | 0 | 1 |\n| 4 | (0, 10\\] | 0 | 10 | 0 | 2 |\n| 4 | (0, 10\\] | 0 | 10 | 0 | 3 |\n| 4 | (0, 10\\] | 0 | 10 | 0 | 4 |\n| 5 | (0, 6\\] | 0 | 6 | 1 | 1 |\n| 5 | (0, 10\\] | 0 | 10 | 0 | 2 |\n| 5 | (0, 10\\] | 0 | 10 | 0 | 3 |\n| 5 | (0, 10\\] | 0 | 10 | 0 | 4 |\n\nWe will use the `bladder` data for this.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nWLW <- coxph(Surv(stop, event) ~ as.factor(rx) + number + size + strata(enum),\n ties = \"breslow\",\n cluster = id,\n data = bladder)\nsummary(WLW)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nCall:\ncoxph(formula = Surv(stop, event) ~ as.factor(rx) + number + \n size + strata(enum), data = bladder, ties = \"breslow\", cluster = id)\n\n n= 340, number of events= 112 \n\n coef exp(coef) se(coef) robust se z Pr(>|z|) \nas.factor(rx)2 -0.57986 0.55998 0.20118 0.30344 -1.911 0.0560 . \nnumber 0.20849 1.23182 0.04691 0.06567 3.175 0.0015 **\nsize -0.05094 0.95034 0.06967 0.09304 -0.548 0.5840 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n exp(coef) exp(-coef) lower .95 upper .95\nas.factor(rx)2 0.5600 1.7858 0.3089 1.015\nnumber 1.2318 0.8118 1.0830 1.401\nsize 0.9503 1.0523 0.7919 1.140\n\nConcordance= 0.663 (se = 0.036 )\nLikelihood ratio test= 24.71 on 3 df, p=2e-05\nWald test = 15.56 on 3 df, p=0.001\nScore (logrank) test = 27.89 on 3 df, p=4e-06, Robust = 11.75 p=0.008\n\n (Note: the likelihood ratio and score tests assume independence of\n observations within a cluster, the Wald and robust score tests do not).\n```\n\n\n:::\n:::\n\n\nImportantly, the strata of the Wei-Lin-Weissfeld model as set by `strata(enum)` are substantially different from the conditional strata of the Prentice-Williams-Peterson model. 
The `enum` variable is now no longer assumed to be an ordinal variable.\n\n#### Important notes\n\n[**Note:**]{.underline} For all recurrent event models, another way of defining the subject clusters is by using `cluster(id)` in the formula, rather than setting `cluster = id`. This results in the same outcomes, as shown below.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nAG_v1 <- coxph(Surv(start, stop, event) ~ as.factor(rx) + number + size,\n ties = \"breslow\",\n cluster = id,\n data = bladder2)\n\nsummary(AG_v1)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nCall:\ncoxph(formula = Surv(start, stop, event) ~ as.factor(rx) + number + \n size, data = bladder2, ties = \"breslow\", cluster = id)\n\n n= 178, number of events= 112 \n\n coef exp(coef) se(coef) robust se z Pr(>|z|) \nas.factor(rx)2 -0.45979 0.63142 0.19996 0.25801 -1.782 0.07474 . \nnumber 0.17164 1.18726 0.04733 0.06131 2.799 0.00512 **\nsize -0.04256 0.95833 0.06903 0.07555 -0.563 0.57317 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n\n exp(coef) exp(-coef) lower .95 upper .95\nas.factor(rx)2 0.6314 1.5837 0.3808 1.047\nnumber 1.1873 0.8423 1.0528 1.339\nsize 0.9583 1.0435 0.8264 1.111\n\nConcordance= 0.634 (se = 0.032 )\nLikelihood ratio test= 16.77 on 3 df, p=8e-04\nWald test = 11.76 on 3 df, p=0.008\nScore (logrank) test = 18.57 on 3 df, p=3e-04, Robust = 11.44 p=0.01\n\n (Note: the likelihood ratio and score tests assume independence of\n observations within a cluster, the Wald and robust score tests do not).\n```\n\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nAG_v2 <- coxph(Surv(start, stop, event) ~ as.factor(rx) + number + size + cluster(id),\n ties = \"breslow\",\n data = bladder2)\n\nsummary(AG_v2)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nCall:\ncoxph(formula = Surv(start, stop, event) ~ as.factor(rx) + number + \n size, data = bladder2, ties = \"breslow\", cluster = id)\n\n n= 178, number of events= 112 \n\n coef exp(coef) se(coef) robust se z Pr(>|z|) \nas.factor(rx)2 -0.45979 0.63142 0.19996 0.25801 -1.782 0.07474 . \nnumber 0.17164 1.18726 0.04733 0.06131 2.799 0.00512 **\nsize -0.04256 0.95833 0.06903 0.07555 -0.563 0.57317 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n exp(coef) exp(-coef) lower .95 upper .95\nas.factor(rx)2 0.6314 1.5837 0.3808 1.047\nnumber 1.1873 0.8423 1.0528 1.339\nsize 0.9583 1.0435 0.8264 1.111\n\nConcordance= 0.634 (se = 0.032 )\nLikelihood ratio test= 16.77 on 3 df, p=8e-04\nWald test = 11.76 on 3 df, p=0.008\nScore (logrank) test = 18.57 on 3 df, p=3e-04, Robust = 11.44 p=0.01\n\n (Note: the likelihood ratio and score tests assume independence of\n observations within a cluster, the Wald and robust score tests do not).\n```\n\n\n:::\n:::\n\n\n[**Note:**]{.underline} R uses `ties = \"efron\"` by default, while SAS uses `ties = breslow` by default. In this tutorial, we forced R to use `ties = \"breslow\"` to match SAS for all recurrent event models. 
For more information, be sure to check the [CAMIS webpage](https://psiaims.github.io/CAMIS/Comp/r-sas_survival.html) on the comparison of Cox proportional hazards models in R and SAS.\n\n### Interpretation\n\nIn terms of interpretation, hazard ratios ($\\exp(\\beta_j)$) are often used when making inferences based on Cox proportional hazards models. Now, as you may remember from the overview presented earlier, it is important to recognize that each of the recurrent event models comes with a slightly different interpretation of the hazard ratio, as defined by the assumptions around the model.\n\n| | **AG** | **PWPtt** | **PWPgt** | **WLW** |\n|---------------|---------------|---------------|---------------|---------------|\n| **Hazard ratio (HR)** | risk of any recurrence | risk of any recurrence | risk of recurrence after previous event | risk of event of any type, not necessarily recurrent event |\n\nThis means that, for the `bladder` data, we can draw slightly different conclusions on the hazard ratio of the group treated with thiotepa (**rx** = 2) versus the placebo group (**rx** = 1).\n\n| Model | HR: rx2 vs rx1 | 95% CI | P-value |\n|-------------|----------------|----------------|---------|\n| AG | 0.631 | 0.381 to 1.047 | 0.0747 |\n| Original AG | 0.631 | 0.403 to 0.989 | 0.0447 |\n| PWPtt | 0.716 | 0.487 to 1.053 | 0.0898 |\n| PWPgt | 0.764 | 0.508 to 1.148 | 0.1952 |\n| WLW | 0.560 | 0.309 to 1.015 | 0.0560 |\n\nThese conclusions are:\n\n- **Andersen-Gill**: the risk of having *any new tumor recurrence* in the treatment group is 0.631 (0.381 - 1.047) times that of the placebo group\n\n- **Prentice-Williams-Peterson: total time**: the risk of having *any new tumor recurrence* in the treatment group is 0.716 (0.487 - 1.053) times that of the placebo group\n\n- **Prentice-Williams-Peterson: gap time**: the risk of having *a new tumor recurrence after a previous event* in the treatment group is 0.764 (0.508 - 1.148) times that of the placebo group\n\n- 
**Wei-Lin-Weissfeld**: the risk of having *any type of event* in the treatment group is 0.560 (0.309 - 1.015) times that of the placebo group\n\n[**Note:**]{.underline} The improved Andersen-Gill model (LWYY model or proportional means/rates model) is preferred over the original Andersen-Gill model.\n\n### Event-specific estimates\n\nFor the Prentice-Williams-Peterson and Wei-Lin-Weissfeld models we can incorporate both overall ($\\beta$) and event-specific ($\\beta_j$) effects for each covariate. To arrive at pooled model parameters these models assume that $\\beta_1 = \\beta_2 = ... = \\beta_k = \\beta$. Until now, we have only considered pooled model parameters, but given the underlying stratification of these two models in particular, it may be valuable to look into the event-specific estimates as well ([Amorim & Cai 2015](https://pubmed.ncbi.nlm.nih.gov/25501468/)).\n\nTo output event-specific estimates for the treatment effect (**rx**), we simply specify `rx:strata(enum)` in the formula.\n\n#### Prentice-Williams-Peterson model\n\n**Total time model**\n\n\n::: {.cell}\n\n```{.r .cell-code}\nPWPtt_stratified <- coxph(Surv(start, stop, event) ~ rx:strata(enum) + number + size,\n ties = \"breslow\", cluster = id, data = bladder2)\nsummary(PWPtt_stratified)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nCall:\ncoxph(formula = Surv(start, stop, event) ~ rx:strata(enum) + \n number + size, data = bladder2, ties = \"breslow\", cluster = id)\n\n n= 178, number of events= 112 \n\n coef exp(coef) se(coef) robust se z Pr(>|z|) \nnumber 0.111071 1.117475 0.054397 0.050063 2.219 0.0265 *\nsize -0.006674 0.993349 0.073173 0.061409 -0.109 0.9135 \nrx:strata(enum)enum=1 -0.409893 0.663721 0.304293 0.286737 -1.430 0.1529 \nrx:strata(enum)enum=2 -0.416339 0.659457 0.398266 0.423632 -0.983 0.3257 \nrx:strata(enum)enum=3 -0.142970 0.866780 0.593818 0.405303 -0.353 0.7243 \nrx:strata(enum)enum=4 0.105362 1.111112 0.679095 0.469706 0.224 0.8225 \n---\nSignif. 
codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n exp(coef) exp(-coef) lower .95 upper .95\nnumber 1.1175 0.8949 1.0130 1.233\nsize 0.9933 1.0067 0.8807 1.120\nrx:strata(enum)enum=1 0.6637 1.5067 0.3784 1.164\nrx:strata(enum)enum=2 0.6595 1.5164 0.2875 1.513\nrx:strata(enum)enum=3 0.8668 1.1537 0.3917 1.918\nrx:strata(enum)enum=4 1.1111 0.9000 0.4425 2.790\n\nConcordance= 0.609 (se = 0.033 )\nLikelihood ratio test= 6.73 on 6 df, p=0.3\nWald test = 8.22 on 6 df, p=0.2\nScore (logrank) test = 7.02 on 6 df, p=0.3, Robust = 9.37 p=0.2\n\n (Note: the likelihood ratio and score tests assume independence of\n observations within a cluster, the Wald and robust score tests do not).\n```\n\n\n:::\n:::\n\n\n**Gap time model**\n\nPWPgt_stratified \\<- coxph(Surv(gtime, event) \\~ rx:strata(enum) + number + size,\n\nties = \"breslow\", cluster = id, data = bladder2)\n\nsummary(PWPgt_stratified)\n\n\n::: {.cell}\n\n```{.r .cell-code}\nPWPgt_stratified <- coxph(Surv(gtime, event) ~ rx:strata(enum) + number + size,\n ties = \"breslow\", cluster = id, data = bladder2)\nsummary(PWPgt_stratified)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nCall:\ncoxph(formula = Surv(gtime, event) ~ rx:strata(enum) + number + \n size, data = bladder2, ties = \"breslow\", cluster = id)\n\n n= 178, number of events= 112 \n\n coef exp(coef) se(coef) robust se z Pr(>|z|) \nnumber 0.15193 1.16408 0.05259 0.04864 3.124 0.00179 **\nsize 0.01319 1.01328 0.07021 0.06297 0.209 0.83406 \nrx:strata(enum)enum=1 -0.43665 0.64620 0.30521 0.28376 -1.539 0.12385 \nrx:strata(enum)enum=2 -0.30182 0.73947 0.39752 0.38907 -0.776 0.43789 \nrx:strata(enum)enum=3 0.01485 1.01497 0.47722 0.49843 0.030 0.97622 \nrx:strata(enum)enum=4 0.06019 1.06204 0.58289 0.53982 0.112 0.91122 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n\n exp(coef) exp(-coef) lower .95 upper .95\nnumber 1.1641 0.8590 1.0582 1.281\nsize 1.0133 0.9869 0.8956 1.146\nrx:strata(enum)enum=1 0.6462 1.5475 0.3705 1.127\nrx:strata(enum)enum=2 0.7395 1.3523 0.3449 1.585\nrx:strata(enum)enum=3 1.0150 0.9853 0.3821 2.696\nrx:strata(enum)enum=4 1.0620 0.9416 0.3687 3.059\n\nConcordance= 0.603 (se = 0.032 )\nLikelihood ratio test= 9.73 on 6 df, p=0.1\nWald test = 14.45 on 6 df, p=0.02\nScore (logrank) test = 10.49 on 6 df, p=0.1, Robust = 11.41 p=0.08\n\n (Note: the likelihood ratio and score tests assume independence of\n observations within a cluster, the Wald and robust score tests do not).\n```\n\n\n:::\n:::\n\n\n#### Wei-Lin-Weissfeld model\n\n\n::: {.cell}\n\n```{.r .cell-code}\nWLW_stratified <- coxph(Surv(stop, event) ~ rx:strata(enum) + number + size,\n ties = \"breslow\", cluster = id, data = bladder)\nsummary(WLW_stratified)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nCall:\ncoxph(formula = Surv(stop, event) ~ rx:strata(enum) + number + \n size, data = bladder, ties = \"breslow\", cluster = id)\n\n n= 340, number of events= 112 \n\n coef exp(coef) se(coef) robust se z Pr(>|z|) \nnumber 0.20747 1.23056 0.04685 0.06573 3.156 0.0016 **\nsize -0.05187 0.94945 0.06966 0.09300 -0.558 0.5770 \nrx:strata(enum)enum=1 -0.47890 0.61947 0.30583 0.28313 -1.691 0.0908 . \nrx:strata(enum)enum=2 -0.64914 0.52249 0.39217 0.36826 -1.763 0.0779 . \nrx:strata(enum)enum=3 -0.71783 0.48781 0.45971 0.42148 -1.703 0.0885 . \nrx:strata(enum)enum=4 -0.56175 0.57021 0.56143 0.49592 -1.133 0.2573 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n\n exp(coef) exp(-coef) lower .95 upper .95\nnumber 1.2306 0.8126 1.0818 1.400\nsize 0.9494 1.0532 0.7912 1.139\nrx:strata(enum)enum=1 0.6195 1.6143 0.3556 1.079\nrx:strata(enum)enum=2 0.5225 1.9139 0.2539 1.075\nrx:strata(enum)enum=3 0.4878 2.0500 0.2135 1.114\nrx:strata(enum)enum=4 0.5702 1.7537 0.2157 1.507\n\nConcordance= 0.661 (se = 0.036 )\nLikelihood ratio test= 24.95 on 6 df, p=3e-04\nWald test = 16.54 on 6 df, p=0.01\nScore (logrank) test = 28.19 on 6 df, p=9e-05, Robust = 11.92 p=0.06\n\n (Note: the likelihood ratio and score tests assume independence of\n observations within a cluster, the Wald and robust score tests do not).\n```\n\n\n:::\n:::\n\n\n## References\n\n[Amor 2023](https://www.lexjansen.com/phuse/2023/as/PAP_AS05.pdf). Eat, Sleep, R, Repeat.\n\n[Amorim & Cai 2015](https://pubmed.ncbi.nlm.nih.gov/25501468/). Modelling recurrent events: a tutorial for analysis in epidemiology. *International Journal of Epidemiology*. 2015 Feb;44(1):324-33.\n\n[Andersen & Gill 1982](https://www.jstor.org/stable/2240714). Cox’s Regression Model for Counting Processes: A Large Sample Study. *The Annals of Statistics*. 10(4):1100–1120.\n\n[bladder data](https://rdrr.io/cran/survival/man/bladder.html)\n\n[Lee et al. 2025](https://pubmed.ncbi.nlm.nih.gov/40490702/). Comparison of total event analysis and first event analysis in relation to heterogeneity in cardiovascular trials. *BMC Medical Research Methodology*. 2025 Jun 9;25(1):159.\n\n[Lin, Wei, Yang & Ying 2000](https://www.jstor.org/stable/2680616). Semiparametric regression for the mean and rate functions of recurrent events. *Journal of the Royal Statistical Society: Series B.* 62(4):711–730.\n\n[Lu & Shen 2018](https://www.lexjansen.com/pharmasug/2018/EP/PharmaSUG-2018-EP02.pdf). Application of Survival Analysis in Multiple Events Using SAS. *PharmaSUG 2018*.\n\n[Ozga et al. 2018](https://pubmed.ncbi.nlm.nih.gov/29301487/). 
A systematic comparison of recurrent event models for application to composite endpoints. *BMC Medical Research Methodology*. 2018 Jan 4;18(1):2.\n\n[Prentice, Williams & Peterson 1981](https://www.jstor.org/stable/2335582). On the Regression Analysis of Multivariate Failure Time Data. *Biometrika*. 68(2):373–379.\n\n[survival package](https://cran.r-project.org/web/packages/survival/vignettes/survival.pdf)\n\n[Wei, Lin & Weissfeld 1989](https://www.jstor.org/stable/2290084). Regression Analysis of Multivariate Incomplete Failure Time Data by Modeling Marginal Distributions. *Journal of the American Statistical Association*. 84(408):1065–1073.\n\n::: {.callout-note collapse=\"true\" title=\"Session info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-23\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P cli 3.6.5 2025-04-23 [?] RSPM\n P digest 0.6.39 2025-11-19 [?] RSPM\n P dplyr * 1.2.0 2026-02-03 [?] RSPM\n P evaluate 1.0.5 2025-08-27 [?] RSPM\n P fastmap 1.2.0 2024-05-15 [?] RSPM\n P fs 1.6.6 2025-04-12 [?] RSPM\n P generics 0.1.4 2025-05-09 [?] RSPM\n P glue 1.8.0 2024-09-30 [?] RSPM\n P gt * 1.3.0 2026-01-22 [?] RSPM\n P htmltools 0.5.9 2025-12-04 [?] RSPM\n P htmlwidgets 1.6.4 2023-12-06 [?] RSPM\n P jsonlite 2.0.0 2025-03-27 [?] RSPM\n P knitr 1.51 2025-12-20 [?] RSPM\n lattice 0.22-7 2025-04-02 [2] CRAN (R 4.5.2)\n P lifecycle 1.0.5 2026-01-08 [?] RSPM\n P magrittr 2.0.4 2025-09-12 [?] 
RSPM\n Matrix 1.7-4 2025-08-28 [2] CRAN (R 4.5.2)\n P otel 0.2.0 2025-08-29 [?] RSPM\n P pillar 1.11.1 2025-09-17 [?] RSPM\n P pkgconfig 2.0.3 2019-09-22 [?] RSPM\n P R6 2.6.1 2025-02-15 [?] RSPM\n renv 1.0.10 2024-10-05 [1] RSPM (R 4.5.2)\n P rlang 1.1.7 2026-01-09 [?] RSPM\n P rmarkdown 2.30 2025-09-28 [?] RSPM\n P sass 0.4.10 2025-04-11 [?] RSPM\n P sessioninfo 1.2.3 2025-02-05 [?] RSPM\n survival * 3.8-3 2024-12-17 [2] CRAN (R 4.5.2)\n P tibble 3.3.1 2026-01-11 [?] RSPM\n P tidyselect 1.2.1 2024-03-11 [?] RSPM\n P vctrs 0.7.1 2026-01-23 [?] RSPM\n P withr 3.0.2 2024-10-28 [?] RSPM\n P xfun 0.56 2026-01-18 [?] RSPM\n P xml2 1.5.2 2026-01-17 [?] RSPM\n P yaml 2.3.12 2025-12-10 [?] RSPM\n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n\n * ── Packages attached to the search path.\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n:::\n", + "markdown": "---\ntitle: \"R Recurrent Events\"\n---\n\n\n\n# Recurrent event models\n\n## Setup\n\n### General libraries\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(dplyr)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\n\nAttaching package: 'dplyr'\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nThe following objects are masked from 'package:stats':\n\n filter, lag\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nThe following objects are masked from 'package:base':\n\n intersect, setdiff, setequal, union\n```\n\n\n:::\n\n```{.r .cell-code}\nlibrary(gt)\n```\n:::\n\n\n### Methodology specific libraries\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(survival)\n```\n:::\n\n\n## Modelling recurrent events\n\n### Methodology introduction\n\nTraditionally, survival analysis focuses on the time to a *single* first event. 
While there are many applications for such time-to-event analysis in clinical trials, this approach falls short when events of interest can occur multiple times within the same subject. Recurrent event models extend the traditional Cox proportional hazards framework to account for *multiple* events per subject ([Ozga et al. 2018](https://pubmed.ncbi.nlm.nih.gov/29301487/), [Amorim & Cai 2015](https://pubmed.ncbi.nlm.nih.gov/25501468/)).\n\nIn this tutorial, we will demonstrate how to implement different recurrent event models in R, specifically the Andersen-Gill, proportional means/rates, Prentice-Williams-Peterson, and Wei-Lin-Weissfeld models, using the well-known [`survival` package](https://cran.r-project.org/web/packages/survival/vignettes/survival.pdf). The R code follows the layout of [Amor 2023](https://www.lexjansen.com/phuse/2023/as/PAP_AS05.pdf), with additional insights taken from [Lu et al. 2018](https://www.lexjansen.com/pharmasug/2018/EP/PharmaSUG-2018-EP02.pdf).\n\nRecurrent event models can roughly be divided in three categories: counting process models, conditional models and marginal models. In the section below, we will explore the difference between each of these approaches. 
In addition, important aspects of data structure will be discussed by means of two fictional subjects, one with 4 events and 0 censored observations (events at time 6, 9, 56 and 88), and another with 2 events and 1 censored observation (events at time 42, 57, and censored at time 91).\n\nDefine the following:\n\n::: callout-note\n$\\lambda_i(t)$: hazard function for the $i$th subject at time $t$\n\n$\\lambda_{ij}(t)$: hazard function for the $j$th event of the $i$th subject at time $t$\n\n$\\lambda_0(t)$: common baseline hazard for all events\n\n$\\lambda_{0j}(t)$: event-specific baseline hazard for the $j$th event at time $t$\n\n$\\beta$: common parameter vector\n\n$\\beta_j$: event-specific parameter vector for the $j$th event\n\n$X_{ij}$: covariate vector for the $j$th event of the $i$th subject\n:::\n\n### Counting process models\n\n#### Andersen-Gill model ([Andersen & Gill 1982](https://projecteuclid.org/journals/annals-of-statistics/volume-10/issue-4/Coxs-Regression-Model-for-Counting-Processes--A-Large-Sample/10.1214/aos/1176345976.full))\n\n$$\n\\lambda_i(t) = \\lambda_0(t) \\exp \\left( \\beta X_{ij}(t) \\right) \\\n$$\n\n- Counting process approach: treats each subject as a multiple events counting process\n\n- Common baseline hazard $\\lambda_0(t)$\n\n- Common regression coefficients $\\beta$\n\n- Unrestricted risk set: a subject contributes to the risk set for an event as long as the subject is under observation, i.e. it can be at risk for a subsequent event even though the previous event did not yet occur\n\n- Order of events is not important\n\nAn essential assumption of the Andersen-Gill model is that of **independent events** within subjects. This, however, is often not realistic in clinical trial data. For example, let's say that we are modelling myocardial infarction (MI). If a patient has already experienced one MI, their risk of subsequent events may increase due to underlying cardiovascular damage or presence of other risk factors. 
Thus, the more events a patient has, the more likely they are to experience future events, indicating dependence rather than independence. To accurately model this within-subject correlation, extensions like time-varying covariates, a robust sandwich covariance estimator or frailty terms may be needed. In this tutorial, we will discuss the sandwich correction.\n\n**Lin-Wei-Yang-Ying (LWYY) model or proportional means/rates model ([Lin, Wei, Yang & Ying 2000](https://www.jstor.org/stable/2680616))**\n\nLin, Wei, Yang, and Ying introduced an improved version of the Andersen-Gill model in 2000 (often referred to as proportional means/rates model), featuring a robust sandwich estimator that explicitly accounts for individual subject clusters. These robust standard errors yield wider confidence intervals and provide asymptotically valid inference even when the independence assumption does not hold ([Lee et al. 2025](https://pubmed.ncbi.nlm.nih.gov/40490702/)). The original and improved Andersen-Gill model often appear interchangeable in the literature, and while they produce identical estimates, their robust standard errors can differ substantially, which may impact the conclusions drawn from statistical inference.\n\nFor both versions of the Andersen-Gill model, the data must be structured as follows:\n\n| Subject | Time interval | Event | Stratum |\n|---------|---------------|-------|---------|\n| 1 | (0, 6\\] | 1 | 1 |\n| 1 | (6, 9\\] | 1 | 1 |\n| 1 | (9, 56\\] | 1 | 1 |\n| 1 | (56, 88\\] | 1 | 1 |\n| 2 | (0, 42\\] | 1 | 1 |\n| 2 | (42, 87\\] | 1 | 1 |\n| 2 | (87, 91\\] | 0 | 1 |\n\nThis can be visually represented:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/AG_lineplot.png){fig-align='center' width=80%}\n:::\n:::\n\n\nIn both versions of the Andersen-Gill model, each new time interval starts where the previous one ends.\n\n### Conditional models\n\n#### Prentice-Williams-Peterson model ([Prentice, Williams & 
Peterson 1981](https://academic.oup.com/biomet/article-abstract/68/2/373/260402?redirectedFrom=fulltext))\n\n- Conditional approach: incorporates conditional strata to account for ordering/dependence of events\n\n- Stratified baseline hazard $\\lambda_{0j}(t)$\n\n- Stratified regression coefficients $\\beta_j$: can be pooled ($\\beta$) or kept as event-specific ($\\beta_j$) in the output\n\n- Restricted risk set: contributions to the risk set for a subsequent event are restricted to only consider subjects that already experienced the previous event\n\n- Order of events is important\n\nThe Prentice-Williams-Peterson model can incorporate both overall and event-specific effects $\\beta_j$ for each covariate. An often made assumption is to set $\\beta_1 = \\beta_2 = ... = \\beta_k = \\beta$ to estimate a common parameter $\\beta$.\n\nDepending on the outcome of interest, Prentice, Williams and Peterson suggested two distinct models:\n\n1. **Total time model**\n\n$$\n\\lambda_{ij}(t) = \\lambda_{0j}(t) \\exp \\left( \\beta_j X_{ij}(t) \\right) \\\n$$\n\nThe total time variant of the Prentice-Williams-Peterson model uses the same time intervals as the counting process approach (Andersen-Gill model), which is useful for modelling the full time course ($t$) of the recurrent event process, i.e. the hazard of *any* recurrence.\n\nFor the total time model, the data must be structured as follows:\n\n| Subject | Time interval | Event | Stratum |\n|---------|---------------|-------|---------|\n| 1 | (0, 6\\] | 1 | 1 |\n| 1 | (6, 9\\] | 1 | 2 |\n| 1 | (9, 56\\] | 1 | 3 |\n| 1 | (56, 88\\] | 1 | 4 |\n| 2 | (0, 42\\] | 1 | 1 |\n| 2 | (42, 87\\] | 1 | 2 |\n| 2 | (87, 91\\] | 0 | 3 |\n\nThis can be visually represented:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/PWPtt_lineplot.png){fig-align='center' width=80%}\n:::\n:::\n\n\nAgain, in the total time model, each new time intervals starts where the previous one ends.\n\n2. 
**Gap time model**\n\n$$\n\\lambda_{ij}(t) = \\lambda_{0j}(t - t_{j-1}) \\exp \\left( \\beta_j X_{ij}(t) \\right) \\\n$$\n\nThe gap time variant of the Prentice-Williams-Peterson model uses time intervals that start at zero and end at the length of time until the next event, which is useful for modelling the time between each of the recurring events ($t - t_{j-1}$), i.e. the hazard of recurrence *after the previous event*.\n\nFor the gap time model, the data must be structured as follows:\n\n| Subject | Time interval | Event | Stratum |\n|---------|---------------|-------|---------|\n| 1 | (0, 6\\] | 1 | 1 |\n| 1 | (0, 3\\] | 1 | 2 |\n| 1 | (0, 47\\] | 1 | 3 |\n| 1 | (0, 32\\] | 1 | 4 |\n| 2 | (0, 42\\] | 1 | 1 |\n| 2 | (0, 45\\] | 1 | 2 |\n| 2 | (0, 3\\] | 0 | 3 |\n\nThis can be visually represented:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/PWPgt_lineplot.png){fig-align='center' width=80%}\n:::\n:::\n\n\nIn the gap time model, each time interval starts at zero and has a length equal to the gap time between two neighboring events.\n\n### Marginal models\n\n#### Wei-Lin-Weissfeld model ([Wei, Lin & Weissfeld 1989](https://www.jstor.org/stable/2290084))\n\n$$\n\\lambda_{ij}(t) = \\lambda_{0j}(t) \\exp \\left( \\beta_j X_{ij}(t) \\right) \\\n$$\n\n- Marginal approach: treats each (recurrent) event as having a separate, marginal process\n\n- Stratified baseline hazard $\\lambda_{0j}(t)$\n\n- Stratified regression coefficients $\\beta_j$: can be pooled ($\\beta$) or kept as event-specific ($\\beta_j$) in the output\n\n- Semi-restricted risk set: all subjects contribute follow-up times to all *potential* events, i.e. each subject is at risk for all *potential* events, regardless of how many events that subject actually experiences\n\n- Order of events is not important\n\nAlthough the Wei-Lin-Weissfeld model has it roots in competing risks analysis, it conveniently lends itself to model recurrent events as well. 
Like the Andersen-Gill model, the Wei-Lin-Weissfeld model also assumes **independence** of events, which is often not feasible in practice. In addition, it is assumed there is no specific order among the events or that the events are different types of events, and not necessarily *recurrent* events.\n\nLike the Prentice-Williams-Peterson models, the Wei-Lin-Weissfeld model can incorporate both overall and event-specific effects $\\beta_j$ for each covariate. An often made assumption is to set $\\beta_1 = \\beta_2 = ... = \\beta_k = \\beta$ to estimate a common parameter $\\beta$. Another approach is to combine event-specific effects $\\beta_j$ to get an estimator of the average effect, as described in [Wei, Lin & Weissfeld 1989](https://www.jstor.org/stable/2290084) (this is not discussed further here).\n\nFor Wei-Lin-Weissfeld models, the data must be structured as follows:\n\n| Subject | Time interval | Event | Stratum |\n|---------|---------------|-------|---------|\n| 1 | (0, 6\\] | 1 | 1 |\n| 1 | (0, 9\\] | 1 | 2 |\n| 1 | (0, 56\\] | 1 | 3 |\n| 1 | (0, 88\\] | 1 | 4 |\n| 2 | (0, 42\\] | 1 | 1 |\n| 2 | (0, 87\\] | 1 | 2 |\n| 2 | (0, 91\\] | 0 | 3 |\n| 2 | (0, 91\\] | 0 | 4 |\n\nThis can be visually represented:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/WLW_lineplot.png){fig-align='center' width=80%}\n:::\n:::\n\n\nIn the Wei-Lin-Weissfeld model, each time intervals starts at zero and ends at its respective event time.\n\n### Overview of all models\n\nIn summary, the selection of the model to use would depend on the type of events, the importance of the order of the events and the time intervals to be analyzed. 
We made an effort to summarize the similarities and differences between the models in the table below.\n\n| | **AG** | **PWPtt** | **PWPgt** | **WLW** |\n|---------------|---------------|---------------|---------------|---------------|\n| **Approach** | counting process | conditional | conditional | marginal |\n| **Baseline hazard** | common | stratified | stratified | stratified |\n| **Regression coefficients** | common | stratified possible | stratified possible | stratified possible |\n| **Risk set** | unrestricted | restricted | restricted | semi-restricted |\n| **Time interval** | total time | total time | gap time | total time |\n| **Order of events** | not important | important | important | not important |\n| **Hazard ratio (HR)** | risk of any recurrence | risk of any recurrence | risk of recurrence after previous event | risk of event of any type, not necessarily recurrent event |\n\nNote that, because the ordering of events is not important in the Andersen-Gill and Wei-Lin-Weissfeld model, these models come with the assumption of **independence** of events. In contrast, the Prentice-Williams-Peterson models overcome the need for this assumption by capturing the dependence structure between recurrent events in conditional strata. Consequently, events are assumed to be *conditionally* independent in the Prentice-Williams-Peterson models.\n\nA nice visual representation of the stratification and time interval structure of each model is given below. 
The correct data structure is pivotal when modelling recurrent events and depends on the methodology you want to use, as illustrated in the figure.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/combined_lineplot.png){fig-align='center' width=80%}\n:::\n:::\n\n\n## Modelling recurrent events using the `survival` package\n\n### Data\n\nFor this tutorial we will use the [`bladder` data](https://rdrr.io/cran/survival/man/bladder.html) from the [`survival` package](https://cran.r-project.org/web/packages/survival/survival.pdf), which captures recurrences of bladder cancer from a clinical trial for an oncolytic called thiotepa. The `bladder` data is regularly used by many statisticians to demonstrate methodology for recurrent event modelling. Somewhat confusingly, there are three versions of this data available:\n\n- `bladder1`: original data from the study on all subjects (294 records)\n\n- `bladder2`: data in Andersen-Gill format on subset of subjects with nonzero follow-up time (178 records)\n\n- `bladder`: data in Wei-Lin-Weissfeld format on subset of subjects with nonzero follow-up time (340 records)\n\nFor this tutorial, we will use `bladder2` to illustrate Andersen-Gill and Prentice-Williams-Peterson models, and `bladder` to illustrate the Wei-Lin-Weissfeld model.\n\nThe variables included in both datasets are:\n\n- **id**: patient id\n\n- **rx**: treatment group (1 = placebo, 2 = thiotepa)\n\n- **number**: initial number of tumors (8 = 8 or more)\n\n- **size**: size in cm of largest initial tumor\n\n- **start**: start of time interval; this variable is **not** present in `bladder`\n\n- **stop**: (recurrent) event or censoring time\n\n- **event**: event indicator (1 = event, 0 = censored)\n\n- **enum**: order of recurrence\n\nImportantly, both datasets collect the data in a **counting process** structure. 
This means that there is one record for each subject and time interval, where a time interval is defined as the time to its respective event (**event** = 1), or the time to follow-up if the event did not occur (**event** = 0).\n\nLet's look more closely at the `bladder2` and `bladder` data:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbladder2 <- survival::bladder2\ngt(head(bladder2, 6))\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n
idrxnumbersizestartstopeventenum
11130101
21210401
31110701
415101001
51410611
514161002
\n
\n```\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbladder2 %>%\n group_by(enum) %>% summarise(n = n()) %>% gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n\n \n\n \n\n \n\n \n \n
enumn
185
246
327
420
\n
\n```\n\n:::\n:::\n\n\nIn `bladder2`, in the Andersen-Gill format, each subject has a variable amount of records, depending on the amount of events that subject experienced.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbladder <- survival::bladder\ngt(head(bladder, 20))\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n \n
idrxnumbersizestopeventenum
1113101
1113102
1113103
1113104
2121401
2121402
2121403
2121404
3111701
3111702
3111703
3111704
41511001
41511002
41511003
41511004
5141611
51411002
51411003
51411004
\n
\n```\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbladder %>%\n group_by(enum) %>% summarise(n = n()) %>% gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n\n \n\n \n\n \n\n \n \n
enumn
185
285
385
485
\n
\n```\n\n:::\n:::\n\n\nIn `bladder`, in the Wei-Lin-Weissfeld format, each subject has four records, regardless of how many events that subject actually experienced. In addition, there is no `start` variable, as all time intervals start at zero.\n\n### Analysis\n\nIn the `survival` package, any survival analysis based on the Cox proportional hazard model can be conducted using the `coxph()` function. Hence, conveniently, when modelling time-to-event data with recurrent events, the same function can be used. The caveat here is that an adequate data structure is required, which must be in correspondence with the model you want to use.\n\nIn this section of the tutorial, we will explain how the arguments of the `coxph()` function and data structure must be defined to fit every type of recurrent event model correctly.\n\n#### Andersen-Gill model\n\n1. **Improved Andersen-Gill model (LWYY model or proportional means/rates model)**\n\nFor the improved version of the Andersen-Gill model you must specify:\n\n- `formula = Surv(start, stop, event) ~ 'predictors'`\n\n- `cluster = id`\n\nAnd the data structure must be:\n\n| Id | Time interval | Start | Stop | Event |\n|-----|---------------|-------|------|-------|\n| 1 | (0, 1\\] | 0 | 1 | 0 |\n| 2 | (0, 4\\] | 0 | 4 | 0 |\n| 3 | (0, 7\\] | 0 | 7 | 0 |\n| 4 | (0, 10\\] | 0 | 10 | 0 |\n| 5 | (0, 6\\] | 0 | 6 | 1 |\n| 5 | (6, 10\\] | 6 | 10 | 0 |\n\nWe will use the `bladder2` data for this.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nAG <- coxph(Surv(start, stop, event) ~ as.factor(rx) + number + size,\n ties = \"breslow\",\n cluster = id,\n data = bladder2)\nsummary(AG)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nCall:\ncoxph(formula = Surv(start, stop, event) ~ as.factor(rx) + number + \n size, data = bladder2, ties = \"breslow\", cluster = id)\n\n n= 178, number of events= 112 \n\n coef exp(coef) se(coef) robust se z Pr(>|z|) \nas.factor(rx)2 -0.45979 0.63142 0.19996 0.25801 -1.782 0.07474 . 
To our knowledge, the original Andersen-Gill model of 1982 can only be fitted in R by adding an artificial clustering variable with unique entries to the `bladder2` data, which we call `id2`.
This artificial clustering variable will ignore any clustering that is actually present in the data.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbladder2 <- bladder2 %>%\n dplyr::mutate(id2 = row_number())\n```\n:::\n\n\nExcept for `cluster = id2`, the rest of the code remains the same.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nAG_original <- coxph(Surv(start, stop, event) ~ as.factor(rx) + number + size,\n ties = \"breslow\",\n cluster = id2,\n data = bladder2)\nsummary(AG_original)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nCall:\ncoxph(formula = Surv(start, stop, event) ~ as.factor(rx) + number + \n size, data = bladder2, ties = \"breslow\", cluster = id2)\n\n n= 178, number of events= 112 \n\n coef exp(coef) se(coef) robust se z Pr(>|z|) \nas.factor(rx)2 -0.45979 0.63142 0.19996 0.22902 -2.008 0.04468 * \nnumber 0.17164 1.18726 0.04733 0.06469 2.653 0.00797 **\nsize -0.04256 0.95833 0.06903 0.07325 -0.581 0.56119 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n exp(coef) exp(-coef) lower .95 upper .95\nas.factor(rx)2 0.6314 1.5837 0.4031 0.9891\nnumber 1.1873 0.8423 1.0459 1.3477\nsize 0.9583 1.0435 0.8302 1.1063\n\nConcordance= 0.634 (se = 0.032 )\nLikelihood ratio test= 16.77 on 3 df, p=8e-04\nWald test = 9.36 on 3 df, p=0.02\nScore (logrank) test = 18.57 on 3 df, p=3e-04, Robust = 14.27 p=0.003\n\n (Note: the likelihood ratio and score tests assume independence of\n observations within a cluster, the Wald and robust score tests do not).\n```\n\n\n:::\n:::\n\n\nAlthough the original Andersen-Gill model does not consider separate **id** clusters, it still computes robust standard errors using the sandwich estimator, as `robust = TRUE`. The resulting robust standard errors (`robust se`) differ from those provided for the improved Andersen-Gill model, while the estimated coefficients (`coef`) and non-robust standard errors (`se(coef)`) remain perfectly unchanged.\n\n#### Prentice-Williams-Peterson model\n\n1. 
**Total time model**\n\nFor the Prentice-Williams-Peterson total time model you must specify:\n\n- `formula = Surv(start, stop, event) ~ 'predictors' + strata(enum)`\n\n- `cluster = id`\n\nAnd the data structure must be:\n\n| Id | Time interval | Start | Stop | Event | Enum |\n|-----|---------------|-------|------|-------|------|\n| 1 | (0, 1\\] | 0 | 1 | 0 | 1 |\n| 2 | (0, 4\\] | 0 | 4 | 0 | 1 |\n| 3 | (0, 7\\] | 0 | 7 | 0 | 1 |\n| 4 | (0, 10\\] | 0 | 10 | 0 | 1 |\n| 5 | (0, 6\\] | 0 | 6 | 1 | 1 |\n| 5 | (6, 10\\] | 6 | 10 | 0 | 2 |\n\nWe will use the `bladder2` data for this.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nPWPtt <- coxph(Surv(start, stop, event) ~ as.factor(rx) + number + size + strata(enum),\n ties = \"breslow\",\n cluster = id,\n data = bladder2)\nsummary(PWPtt)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nCall:\ncoxph(formula = Surv(start, stop, event) ~ as.factor(rx) + number + \n size + strata(enum), data = bladder2, ties = \"breslow\", cluster = id)\n\n n= 178, number of events= 112 \n\n coef exp(coef) se(coef) robust se z Pr(>|z|) \nas.factor(rx)2 -0.334295 0.715842 0.216087 0.197064 -1.696 0.0898 .\nnumber 0.115653 1.122606 0.053681 0.049913 2.317 0.0205 *\nsize -0.008051 0.991982 0.072725 0.060124 -0.134 0.8935 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n\n exp(coef) exp(-coef) lower .95 upper .95\nas.factor(rx)2 0.7158 1.3970 0.4865 1.053\nnumber 1.1226 0.8908 1.0180 1.238\nsize 0.9920 1.0081 0.8817 1.116\n\nConcordance= 0.615 (se = 0.032 )\nLikelihood ratio test= 6.11 on 3 df, p=0.1\nWald test = 7.19 on 3 df, p=0.07\nScore (logrank) test = 6.45 on 3 df, p=0.09, Robust = 8.76 p=0.03\n\n (Note: the likelihood ratio and score tests assume independence of\n observations within a cluster, the Wald and robust score tests do not).\n```\n\n\n:::\n:::\n\n\nThe conditional strata of the Prentice-Williams-Peterson model are set by `strata(enum)` in the formula, where `enum` captures the ordering of recurrent events.\n\n2. **Gap time model**\n\nFor the Prentice-Williams-Peterson gap time model you must specify:\n\n- `formula = Surv(gtime, event) ~ 'predictors' + strata(enum)`\n\n- `cluster = id`\n\nAnd the data structure must be:\n\n| Id | Time interval | Start | Stop | Event | Enum |\n|-----|---------------|-------|------|-------|------|\n| 1 | (0, 1\\] | 0 | 1 | 0 | 1 |\n| 2 | (0, 4\\] | 0 | 4 | 0 | 1 |\n| 3 | (0, 7\\] | 0 | 7 | 0 | 1 |\n| 4 | (0, 10\\] | 0 | 10 | 0 | 1 |\n| 5 | (0, 6\\] | 0 | 6 | 1 | 1 |\n| 5 | (0, 4\\] | 0 | 4 | 0 | 2 |\n\nThis data structure can be achieved in `bladder2` by adding a `gtime` variable.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbladder2 <- bladder2 %>%\n dplyr::mutate(gtime = stop - start)\n \ngt(head(bladder2, 6))\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n \n
idrxnumbersizestartstopeventenumid2gtime
1113010111
2121040124
3111070137
415101001410
5141061156
51416100264
\n
\n```\n\n:::\n:::\n\n\nWe artificially set start = 0 for each gap time interval by including `gtime` instead of `start, stop` in the `Surv()` object.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nPWPgt <- coxph(Surv(gtime, event) ~ as.factor(rx) + number + size + strata(enum),\n ties = \"breslow\",\n cluster = id,\n data = bladder2)\nsummary(PWPgt)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nCall:\ncoxph(formula = Surv(gtime, event) ~ as.factor(rx) + number + \n size + strata(enum), data = bladder2, ties = \"breslow\", cluster = id)\n\n n= 178, number of events= 112 \n\n coef exp(coef) se(coef) robust se z Pr(>|z|) \nas.factor(rx)2 -0.26952 0.76375 0.20766 0.20808 -1.295 0.19522 \nnumber 0.15353 1.16595 0.05211 0.04889 3.140 0.00169 **\nsize 0.00684 1.00686 0.07001 0.06222 0.110 0.91246 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n exp(coef) exp(-coef) lower .95 upper .95\nas.factor(rx)2 0.7637 1.3093 0.5080 1.148\nnumber 1.1659 0.8577 1.0594 1.283\nsize 1.0069 0.9932 0.8913 1.137\n\nConcordance= 0.596 (se = 0.032 )\nLikelihood ratio test= 8.76 on 3 df, p=0.03\nWald test = 12.14 on 3 df, p=0.007\nScore (logrank) test = 9.6 on 3 df, p=0.02, Robust = 10.21 p=0.02\n\n (Note: the likelihood ratio and score tests assume independence of\n observations within a cluster, the Wald and robust score tests do not).\n```\n\n\n:::\n:::\n\n\n#### Wei-Lin-Weissfeld model\n\nFor the Wei-Lin-Weissfeld model you must specify:\n\n- `formula = Surv(stop, event) ~ 'predictors' + strata(enum)`\n\n- `cluster = id`\n\nAnd the data structure must be:\n\n| Id | Time interval | Start | Stop | Event | Enum |\n|-----|---------------|-------|------|-------|------|\n| 1 | (0, 1\\] | 0 | 1 | 0 | 1 |\n| 1 | (0, 1\\] | 0 | 1 | 0 | 2 |\n| 1 | (0, 1\\] | 0 | 1 | 0 | 3 |\n| 1 | (0, 1\\] | 0 | 1 | 0 | 4 |\n| 2 | (0, 4\\] | 0 | 4 | 0 | 1 |\n| 2 | (0, 4\\] | 0 | 4 | 0 | 2 |\n| 2 | (0, 4\\] | 0 | 4 | 0 | 3 |\n| 2 | (0, 4\\] | 0 | 4 | 0 | 4 |\n| 3 | (0, 7\\] | 0 | 7 | 0 | 1 
|\n| 3 | (0, 7\\] | 0 | 7 | 0 | 2 |\n| 3 | (0, 7\\] | 0 | 7 | 0 | 3 |\n| 3 | (0, 7\\] | 0 | 7 | 0 | 4 |\n| 4 | (0, 10\\] | 0 | 10 | 0 | 1 |\n| 4 | (0, 10\\] | 0 | 10 | 0 | 2 |\n| 4 | (0, 10\\] | 0 | 10 | 0 | 3 |\n| 4 | (0, 10\\] | 0 | 10 | 0 | 4 |\n| 5 | (0, 6\\] | 0 | 6 | 1 | 1 |\n| 5 | (0, 10\\] | 0 | 10 | 0 | 2 |\n| 5 | (0, 10\\] | 0 | 10 | 0 | 3 |\n| 5 | (0, 10\\] | 0 | 10 | 0 | 4 |\n\nWe will use the `bladder` data for this.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nWLW <- coxph(Surv(stop, event) ~ as.factor(rx) + number + size + strata(enum),\n ties = \"breslow\",\n cluster = id,\n data = bladder)\nsummary(WLW)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nCall:\ncoxph(formula = Surv(stop, event) ~ as.factor(rx) + number + \n size + strata(enum), data = bladder, ties = \"breslow\", cluster = id)\n\n n= 340, number of events= 112 \n\n coef exp(coef) se(coef) robust se z Pr(>|z|) \nas.factor(rx)2 -0.57986 0.55998 0.20118 0.30344 -1.911 0.0560 . \nnumber 0.20849 1.23182 0.04691 0.06567 3.175 0.0015 **\nsize -0.05094 0.95034 0.06967 0.09304 -0.548 0.5840 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n exp(coef) exp(-coef) lower .95 upper .95\nas.factor(rx)2 0.5600 1.7858 0.3089 1.015\nnumber 1.2318 0.8118 1.0830 1.401\nsize 0.9503 1.0523 0.7919 1.140\n\nConcordance= 0.663 (se = 0.036 )\nLikelihood ratio test= 24.71 on 3 df, p=2e-05\nWald test = 15.56 on 3 df, p=0.001\nScore (logrank) test = 27.89 on 3 df, p=4e-06, Robust = 11.75 p=0.008\n\n (Note: the likelihood ratio and score tests assume independence of\n observations within a cluster, the Wald and robust score tests do not).\n```\n\n\n:::\n:::\n\n\nImportantly, the strata of the Wei-Lin-Weissfeld model as set by `strata(enum)` are substantially different from the conditional strata of the Prentice-Williams-Peterson model. 
The `enum` variable is now no longer assumed to be an ordinal variable.\n\n#### Important notes\n\n[**Note:**]{.underline} For all recurrent event models, another way of defining the subject clusters is by using `cluster(id)` in the formula, rather than setting `cluster = id`. This results in the same outcomes, as shown below.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nAG_v1 <- coxph(Surv(start, stop, event) ~ as.factor(rx) + number + size,\n ties = \"breslow\",\n cluster = id,\n data = bladder2)\n\nsummary(AG_v1)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nCall:\ncoxph(formula = Surv(start, stop, event) ~ as.factor(rx) + number + \n size, data = bladder2, ties = \"breslow\", cluster = id)\n\n n= 178, number of events= 112 \n\n coef exp(coef) se(coef) robust se z Pr(>|z|) \nas.factor(rx)2 -0.45979 0.63142 0.19996 0.25801 -1.782 0.07474 . \nnumber 0.17164 1.18726 0.04733 0.06131 2.799 0.00512 **\nsize -0.04256 0.95833 0.06903 0.07555 -0.563 0.57317 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
[**Note:**]{.underline} R uses `ties = \"efron\"` by default, while SAS uses `ties = \"breslow\"` by default.
For more information, be sure to check the [CAMIS webpage](https://psiaims.github.io/CAMIS/Comp/r-sas_survival.html) on the comparison of Cox proportional hazards models in R and SAS.\n\n### Interpretation\n\nIn terms of interpretation, hazard ratios ($\\exp(\\beta_j)$) are often used when making inferences based on Cox proportional hazards models. Now, as you may remember from the overview presented earlier, it is important to recognize that each of the recurrent event models comes with a slightly different interpretation of the hazard ratio, as defined by the assumptions around the model.\n\n| | **AG** | **PWPtt** | **PWPgt** | **WLW** |\n|---------------|---------------|---------------|---------------|---------------|\n| **Hazard ratio (HR)** | risk of any recurrence | risk of any recurrence | risk of recurrence after previous event | risk of event of any type, not necessarily recurrent event |\n\nThis means that, for the `bladder` data, we can draw slightly different conclusions on the hazard ratio of the group treated with thiotepa (**rx** = 2) versus the placebo group (**rx** = 1).\n\n| Model | HR: rx2 vs rx1 | 95% CI | P-value |\n|-------------|----------------|----------------|---------|\n| AG | 0.631 | 0.381 to 1.047 | 0.0747 |\n| Original AG | 0.631 | 0.403 to 0.989 | 0.0447 |\n| PWPtt | 0.716 | 0.487 to 1.053 | 0.0898 |\n| PWPgt | 0.764 | 0.508 to 1.148 | 0.1952 |\n| WLW | 0.560 | 0.309 to 1.015 | 0.0560 |\n\nThese conclusions are:\n\n- **Andersen-Gill**: the risk of having *any new tumor recurrence* in the treatment group is 0.631 (0.381 - 1.047) times that of the placebo group\n\n- **Prentice-Williams-Peterson: total time**: the risk of having *any new tumor recurrence* in the treatment group is 0.716 (0.487 - 1.053) times that of the placebo group\n\n- **Prentice-Williams-Peterson: gap time**: the risk of having *a new tumor recurrence after a previous event* in the treatment group is 0.764 (0.508 - 1.148) times that of the placebo group\n\n- 
**Wei-Lin-Weissfeld**: the risk of having *any type of event* in the treatment group is 0.560 (0.309 - 1.015) times that of the placebo group\n\n[**Note:**]{.underline} The improved Andersen-Gill model (LWYY model or proportional means/rates model) is preferred over the original Andersen-Gill model.\n\n### Event-specific estimates\n\nFor the Prentice-Williams-Peterson and Wei-Lin-Weissfeld models we can incorporate both overall ($\\beta$) and event-specific ($\\beta_j$) effects for each covariate. To arrive at pooled model parameters these models assume that $\\beta_1 = \\beta_2 = ... = \\beta_k = \\beta$. Until now, we have only considered pooled model parameters, but given the underlying stratification of these two models in particular, it may be valuable to look into the event-specific estimates as well ([Amorim & Cai 2015](https://pubmed.ncbi.nlm.nih.gov/25501468/)).\n\nTo output event-specific estimates for the treatment effect (**rx**), we simply specify `rx:strata(enum)` in the formula.\n\n#### Prentice-Williams-Peterson model\n\n**Total time model**\n\n\n::: {.cell}\n\n```{.r .cell-code}\nPWPtt_stratified <- coxph(Surv(start, stop, event) ~ rx:strata(enum) + number + size,\n ties = \"breslow\", cluster = id, data = bladder2)\nsummary(PWPtt_stratified)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nCall:\ncoxph(formula = Surv(start, stop, event) ~ rx:strata(enum) + \n number + size, data = bladder2, ties = \"breslow\", cluster = id)\n\n n= 178, number of events= 112 \n\n coef exp(coef) se(coef) robust se z Pr(>|z|) \nnumber 0.111071 1.117475 0.054397 0.050063 2.219 0.0265 *\nsize -0.006674 0.993349 0.073173 0.061409 -0.109 0.9135 \nrx:strata(enum)enum=1 -0.409893 0.663721 0.304293 0.286737 -1.430 0.1529 \nrx:strata(enum)enum=2 -0.416339 0.659457 0.398266 0.423632 -0.983 0.3257 \nrx:strata(enum)enum=3 -0.142970 0.866780 0.593818 0.405303 -0.353 0.7243 \nrx:strata(enum)enum=4 0.105362 1.111112 0.679095 0.469706 0.224 0.8225 \n---\nSignif. 
**Gap time model**\n\n\n::: {.cell}
0.1 ' ' 1\n\n exp(coef) exp(-coef) lower .95 upper .95\nnumber 1.1641 0.8590 1.0582 1.281\nsize 1.0133 0.9869 0.8956 1.146\nrx:strata(enum)enum=1 0.6462 1.5475 0.3705 1.127\nrx:strata(enum)enum=2 0.7395 1.3523 0.3449 1.585\nrx:strata(enum)enum=3 1.0150 0.9853 0.3821 2.696\nrx:strata(enum)enum=4 1.0620 0.9416 0.3687 3.059\n\nConcordance= 0.603 (se = 0.032 )\nLikelihood ratio test= 9.73 on 6 df, p=0.1\nWald test = 14.45 on 6 df, p=0.02\nScore (logrank) test = 10.49 on 6 df, p=0.1, Robust = 11.41 p=0.08\n\n (Note: the likelihood ratio and score tests assume independence of\n observations within a cluster, the Wald and robust score tests do not).\n```\n\n\n:::\n:::\n\n\n#### Wei-Lin-Weissfeld model\n\n\n::: {.cell}\n\n```{.r .cell-code}\nWLW_stratified <- coxph(Surv(stop, event) ~ rx:strata(enum) + number + size,\n ties = \"breslow\", cluster = id, data = bladder)\nsummary(WLW_stratified)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nCall:\ncoxph(formula = Surv(stop, event) ~ rx:strata(enum) + number + \n size, data = bladder, ties = \"breslow\", cluster = id)\n\n n= 340, number of events= 112 \n\n coef exp(coef) se(coef) robust se z Pr(>|z|) \nnumber 0.20747 1.23056 0.04685 0.06573 3.156 0.0016 **\nsize -0.05187 0.94945 0.06966 0.09300 -0.558 0.5770 \nrx:strata(enum)enum=1 -0.47890 0.61947 0.30583 0.28313 -1.691 0.0908 . \nrx:strata(enum)enum=2 -0.64914 0.52249 0.39217 0.36826 -1.763 0.0779 . \nrx:strata(enum)enum=3 -0.71783 0.48781 0.45971 0.42148 -1.703 0.0885 . \nrx:strata(enum)enum=4 -0.56175 0.57021 0.56143 0.49592 -1.133 0.2573 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n\n exp(coef) exp(-coef) lower .95 upper .95\nnumber 1.2306 0.8126 1.0818 1.400\nsize 0.9494 1.0532 0.7912 1.139\nrx:strata(enum)enum=1 0.6195 1.6143 0.3556 1.079\nrx:strata(enum)enum=2 0.5225 1.9139 0.2539 1.075\nrx:strata(enum)enum=3 0.4878 2.0500 0.2135 1.114\nrx:strata(enum)enum=4 0.5702 1.7537 0.2157 1.507\n\nConcordance= 0.661 (se = 0.036 )\nLikelihood ratio test= 24.95 on 6 df, p=3e-04\nWald test = 16.54 on 6 df, p=0.01\nScore (logrank) test = 28.19 on 6 df, p=9e-05, Robust = 11.92 p=0.06\n\n (Note: the likelihood ratio and score tests assume independence of\n observations within a cluster, the Wald and robust score tests do not).\n```\n\n\n:::\n:::\n\n\n## References\n\n[Amor 2023](https://www.lexjansen.com/phuse/2023/as/PAP_AS05.pdf). Eat, Sleep, R, Repeat.\n\n[Amorim & Cai 2015](https://pubmed.ncbi.nlm.nih.gov/25501468/). Modelling recurrent events: a tutorial for analysis in epidemiology. *International Journal of Epidemiology*. 2015 Feb;44(1):324-33.\n\n[Andersen & Gill 1982](https://www.jstor.org/stable/2240714). Cox’s Regression Model for Counting Processes: A Large Sample Study. *The Annals of Statistics*. 10(4):1100–1120.\n\n[bladder data](https://rdrr.io/cran/survival/man/bladder.html)\n\n[Lee et al. 2025](https://pubmed.ncbi.nlm.nih.gov/40490702/). Comparison of total event analysis and first event analysis in relation to heterogeneity in cardiovascular trials. *BMC Medical Research Methodology*. 2025 Jun 9;25(1):159.\n\n[Lin, Wei, Yang & Ying 2000](https://www.jstor.org/stable/2680616). Semiparametric regression for the mean and rate functions of recurrent events. *Journal of the Royal Statistical Society: Series B.* 62(4):711–730.\n\n[Lu & Shen 2018](https://www.lexjansen.com/pharmasug/2018/EP/PharmaSUG-2018-EP02.pdf). Application of Survival Analysis in Multiple Events Using SAS. *PharmaSUG 2018*.\n\n[Ozga et al. 2018](https://pubmed.ncbi.nlm.nih.gov/29301487/). 
A systematic comparison of recurrent event models for application to composite endpoints. *BMC Medical Research Methodology*. 2018 Jan 4;18(1):2.\n\n[Prentice, Williams & Peterson 1981](https://www.jstor.org/stable/2335582). On the Regression Analysis of Multivariate Failure Time Data. *Biometrika*. 68(2):373–379.\n\n[survival package](https://cran.r-project.org/web/packages/survival/vignettes/survival.pdf)\n\n[Wei, Lin & Weissfeld 1989](https://www.jstor.org/stable/2290084). Regression Analysis of Multivariate Incomplete Failure Time Data by Modeling Marginal Distributions. *Journal of the American Statistical Association*. 84(408):1065–1073.\n\n::: {.callout-note collapse=\"true\" title=\"Session info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os Ubuntu 24.04.3 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-17\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P cli 3.6.5 2025-04-23 [?] RSPM (R 4.5.0)\n P digest 0.6.39 2025-11-19 [?] RSPM (R 4.5.0)\n P dplyr * 1.2.0 2026-02-03 [?] RSPM (R 4.5.0)\n P evaluate 1.0.5 2025-08-27 [?] RSPM (R 4.5.0)\n P fastmap 1.2.0 2024-05-15 [?] RSPM (R 4.5.0)\n P fs 1.6.6 2025-04-12 [?] RSPM (R 4.5.0)\n P generics 0.1.4 2025-05-09 [?] RSPM (R 4.5.0)\n P glue 1.8.0 2024-09-30 [?] RSPM (R 4.5.0)\n P gt * 1.3.0 2026-01-22 [?] RSPM (R 4.5.0)\n P htmltools 0.5.9 2025-12-04 [?] RSPM (R 4.5.0)\n P htmlwidgets 1.6.4 2023-12-06 [?] RSPM (R 4.5.0)\n P jsonlite 2.0.0 2025-03-27 [?] RSPM (R 4.5.0)\n P knitr 1.51 2025-12-20 [?] 
RSPM (R 4.5.0)\n lattice 0.22-7 2025-04-02 [2] CRAN (R 4.5.2)\n P lifecycle 1.0.5 2026-01-08 [?] RSPM (R 4.5.0)\n P magrittr 2.0.4 2025-09-12 [?] RSPM (R 4.5.0)\n Matrix 1.7-4 2025-08-28 [2] CRAN (R 4.5.2)\n P otel 0.2.0 2025-08-29 [?] RSPM (R 4.5.0)\n P pillar 1.11.1 2025-09-17 [?] RSPM (R 4.5.0)\n P pkgconfig 2.0.3 2019-09-22 [?] RSPM (R 4.5.0)\n P R6 2.6.1 2025-02-15 [?] RSPM (R 4.5.0)\n renv 1.0.10 2024-10-05 [1] RSPM (R 4.5.2)\n P rlang 1.1.7 2026-01-09 [?] RSPM (R 4.5.0)\n P rmarkdown 2.30 2025-09-28 [?] RSPM (R 4.5.0)\n P sass 0.4.10 2025-04-11 [?] RSPM (R 4.5.0)\n P sessioninfo 1.2.2 2021-12-06 [?] RSPM (R 4.5.0)\n survival * 3.8-3 2024-12-17 [2] CRAN (R 4.5.2)\n P tibble 3.3.1 2026-01-11 [?] RSPM (R 4.5.0)\n P tidyselect 1.2.1 2024-03-11 [?] RSPM (R 4.5.0)\n P vctrs 0.7.1 2026-01-23 [?] RSPM (R 4.5.0)\n P withr 3.0.2 2024-10-28 [?] RSPM (R 4.5.0)\n P xfun 0.56 2026-01-18 [?] RSPM (R 4.5.0)\n P xml2 1.5.2 2026-01-17 [?] RSPM (R 4.5.0)\n P yaml 2.3.12 2025-12-10 [?] RSPM (R 4.5.0)\n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.2/lib/R/library\n\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n:::\n", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/R/rounding/execute-results/html.json b/_freeze/R/rounding/execute-results/html.json index f884b729e..3be3eae4c 100644 --- a/_freeze/R/rounding/execute-results/html.json +++ b/_freeze/R/rounding/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "cb72f51d04e445daeccdf17554e8cd13", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Rounding in R\"\n---\n\n\n\n## round from R base\n\nThe **round()** function in Base R will round to the nearest whole number and 'rounding to the even number' when equidistant, meaning that exactly 12.5 rounds to the integer 12.\n\nThe **round(12.5,digits=1)** function tells R to round 
to 1 decimal place.\n\nHowever, rounding is dependent on OS services and on representation error since for example, if 0.15 is not represented exactly, if could actually be the number 0.15000000001 or 0.149999999999! The rounding rule applies to the represented number and not to the printed number, and so round(0.15, 1) could be either 0.1 or 0.2).\n\n\n::: {.cell}\n\n```{.r .cell-code}\nround(1:9 / 10 + 0.05, 1)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 0.2 0.2 0.3 0.4 0.6 0.7 0.8 0.9 1.0\n```\n\n\n:::\n:::\n\n\n## round_half_up from package janitor\n\nNote that the [janitor](https://sfirke.github.io/janitor/) package in R contains a function `round_half_up()` that rounds away from zero. In this case it rounds to the nearest whole number and 'away from zero' or 'rounding up' when equidistant, meaning that exactly 12.5 rounds to the integer 13.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Example code\nmy_number <- c(2.2, 3.99, 1.2345, 7.876, 13.8739)\n\nr_0_dec <- round(my_number, digits = 0)\nr_0_dec\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 2 4 1 8 14\n```\n\n\n:::\n\n```{.r .cell-code}\nr_1_dec <- round(my_number, digits = 1)\nr_1_dec\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 2.2 4.0 1.2 7.9 13.9\n```\n\n\n:::\n\n```{.r .cell-code}\nr_2_dec <- round(my_number, digits = 2)\nr_2_dec\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 2.20 3.99 1.23 7.88 13.87\n```\n\n\n:::\n\n```{.r .cell-code}\nr_3_dec <- round(my_number, digits = 3)\nr_3_dec\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 2.200 3.990 1.234 7.876 13.874\n```\n\n\n:::\n:::\n\n\nIf using the janitor package in R, and the function `round_half_up()`, the results would be the same with the exception of rounding 1.2345 to 3 decimal places where a result of 1.235 would be obtained instead of 1.234. However, in some rare cases, `round_half_up()` does not return result as expected. There are two kinds of cases for it. 1. 
Round down for positive decimal like 0.xx5.\n\n\n::: {.cell}\n\n```{.r .cell-code}\njanitor::round_half_up(524288.1255, digits = 3)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 524288.1\n```\n\n\n:::\n:::\n\n\nThe cause is that when the decimal is stored in binary, the value usually does not exactly the same with the original number. In the example above, 524288.1255 is stored as a value a little less than the original value. Then `round_half_up()` rounds it down.\n\n\n::: {.cell}\n\n```{.r .cell-code}\noptions(digits = 22)\n524288.1255\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 524288.1254999999655411\n```\n\n\n:::\n:::\n\n\nIn `round_half_up()`, a small decimal `sqrt(.Machine$double.eps)` is added before rounding. It avoids some incorrect rounding due to the stored numeric value is a little less than the original value, but does not cover all conditions.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nround_half_up <- function(x, digits = 0) {\n posneg <- sign(x)\n z <- abs(x) * 10^digits\n z <- z + 0.5 + sqrt(.Machine$double.eps)\n z <- trunc(z)\n z <- z / 10^digits\n z * posneg\n}\n```\n:::\n\n\nMore examples can be found from the code below. 
It creates numeric values containing different digit numbers of integer part and decimal part, and all ending with 5 for rounding.\n\n\n::: {.cell}\n\n```{.r .cell-code}\noptions(digits = 15) # set digit number to display\nint1 <- c(0, 2^(1:19)) # create values of integer part\nround_digits <- 1:7 # define values of rounding digits\n\ndec1 <- 2^(-round_digits) + 10^(-round_digits - 1) * 5 # create values of decimal part\n\ndf1 <- cross_join(tibble(int1), tibble(dec1, round_digits)) |>\n mutate(num1 = int1 + dec1) # combine integer part and decimal part\n\ndf1 |>\n mutate(rounded_num = round_half_up(num1, round_digits)) |> # round the numbers\n filter(rounded_num < num1) |> # incorrect if rounded result is less than the original number\n print.data.frame()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n int1 dec1 round_digits num1 rounded_num\n1 32768 0.01562550 6 32768.01562550 32768.0156250\n2 65536 0.03125500 5 65536.03125500 65536.0312500\n3 262144 0.06255000 4 262144.06255000 262144.0625000\n4 262144 0.03125500 5 262144.03125500 262144.0312500\n5 524288 0.12550000 3 524288.12550000 524288.1250000\n6 524288 0.00781255 7 524288.00781255 524288.0078125\n```\n\n\n:::\n:::\n\n\n6 of 140 numbers have incorrect results. Most of them are big numbers or long decimals to round.\n\n2. Round up for positive decimal like 0.4999....\n\n\n::: {.cell}\n\n```{.r .cell-code}\noptions(digits = 16)\nround_half_up(1.4999999851, 0)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 2\n```\n\n\n:::\n:::\n\n\nIt occurs when the number is smaller than but so closed to 0.xx5. As described in point 1 above, in `round_half_up()`, a small decimal `sqrt(.Machine$double.eps)` is added before rounding, which causes a number bigger than 0.xx5 to be rounded. It occurs only when the decimal is long, so `round_half_up()` is still reliable.\\\nAnd the added decimal `sqrt(.Machine$double.eps)` is necessary. 
Without it, or even replace it to a smaller decimal, there will be more incorrect results under point 1, as the example below. Some of them are common, e.g. rounding 16.1255 to 3 decimals.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# a new function to round away from zero, by replacing sqrt(.Machine$double.eps) in round_half_up to a smaller number\nround_half_up_test <- function(x, digits = 0) {\n posneg <- sign(x)\n z <- abs(x) * 10^digits\n z <- z + 0.5 + .Machine$double.eps * 100\n z <- trunc(z)\n z <- z / 10^digits\n z * posneg\n}\n\noptions(digits = 15)\ndf1 |>\n mutate(rounded_num = round_half_up_test(num1, round_digits)) |>\n filter(rounded_num < num1) |>\n print.data.frame()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n int1 dec1 round_digits num1 rounded_num\n1 2 0.03125500 5 2.03125500 2.0312500\n2 4 0.01562550 6 4.01562550 4.0156250\n3 16 0.12550000 3 16.12550000 16.1250000\n4 16 0.01562550 6 16.01562550 16.0156250\n5 128 0.12550000 3 128.12550000 128.1250000\n6 128 0.06255000 4 128.06255000 128.0625000\n7 128 0.03125500 5 128.03125500 128.0312500\n8 8192 0.25500000 2 8192.25500000 8192.2500000\n9 16384 0.12550000 3 16384.12550000 16384.1250000\n10 32768 0.25500000 2 32768.25500000 32768.2500000\n11 32768 0.01562550 6 32768.01562550 32768.0156250\n12 65536 0.12550000 3 65536.12550000 65536.1250000\n13 65536 0.03125500 5 65536.03125500 65536.0312500\n14 262144 0.06255000 4 262144.06255000 262144.0625000\n15 262144 0.03125500 5 262144.03125500 262144.0312500\n16 524288 0.12550000 3 524288.12550000 524288.1250000\n17 524288 0.00781255 7 524288.00781255 524288.0078125\n```\n\n\n:::\n:::\n\n\n## Other methods\n\nhttps://stackoverflow.com/a/12688836 discussed multiple algorithms to round away from zero, including the one implemented in `round_half_up()`. 
Below is another algorithm modified from it.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nround_v2 <- function(x, digits = 0, eps = .Machine$double.eps) {\n round(x + x * eps, digits = digits)\n}\n```\n:::\n\n\nLike `round_half_up()`, it also contains the two kinds of incorrect results. And like `round_half_up()`, a small decimal is added to make 0.xx5 round up. The parameter `eps` is provided to let user decide which small decimal to add.\n\nTo avoid the rounding issue totally, the only way is to increase precision, e.g. using package `Rmpfr`. It will need CPU resource. And it's not always necessary considering the accuracy of current functions.\n\n## round5() from package cards\n\nThe `cards::round5()` package does the same rounding as the `janitor::round_half_up()`.\n\n## Conclusion\n\nSo far, `round_half_up()` from package janitor (or `cards::round5()` ) is still one of the best solutions to round away from zero, though users may meet incorrect results in rare cases when the numbers are big or the decimal is long.\n\n\n::: {.cell}\n\n```{.r .cell-code}\noptions(digits = 7) # This just returns the number of displayed digits back to the default\n```\n:::\n\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-23\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P dplyr * 1.2.0 2026-02-03 [?] RSPM\n P janitor * 2.2.1 2024-12-22 [?] 
RSPM\n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n\n * ── Packages attached to the search path.\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n:::\n", + "markdown": "---\ntitle: \"Rounding in R\"\n---\n\n\n\n## round from R base\n\nThe **round()** function in Base R will round to the nearest whole number and 'rounding to the even number' when equidistant, meaning that exactly 12.5 rounds to the integer 12.\n\nThe **round(12.5,digits=1)** function tells R to round to 1 decimal place.\n\nHowever, rounding is dependent on OS services and on representation error since for example, if 0.15 is not represented exactly, if could actually be the number 0.15000000001 or 0.149999999999! The rounding rule applies to the represented number and not to the printed number, and so round(0.15, 1) could be either 0.1 or 0.2).\n\n\n::: {.cell}\n\n```{.r .cell-code}\nround(1:9 / 10 + 0.05, 1)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 0.2 0.2 0.3 0.4 0.6 0.7 0.8 0.9 1.0\n```\n\n\n:::\n:::\n\n\n## round_half_up from package janitor\n\nNote that the [janitor](https://sfirke.github.io/janitor/) package in R contains a function `round_half_up()` that rounds away from zero. 
In this case it rounds to the nearest whole number and 'away from zero' or 'rounding up' when equidistant, meaning that exactly 12.5 rounds to the integer 13.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Example code\nmy_number <- c(2.2, 3.99, 1.2345, 7.876, 13.8739)\n\nr_0_dec <- round(my_number, digits = 0)\nr_0_dec\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 2 4 1 8 14\n```\n\n\n:::\n\n```{.r .cell-code}\nr_1_dec <- round(my_number, digits = 1)\nr_1_dec\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 2.2 4.0 1.2 7.9 13.9\n```\n\n\n:::\n\n```{.r .cell-code}\nr_2_dec <- round(my_number, digits = 2)\nr_2_dec\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 2.20 3.99 1.23 7.88 13.87\n```\n\n\n:::\n\n```{.r .cell-code}\nr_3_dec <- round(my_number, digits = 3)\nr_3_dec\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 2.200 3.990 1.234 7.876 13.874\n```\n\n\n:::\n:::\n\n\nIf using the janitor package in R, and the function `round_half_up()`, the results would be the same with the exception of rounding 1.2345 to 3 decimal places where a result of 1.235 would be obtained instead of 1.234. However, in some rare cases, `round_half_up()` does not return result as expected. There are two kinds of cases for it. 1. Round down for positive decimal like 0.xx5.\n\n\n::: {.cell}\n\n```{.r .cell-code}\njanitor::round_half_up(524288.1255, digits = 3)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 524288.1\n```\n\n\n:::\n:::\n\n\nThe cause is that when the decimal is stored in binary, the value usually does not exactly the same with the original number. In the example above, 524288.1255 is stored as a value a little less than the original value. 
Then `round_half_up()` rounds it down.\n\n\n::: {.cell}\n\n```{.r .cell-code}\noptions(digits = 22)\n524288.1255\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 524288.1254999999655411\n```\n\n\n:::\n:::\n\n\nIn `round_half_up()`, a small decimal `sqrt(.Machine$double.eps)` is added before rounding. It avoids some incorrect rounding due to the stored numeric value is a little less than the original value, but does not cover all conditions.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nround_half_up <- function(x, digits = 0) {\n posneg <- sign(x)\n z <- abs(x) * 10^digits\n z <- z + 0.5 + sqrt(.Machine$double.eps)\n z <- trunc(z)\n z <- z / 10^digits\n z * posneg\n}\n```\n:::\n\n\nMore examples can be found from the code below. It creates numeric values containing different digit numbers of integer part and decimal part, and all ending with 5 for rounding.\n\n\n::: {.cell}\n\n```{.r .cell-code}\noptions(digits = 15) # set digit number to display\nint1 <- c(0, 2^(1:19)) # create values of integer part\nround_digits <- 1:7 # define values of rounding digits\n\ndec1 <- 2^(-round_digits) + 10^(-round_digits - 1) * 5 # create values of decimal part\n\ndf1 <- cross_join(tibble(int1), tibble(dec1, round_digits)) |>\n mutate(num1 = int1 + dec1) # combine integer part and decimal part\n\ndf1 |>\n mutate(rounded_num = round_half_up(num1, round_digits)) |> # round the numbers\n filter(rounded_num < num1) |> # incorrect if rounded result is less than the original number\n print.data.frame()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n int1 dec1 round_digits num1 rounded_num\n1 32768 0.01562550 6 32768.01562550 32768.0156250\n2 65536 0.03125500 5 65536.03125500 65536.0312500\n3 262144 0.06255000 4 262144.06255000 262144.0625000\n4 262144 0.03125500 5 262144.03125500 262144.0312500\n5 524288 0.12550000 3 524288.12550000 524288.1250000\n6 524288 0.00781255 7 524288.00781255 524288.0078125\n```\n\n\n:::\n:::\n\n\n6 of 140 numbers have incorrect results. 
Most of them are big numbers or long decimals to round.\n\n2. Round up for positive decimal like 0.4999....\n\n\n::: {.cell}\n\n```{.r .cell-code}\noptions(digits = 16)\nround_half_up(1.4999999851, 0)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 2\n```\n\n\n:::\n:::\n\n\nIt occurs when the number is smaller than but so closed to 0.xx5. As described in point 1 above, in `round_half_up()`, a small decimal `sqrt(.Machine$double.eps)` is added before rounding, which causes a number bigger than 0.xx5 to be rounded. It occurs only when the decimal is long, so `round_half_up()` is still reliable.\\\nAnd the added decimal `sqrt(.Machine$double.eps)` is necessary. Without it, or even replace it to a smaller decimal, there will be more incorrect results under point 1, as the example below. Some of them are common, e.g. rounding 16.1255 to 3 decimals.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# a new function to round away from zero, by replacing sqrt(.Machine$double.eps) in round_half_up to a smaller number\nround_half_up_test <- function(x, digits = 0) {\n posneg <- sign(x)\n z <- abs(x) * 10^digits\n z <- z + 0.5 + .Machine$double.eps * 100\n z <- trunc(z)\n z <- z / 10^digits\n z * posneg\n}\n\noptions(digits = 15)\ndf1 |>\n mutate(rounded_num = round_half_up_test(num1, round_digits)) |>\n filter(rounded_num < num1) |>\n print.data.frame()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n int1 dec1 round_digits num1 rounded_num\n1 2 0.03125500 5 2.03125500 2.0312500\n2 4 0.01562550 6 4.01562550 4.0156250\n3 16 0.12550000 3 16.12550000 16.1250000\n4 16 0.01562550 6 16.01562550 16.0156250\n5 128 0.12550000 3 128.12550000 128.1250000\n6 128 0.06255000 4 128.06255000 128.0625000\n7 128 0.03125500 5 128.03125500 128.0312500\n8 8192 0.25500000 2 8192.25500000 8192.2500000\n9 16384 0.12550000 3 16384.12550000 16384.1250000\n10 32768 0.25500000 2 32768.25500000 32768.2500000\n11 32768 0.01562550 6 32768.01562550 32768.0156250\n12 65536 0.12550000 3 
65536.12550000 65536.1250000\n13 65536 0.03125500 5 65536.03125500 65536.0312500\n14 262144 0.06255000 4 262144.06255000 262144.0625000\n15 262144 0.03125500 5 262144.03125500 262144.0312500\n16 524288 0.12550000 3 524288.12550000 524288.1250000\n17 524288 0.00781255 7 524288.00781255 524288.0078125\n```\n\n\n:::\n:::\n\n\n## Other methods\n\nhttps://stackoverflow.com/a/12688836 discussed multiple algorithms to round away from zero, including the one implemented in `round_half_up()`. Below is another algorithm modified from it.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nround_v2 <- function(x, digits = 0, eps = .Machine$double.eps) {\n round(x + x * eps, digits = digits)\n}\n```\n:::\n\n\nLike `round_half_up()`, it also contains the two kinds of incorrect results. And like `round_half_up()`, a small decimal is added to make 0.xx5 round up. The parameter `eps` is provided to let user decide which small decimal to add.\n\nTo avoid the rounding issue totally, the only way is to increase precision, e.g. using package `Rmpfr`. It will need CPU resource. 
And it's not always necessary considering the accuracy of current functions.\n\n## round5() from package cards\n\nThe `cards::round5()` package does the same rounding as the `janitor::round_half_up()`.\n\n## Conclusion\n\nSo far, `round_half_up()` from package janitor (or `cards::round5()` ) is still one of the best solutions to round away from zero, though users may meet incorrect results in rare cases when the numbers are big or the decimal is long.\n\n\n::: {.cell}\n\n```{.r .cell-code}\noptions(digits = 7) # This just returns the number of displayed digits back to the default\n```\n:::\n\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os Ubuntu 24.04.3 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-17\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P dplyr * 1.2.0 2026-02-03 [?] RSPM (R 4.5.0)\n P janitor * 2.2.1 2024-12-22 [?] 
RSPM (R 4.5.0)\n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.2/lib/R/library\n\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n:::\n", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/R/sample_size_average_bioequivalence/execute-results/html.json b/_freeze/R/sample_size_average_bioequivalence/execute-results/html.json index aedbc0493..8e5a958c5 100644 --- a/_freeze/R/sample_size_average_bioequivalence/execute-results/html.json +++ b/_freeze/R/sample_size_average_bioequivalence/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "8d05c1ef3e75b9687280e336a1ea8d49", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Sample Size Calculation for Average Bioequivalence\"\n---\n\n## Regulatory Requirements\n\nThe most unambiguous requirements are mentioned in [FDA Guidance for Industry. Statistical Approaches to Establishing Bioequivalence](https://www.fda.gov/media/70958/download):\n\n> Sample sizes for average BE should be obtained using published formulas. Sample sizes for population and individual BE should be based on simulated data. The simulations should be conducted using a default situation allowing the two formulations to vary as much as 5% in average BA with equal variances and certain magnitude of subject-by-formulation interaction. The study should have 80 or 90% power to conclude BE between these two formulations. Sample size also depends on the magnitude of variability and the design of the study. Variance estimates to determine the number of subjects for a specific drug can be obtained from the biomedical literature and/or pilot studies.\n\nAppropriate method is described in `Diletti D, Hauschke D, Steinijans VW. Sample Size Determination for Bioequivalence Assessment by Means of Confidence Intervals. Int J Clin Pharmacol Ther Toxicol. 
1991;29(1):1–8` and implemented in R package [PowerTOST](https://cran.r-project.org/web/packages/PowerTOST/index.html) with one clarification: it is simulation-based (iterative) procedure rather than simple calculation by formula.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(PowerTOST)\nlibrary(knitr)\nlibrary(data.table)\nlibrary(purrr)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\n\nAttaching package: 'purrr'\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nThe following object is masked from 'package:data.table':\n\n transpose\n```\n\n\n:::\n:::\n\n\n\n## Sample size for standard crossover design (2x2x2) and 4 period full replicate design (2x2x4)\n\n`sampleN.TOST()` function can calculate sample size for different designs:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nkable(PowerTOST::known.designs())\n```\n\n::: {.cell-output-display}\n\n\n| no|design |df |df2 | steps| bk|bknif | bkni|name |\n|---:|:--------|:-----|:---|-----:|---:|:-----|---------:|:---------------------------|\n| 0|parallel |n-2 |n-2 | 2| 4.0|1/1 | 1.0000000|2 parallel groups |\n| 1|2x2 |n-2 |n-2 | 2| 2.0|1/2 | 0.5000000|2x2 crossover |\n| 1|2x2x2 |n-2 |n-2 | 2| 2.0|1/2 | 0.5000000|2x2x2 crossover |\n| 2|3x3 |2*n-4 |n-3 | 3| 2.0|2/9 | 0.2222222|3x3 crossover |\n| 3|3x6x3 |2*n-4 |n-6 | 6| 2.0|1/18 | 0.0555556|3x6x3 crossover |\n| 4|4x4 |3*n-6 |n-4 | 4| 2.0|1/8 | 0.1250000|4x4 crossover |\n| 5|2x2x3 |2*n-3 |n-2 | 2| 1.5|3/8 | 0.3750000|2x2x3 replicate crossover |\n| 6|2x2x4 |3*n-4 |n-2 | 2| 1.0|1/4 | 0.2500000|2x2x4 replicate crossover |\n| 7|2x4x4 |3*n-4 |n-4 | 4| 1.0|1/16 | 0.0625000|2x4x4 replicate crossover |\n| 9|2x3x3 |2*n-3 |n-3 | 3| 1.5|1/6 | 0.1666667|partial replicate (2x3x3) |\n| 10|2x4x2 |n-2 |n-2 | 4| 8.0|1/2 | 0.5000000|Balaam's (2x4x2) |\n| 11|2x2x2r |3*n-2 |n-2 | 2| 1.0|1/4 | 0.2500000|Liu's 2x2x2 repeated x-over |\n| 100|paired |n-1 |n-1 | 1| 2.0|2/1 | 2.0000000|paired means |\n\n\n:::\n:::\n\n\nBasic usage: we should specify `targetpower` (power to achieve 
at least, e.g. 0.8 or 0.9), `theta0` (T/R ratio if `logscale = TRUE` which is convenient default value) and `cv` (coefficient of variation given as ratio if `logscale = TRUE`).\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# 2x2x2\nPowerTOST::sampleN.TOST(\n targetpower = 0.8,\n theta0 = 0.95,\n CV = 0.3,\n design = \"2x2x2\"\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n+++++++++++ Equivalence test - TOST +++++++++++\n Sample size estimation\n-----------------------------------------------\nStudy design: 2x2 crossover \nlog-transformed data (multiplicative model)\n\nalpha = 0.05, target power = 0.8\nBE margins = 0.8 ... 1.25 \nTrue ratio = 0.95, CV = 0.3\n\nSample size (total)\n n power\n40 0.815845 \n```\n\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# 2x2x4\nPowerTOST::sampleN.TOST(\n targetpower = 0.9,\n theta0 = 0.98,\n CV = 0.24,\n design = \"2x2x4\"\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n+++++++++++ Equivalence test - TOST +++++++++++\n Sample size estimation\n-----------------------------------------------\nStudy design: 2x2x4 (4 period full replicate) \nlog-transformed data (multiplicative model)\n\nalpha = 0.05, target power = 0.9\nBE margins = 0.8 ... 1.25 \nTrue ratio = 0.98, CV = 0.24\n\nSample size (total)\n n power\n14 0.917492 \n```\n\n\n:::\n:::\n\n\nNote that total (not per-sequence) sample size is given.\n\n`alpha` (one-sided significance level, default is 0.05) almost never needs to be changed, `theta1` (lower bioequivalence limit) and `theta2` (upper bioequivalence limit) can be changed for non-standard bioequivalence limits, e.g. for narrow therapeutic index drugs.\n\n## Reproduction of Table 1 from FDA Guidance for Industry. Statistical Approaches to Establishing Bioequivalence\n\nReproduction of Table 1 from [FDA Guidance for Industry. 
Statistical Approaches to Establishing Bioequivalence](https://www.fda.gov/media/70958/download) is quite tricky because it consists one more parameter to consider - the subject-by-formulation interaction variance component, $\\sigma_D^2$. \n\n$$\\sigma_D^2=(\\sigma_{BT}-\\sigma_{BR})^2+2\\times(1-\\rho)\\times\\sigma_{BT}\\times\\sigma_{BR}$$\nwhere $\\sigma_{BT}^2$ and $\\sigma_{BR}^2$ are between-subject variances for the T and R formulations, respectively and $\\rho$ is correlation between subject-specific means $\\mu_{Tj}$ and $\\mu_{Rj}$. These parameters are rarely reported in publications and can't be estimated from CI boundaries and sample size. In such lack of information one can assume $\\sigma_{BT}=\\sigma_{BR}$ as well as $\\rho=1$. Under these reasonable assumptions $\\sigma_D^2=\\sigma_D=0$, so `sampleN.TOST()` calculation should be correct. \n\n\n::: {.cell}\n\n```{.r .cell-code}\ntargetpower <- c(0.8, 0.9)\ntheta0 <- 1 - 0.05\nCV <- c(0.15, 0.23, 0.3, 0.5)\ndesign <- c(\"2x2x2\", \"2x2x4\")\n\ndt <- data.table::CJ(CV, targetpower, design, theta0)\n\nsample_size <- purrr::pmap(dt, PowerTOST::sampleN.TOST, print = FALSE)\nkable(rbindlist(sample_size))\n```\n\n::: {.cell-output-display}\n\n\n|Design | alpha| CV| theta0| theta1| theta2| Sample size| Achieved power| Target power|\n|:------|-----:|----:|------:|------:|------:|-----------:|--------------:|------------:|\n|2x2x2 | 0.05| 0.15| 0.95| 0.8| 1.25| 12| 0.8305164| 0.8|\n|2x2x4 | 0.05| 0.15| 0.95| 0.8| 1.25| 6| 0.8458307| 0.8|\n|2x2x2 | 0.05| 0.15| 0.95| 0.8| 1.25| 16| 0.9260211| 0.9|\n|2x2x4 | 0.05| 0.15| 0.95| 0.8| 1.25| 8| 0.9328881| 0.9|\n|2x2x2 | 0.05| 0.23| 0.95| 0.8| 1.25| 24| 0.8066535| 0.8|\n|2x2x4 | 0.05| 0.23| 0.95| 0.8| 1.25| 12| 0.8143816| 0.8|\n|2x2x2 | 0.05| 0.23| 0.95| 0.8| 1.25| 32| 0.9044320| 0.9|\n|2x2x4 | 0.05| 0.23| 0.95| 0.8| 1.25| 16| 0.9082552| 0.9|\n|2x2x2 | 0.05| 0.30| 0.95| 0.8| 1.25| 40| 0.8158453| 0.8|\n|2x2x4 | 0.05| 0.30| 0.95| 0.8| 1.25| 20| 0.8202398| 0.8|\n|2x2x2 
| 0.05| 0.30| 0.95| 0.8| 1.25| 52| 0.9019652| 0.9|\n|2x2x4 | 0.05| 0.30| 0.95| 0.8| 1.25| 26| 0.9043064| 0.9|\n|2x2x2 | 0.05| 0.50| 0.95| 0.8| 1.25| 98| 0.8032172| 0.8|\n|2x2x4 | 0.05| 0.50| 0.95| 0.8| 1.25| 50| 0.8128063| 0.8|\n|2x2x2 | 0.05| 0.50| 0.95| 0.8| 1.25| 132| 0.9012316| 0.9|\n|2x2x4 | 0.05| 0.50| 0.95| 0.8| 1.25| 66| 0.9021398| 0.9|\n\n\n:::\n:::\n\n\nAs we can see, calculated values are equal to the reference ones for smallest $\\sigma_D=0.01$ if CV=0.15 and CV=0.23. If CV=0.30 and power 80%, sample sizes are also equal, but for other parameters combinations sample sizes are underestimated. \n\nConclusion: we can trust `sampleN.TOST()`; for CV less or equal 0.30 with power 80% and for CV less or equal 0.23 with power 90% it can be considered as validated against reference from FDA guidance.\n\n\n## Estimate CV from CI boundaries and sample size\n\nCV can be calculated from CI boundaries and sample size if only these values are available:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nPowerTOST::CVfromCI(lower = 0.95, upper = 1.11, n = 38)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 0.2029806\n```\n\n\n:::\n:::\n\n\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n\n```{.r .cell-code}\nsessionInfo()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nR version 4.5.2 (2025-10-31)\nPlatform: aarch64-apple-darwin20\nRunning under: macOS Tahoe 26.3\n\nMatrix products: default\nBLAS: /System/Library/Frameworks/Accelerate.framework/Versions/A/Frameworks/vecLib.framework/Versions/A/libBLAS.dylib \nLAPACK: /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/lib/libRlapack.dylib; LAPACK version 3.12.1\n\nlocale:\n[1] en_US.UTF-8/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8\n\ntime zone: Europe/London\ntzcode source: internal\n\nattached base packages:\n[1] stats graphics grDevices datasets utils methods base \n\nother attached packages:\n[1] purrr_1.2.1 data.table_1.18.2.1 knitr_1.51 \n[4] PowerTOST_1.5-7 
\n\nloaded via a namespace (and not attached):\n [1] cubature_2.1.4-1 digest_0.6.39 fastmap_1.2.0 xfun_0.56 \n [5] magrittr_2.0.4 htmltools_0.5.9 rmarkdown_2.30 lifecycle_1.0.5 \n [9] mvtnorm_1.3-3 cli_3.6.5 vctrs_0.7.1 renv_1.0.10 \n[13] compiler_4.5.2 tools_4.5.2 evaluate_1.0.5 Rcpp_1.1.1 \n[17] yaml_2.3.12 otel_0.2.0 rlang_1.1.7 jsonlite_2.0.0 \n[21] htmlwidgets_1.6.4\n```\n\n\n:::\n:::\n\n\n:::\n", + "markdown": "---\ntitle: \"Sample Size Calculation for Average Bioequivalence\"\n---\n\n## Regulatory Requirements\n\nThe most unambiguous requirements are mentioned in [FDA Guidance for Industry. Statistical Approaches to Establishing Bioequivalence](https://www.fda.gov/media/70958/download):\n\n> Sample sizes for average BE should be obtained using published formulas. Sample sizes for population and individual BE should be based on simulated data. The simulations should be conducted using a default situation allowing the two formulations to vary as much as 5% in average BA with equal variances and certain magnitude of subject-by-formulation interaction. The study should have 80 or 90% power to conclude BE between these two formulations. Sample size also depends on the magnitude of variability and the design of the study. Variance estimates to determine the number of subjects for a specific drug can be obtained from the biomedical literature and/or pilot studies.\n\nAppropriate method is described in `Diletti D, Hauschke D, Steinijans VW. Sample Size Determination for Bioequivalence Assessment by Means of Confidence Intervals. Int J Clin Pharmacol Ther Toxicol. 
1991;29(1):1–8` and implemented in R package [PowerTOST](https://cran.r-project.org/web/packages/PowerTOST/index.html) with one clarification: it is simulation-based (iterative) procedure rather than simple calculation by formula.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(PowerTOST)\nlibrary(knitr)\nlibrary(data.table)\nlibrary(purrr)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\n\nAttaching package: 'purrr'\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nThe following object is masked from 'package:data.table':\n\n transpose\n```\n\n\n:::\n:::\n\n\n\n## Sample size for standard crossover design (2x2x2) and 4 period full replicate design (2x2x4)\n\n`sampleN.TOST()` function can calculate sample size for different designs:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nkable(PowerTOST::known.designs())\n```\n\n::: {.cell-output-display}\n\n\n| no|design |df |df2 | steps| bk|bknif | bkni|name |\n|---:|:--------|:-----|:---|-----:|---:|:-----|---------:|:---------------------------|\n| 0|parallel |n-2 |n-2 | 2| 4.0|1/1 | 1.0000000|2 parallel groups |\n| 1|2x2 |n-2 |n-2 | 2| 2.0|1/2 | 0.5000000|2x2 crossover |\n| 1|2x2x2 |n-2 |n-2 | 2| 2.0|1/2 | 0.5000000|2x2x2 crossover |\n| 2|3x3 |2*n-4 |n-3 | 3| 2.0|2/9 | 0.2222222|3x3 crossover |\n| 3|3x6x3 |2*n-4 |n-6 | 6| 2.0|1/18 | 0.0555556|3x6x3 crossover |\n| 4|4x4 |3*n-6 |n-4 | 4| 2.0|1/8 | 0.1250000|4x4 crossover |\n| 5|2x2x3 |2*n-3 |n-2 | 2| 1.5|3/8 | 0.3750000|2x2x3 replicate crossover |\n| 6|2x2x4 |3*n-4 |n-2 | 2| 1.0|1/4 | 0.2500000|2x2x4 replicate crossover |\n| 7|2x4x4 |3*n-4 |n-4 | 4| 1.0|1/16 | 0.0625000|2x4x4 replicate crossover |\n| 9|2x3x3 |2*n-3 |n-3 | 3| 1.5|1/6 | 0.1666667|partial replicate (2x3x3) |\n| 10|2x4x2 |n-2 |n-2 | 4| 8.0|1/2 | 0.5000000|Balaam's (2x4x2) |\n| 11|2x2x2r |3*n-2 |n-2 | 2| 1.0|1/4 | 0.2500000|Liu's 2x2x2 repeated x-over |\n| 100|paired |n-1 |n-1 | 1| 2.0|2/1 | 2.0000000|paired means |\n\n\n:::\n:::\n\n\nBasic usage: we should specify `targetpower` (power to achieve 
at least, e.g. 0.8 or 0.9), `theta0` (T/R ratio if `logscale = TRUE` which is convenient default value) and `cv` (coefficient of variation given as ratio if `logscale = TRUE`).\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# 2x2x2\nPowerTOST::sampleN.TOST(\n targetpower = 0.8,\n theta0 = 0.95,\n CV = 0.3,\n design = \"2x2x2\"\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n+++++++++++ Equivalence test - TOST +++++++++++\n Sample size estimation\n-----------------------------------------------\nStudy design: 2x2 crossover \nlog-transformed data (multiplicative model)\n\nalpha = 0.05, target power = 0.8\nBE margins = 0.8 ... 1.25 \nTrue ratio = 0.95, CV = 0.3\n\nSample size (total)\n n power\n40 0.815845 \n```\n\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# 2x2x4\nPowerTOST::sampleN.TOST(\n targetpower = 0.9,\n theta0 = 0.98,\n CV = 0.24,\n design = \"2x2x4\"\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n+++++++++++ Equivalence test - TOST +++++++++++\n Sample size estimation\n-----------------------------------------------\nStudy design: 2x2x4 (4 period full replicate) \nlog-transformed data (multiplicative model)\n\nalpha = 0.05, target power = 0.9\nBE margins = 0.8 ... 1.25 \nTrue ratio = 0.98, CV = 0.24\n\nSample size (total)\n n power\n14 0.917492 \n```\n\n\n:::\n:::\n\n\nNote that total (not per-sequence) sample size is given.\n\n`alpha` (one-sided significance level, default is 0.05) almost never needs to be changed, `theta1` (lower bioequivalence limit) and `theta2` (upper bioequivalence limit) can be changed for non-standard bioequivalence limits, e.g. for narrow therapeutic index drugs.\n\n## Reproduction of Table 1 from FDA Guidance for Industry. Statistical Approaches to Establishing Bioequivalence\n\nReproduction of Table 1 from [FDA Guidance for Industry. 
Statistical Approaches to Establishing Bioequivalence](https://www.fda.gov/media/70958/download) is quite tricky because it consists one more parameter to consider - the subject-by-formulation interaction variance component, $\\sigma_D^2$. \n\n$$\\sigma_D^2=(\\sigma_{BT}-\\sigma_{BR})^2+2\\times(1-\\rho)\\times\\sigma_{BT}\\times\\sigma_{BR}$$\nwhere $\\sigma_{BT}^2$ and $\\sigma_{BR}^2$ are between-subject variances for the T and R formulations, respectively and $\\rho$ is correlation between subject-specific means $\\mu_{Tj}$ and $\\mu_{Rj}$. These parameters are rarely reported in publications and can't be estimated from CI boundaries and sample size. In such lack of information one can assume $\\sigma_{BT}=\\sigma_{BR}$ as well as $\\rho=1$. Under these reasonable assumptions $\\sigma_D^2=\\sigma_D=0$, so `sampleN.TOST()` calculation should be correct. \n\n\n::: {.cell}\n\n```{.r .cell-code}\ntargetpower <- c(0.8, 0.9)\ntheta0 <- 1 - 0.05\nCV <- c(0.15, 0.23, 0.3, 0.5)\ndesign <- c(\"2x2x2\", \"2x2x4\")\n\ndt <- data.table::CJ(CV, targetpower, design, theta0)\n\nsample_size <- purrr::pmap(dt, PowerTOST::sampleN.TOST, print = FALSE)\nkable(rbindlist(sample_size))\n```\n\n::: {.cell-output-display}\n\n\n|Design | alpha| CV| theta0| theta1| theta2| Sample size| Achieved power| Target power|\n|:------|-----:|----:|------:|------:|------:|-----------:|--------------:|------------:|\n|2x2x2 | 0.05| 0.15| 0.95| 0.8| 1.25| 12| 0.8305164| 0.8|\n|2x2x4 | 0.05| 0.15| 0.95| 0.8| 1.25| 6| 0.8458307| 0.8|\n|2x2x2 | 0.05| 0.15| 0.95| 0.8| 1.25| 16| 0.9260211| 0.9|\n|2x2x4 | 0.05| 0.15| 0.95| 0.8| 1.25| 8| 0.9328881| 0.9|\n|2x2x2 | 0.05| 0.23| 0.95| 0.8| 1.25| 24| 0.8066535| 0.8|\n|2x2x4 | 0.05| 0.23| 0.95| 0.8| 1.25| 12| 0.8143816| 0.8|\n|2x2x2 | 0.05| 0.23| 0.95| 0.8| 1.25| 32| 0.9044320| 0.9|\n|2x2x4 | 0.05| 0.23| 0.95| 0.8| 1.25| 16| 0.9082552| 0.9|\n|2x2x2 | 0.05| 0.30| 0.95| 0.8| 1.25| 40| 0.8158453| 0.8|\n|2x2x4 | 0.05| 0.30| 0.95| 0.8| 1.25| 20| 0.8202398| 0.8|\n|2x2x2 
| 0.05| 0.30| 0.95| 0.8| 1.25| 52| 0.9019652| 0.9|\n|2x2x4 | 0.05| 0.30| 0.95| 0.8| 1.25| 26| 0.9043064| 0.9|\n|2x2x2 | 0.05| 0.50| 0.95| 0.8| 1.25| 98| 0.8032172| 0.8|\n|2x2x4 | 0.05| 0.50| 0.95| 0.8| 1.25| 50| 0.8128063| 0.8|\n|2x2x2 | 0.05| 0.50| 0.95| 0.8| 1.25| 132| 0.9012316| 0.9|\n|2x2x4 | 0.05| 0.50| 0.95| 0.8| 1.25| 66| 0.9021398| 0.9|\n\n\n:::\n:::\n\n\nAs we can see, calculated values are equal to the reference ones for smallest $\\sigma_D=0.01$ if CV=0.15 and CV=0.23. If CV=0.30 and power 80%, sample sizes are also equal, but for other parameters combinations sample sizes are underestimated. \n\nConclusion: we can trust `sampleN.TOST()`; for CV less or equal 0.30 with power 80% and for CV less or equal 0.23 with power 90% it can be considered as validated against reference from FDA guidance.\n\n\n## Estimate CV from CI boundaries and sample size\n\nCV can be calculated from CI boundaries and sample size if only these values are available:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nPowerTOST::CVfromCI(lower = 0.95, upper = 1.11, n = 38)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 0.2029806\n```\n\n\n:::\n:::\n\n\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n\n```{.r .cell-code}\nsessionInfo()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nR version 4.5.2 (2025-10-31)\nPlatform: x86_64-pc-linux-gnu\nRunning under: Ubuntu 24.04.3 LTS\n\nMatrix products: default\nBLAS: /usr/lib/x86_64-linux-gnu/openblas-pthread/libblas.so.3 \nLAPACK: /usr/lib/x86_64-linux-gnu/openblas-pthread/libopenblasp-r0.3.26.so; LAPACK version 3.12.0\n\nlocale:\n [1] LC_CTYPE=en_US.UTF-8 LC_NUMERIC=C \n [3] LC_TIME=en_US.UTF-8 LC_COLLATE=en_US.UTF-8 \n [5] LC_MONETARY=en_US.UTF-8 LC_MESSAGES=en_US.UTF-8 \n [7] LC_PAPER=en_US.UTF-8 LC_NAME=C \n [9] LC_ADDRESS=C LC_TELEPHONE=C \n[11] LC_MEASUREMENT=en_US.UTF-8 LC_IDENTIFICATION=C \n\ntime zone: Europe/London\ntzcode source: system (glibc)\n\nattached base packages:\n[1] stats graphics 
grDevices datasets utils methods base \n\nother attached packages:\n[1] purrr_1.2.1 data.table_1.18.2.1 knitr_1.51 \n[4] PowerTOST_1.5-7 \n\nloaded via a namespace (and not attached):\n [1] cubature_2.1.4-1 digest_0.6.39 fastmap_1.2.0 xfun_0.56 \n [5] magrittr_2.0.4 htmltools_0.5.9 rmarkdown_2.30 lifecycle_1.0.5 \n [9] mvtnorm_1.3-3 cli_3.6.5 vctrs_0.7.1 renv_1.0.10 \n[13] compiler_4.5.2 tools_4.5.2 evaluate_1.0.5 Rcpp_1.1.1 \n[17] yaml_2.3.12 otel_0.2.0 rlang_1.1.7 jsonlite_2.0.0 \n[21] htmlwidgets_1.6.4\n```\n\n\n:::\n:::\n\n\n:::\n", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/R/sample_size_non-inferiority/execute-results/html.json b/_freeze/R/sample_size_non-inferiority/execute-results/html.json index 150d5206f..6168e3350 100644 --- a/_freeze/R/sample_size_non-inferiority/execute-results/html.json +++ b/_freeze/R/sample_size_non-inferiority/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "80fdd01e72cc62f99a1d49ad00eb00ec", + "hash": "d4a861a71874a803588b78942176549a", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Sample Size for Non-Inferiority Trials in R\"\n---\n\nIn R there are lots of packages for sample size calculations. 
Here we will cover **TrialSize** , **epiR** and **rpact**.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(rpact)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nInstallation qualification for rpact 4.3.0 has not yet been performed.\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nPlease run testPackage() before using the package in GxP relevant environments.\n```\n\n\n:::\n\n```{.r .cell-code}\nlibrary(epiR)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nLoading required package: survival\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nPackage epiR 2.0.90 is loaded\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nType help(epi.about) for summary information\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\nType browseVignettes(package = 'epiR') to learn how to use epiR for applied epidemiological analyses\n```\n\n\n:::\n\n::: {.cell-output .cell-output-stderr}\n\n```\n\n```\n\n\n:::\n\n```{.r .cell-code}\nlibrary(TrialSize)\n```\n:::\n\n\n### Two Sample Non-inferiority test: Comparing means for parallel design (unpaired)\n\nThis example is a sample size calculation for the following hypotheses: $H_0:\\mu2-\\mu1\\le -\\theta$ versus $H_1: \\mu2-\\mu1\\gt -\\theta$.\n\n#### Example\n\nA client is interested in conducting a clinical trial to compare two cholesterol lowering agents for treatment of hypercholesterolemic patients through a parallel design. The primary efficacy parameter is a low-density lipidprotein cholesterol (LDL-C). We will consider the situation where the intended trial is for testing noninferiority. For establishing it, suppose the true mean difference is 0 and the noninferiority margin is chosen to be -0.05 (-5%). 
Assuming SD = 0.1, how many patients are required for an 80% power and an overall significance level of 5%?\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# rpact:\nrpact::getDesignInverseNormal(kMax = 1, alpha = 0.05) |>\n rpact::getSampleSizeMeans(\n thetaH0 = -0.05,\n alternative = 0,\n stDev = 0.1,\n allocationRatioPlanned = 1\n ) |>\n summary()\n```\n\n::: {.cell-output-display}\n*Sample size calculation for a continuous endpoint*\n\nFixed sample analysis, one-sided significance level 5%, power 80%.\nThe results were calculated for a two-sample t-test, H0: mu(1) - mu(2) = -0.05, \nH1: effect = 0, standard deviation = 0.1.\n\n| Stage | Fixed |\n| ----- | ----- |\n| Stage level (one-sided) | 0.0500 |\n| Efficacy boundary (z-value scale) | 1.645 |\n| Efficacy boundary (t) | -0.017 |\n| Number of subjects | 100.3 |\n\nLegend:\n\n* *(t)*: treatment effect scale\n\n:::\n:::\n\n\nHere the recommended sample size is 100.3, so we will need to round up to 51 subjects per arm.\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# epiR:\nepi.ssninfc(treat = 0.20, control = 0.20, sigma = 0.10, delta = 0.05, n = NA, \n power = 0.80, r = 1, nfractional = FALSE, alpha = 0.05)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n$n.total\n[1] 100\n\n$n.treat\n[1] 50\n\n$n.control\n[1] 50\n\n$delta\n[1] 0.05\n\n$power\n[1] 0.8\n```\n\n\n:::\n\n```{.r .cell-code}\n# TrialSize:\nTwoSampleMean.NIS(0.05,0.2,0.1,1,-0.05,0)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 49.46046\n```\n\n\n:::\n:::\n\n\nIn epiR, when calculating the power of a study, the argument n refers to the total study size (that is, the number of subjects in the treatment group plus the number in the control group). Hence, a total of 100 subjects is needed to be enrolled in the trial, 50 in the treatment group and 50 in the control group. 
TrialSize presents the results in the same format.\n\n\n### Comparing means for crossover design (paired) {.unnumbered}\n\n#### Example\n\nLet's consider a standard two-sequence, two period crossover design. Suppose that the sponsor is interested in showing noninferiority of the test drug against the reference with the noninferiority margin -20%. Assume power of 80%. Based on the results from previous trials, it is estimated that the variance (of the difference) is 0.2 (20%). Suppose that the true mean difference is -0.1 (-10%). What is the required sample size, assuming significance level of 5%?\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# TrialSize:\nTwoSampleCrossOver.NIS(0.05,0.2,0.2,-0.2,-0.1)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 12.36511\n```\n\n\n:::\n:::\n\n\n#### References {.unnumbered}\n\nMajority of the examples are taken from: *Chow SC, Liu JP (1998). Design and analysis of clinical trials. Concepts and methodologies. Wiley, New York.* and *Machin, D., Campbell, M. J., Fayers, P., & Pinol, A. (Eds.) (1997). Sample Size Tables for Clinical Studies. (2nd ed.) Blackwell Science.*\n", + "markdown": "---\ntitle: \"Sample Size for Non-Inferiority Trials in R\"\n---\n\nIn R there are lots of packages for sample size calculations. Here we will cover **TrialSize** , **epiR** and **rpact**.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(rpact)\nlibrary(epiR)\nlibrary(TrialSize)\n```\n:::\n\n\n### Two Sample Non-inferiority test: Comparing means for parallel design (unpaired)\n\nThis example is a sample size calculation for the following hypotheses: $H_0:\\mu2-\\mu1\\le -\\theta$ versus $H_1: \\mu2-\\mu1\\gt -\\theta$.\n\n#### Example\n\nA client is interested in conducting a clinical trial to compare two cholesterol lowering agents for treatment of hypercholesterolemic patients through a parallel design. The primary efficacy parameter is a low-density lipidprotein cholesterol (LDL-C). 
We will consider the situation where the intended trial is for testing noninferiority. For establishing it, suppose the true mean difference is 0 and the noninferiority margin is chosen to be -0.05 (-5%). Assuming SD = 0.1, how many patients are required for an 80% power and an overall significance level of 5%?\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# rpact:\nrpact::getDesignInverseNormal(kMax = 1, alpha = 0.05) |>\n rpact::getSampleSizeMeans(\n thetaH0 = -0.05,\n alternative = 0,\n stDev = 0.1,\n allocationRatioPlanned = 1\n ) |>\n summary()\n```\n\n::: {.cell-output-display}\n*Sample size calculation for a continuous endpoint*\n\nFixed sample analysis, one-sided significance level 5%, power 80%.\nThe results were calculated for a two-sample t-test, H0: mu(1) - mu(2) = -0.05, \nH1: effect = 0, standard deviation = 0.1.\n\n| Stage | Fixed |\n| ----- | ----- |\n| Stage level (one-sided) | 0.0500 |\n| Efficacy boundary (z-value scale) | 1.645 |\n| Efficacy boundary (t) | -0.017 |\n| Number of subjects | 100.3 |\n\nLegend:\n\n* *(t)*: treatment effect scale\n\n:::\n:::\n\n\nHere the recommended sample size is 100.3, so we will need to round up to 51 subjects per arm.\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# epiR:\nepi.ssninfc(treat = 0.20, control = 0.20, sigma = 0.10, delta = 0.05, n = NA, \n power = 0.80, r = 1, nfractional = FALSE, alpha = 0.05)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n$n.total\n[1] 100\n\n$n.treat\n[1] 50\n\n$n.control\n[1] 50\n\n$delta\n[1] 0.05\n\n$power\n[1] 0.8\n```\n\n\n:::\n\n```{.r .cell-code}\n# TrialSize:\nTwoSampleMean.NIS(0.05,0.2,0.1,1,-0.05,0)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 49.46046\n```\n\n\n:::\n:::\n\n\nIn epiR, when calculating the power of a study, the argument n refers to the total study size (that is, the number of subjects in the treatment group plus the number in the control group). 
Hence, a total of 100 subjects is needed to be enrolled in the trial, 50 in the treatment group and 50 in the control group. TrialSize presents the results in the same format.\n\n\n### Comparing means for crossover design (paired) {.unnumbered}\n\n#### Example\n\nLet's consider a standard two-sequence, two period crossover design. Suppose that the sponsor is interested in showing noninferiority of the test drug against the reference with the noninferiority margin -20%. Assume power of 80%. Based on the results from previous trials, it is estimated that the variance (of the difference) is 0.2 (20%). Suppose that the true mean difference is -0.1 (-10%). What is the required sample size, assuming significance level of 5%?\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# TrialSize:\nTwoSampleCrossOver.NIS(0.05,0.2,0.2,-0.2,-0.1)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 12.36511\n```\n\n\n:::\n:::\n\n\n#### References {.unnumbered}\n\nMajority of the examples are taken from: *Chow SC, Liu JP (1998). Design and analysis of clinical trials. Concepts and methodologies. Wiley, New York.* and *Machin, D., Campbell, M. J., Fayers, P., & Pinol, A. (Eds.) (1997). Sample Size Tables for Clinical Studies. (2nd ed.) Blackwell Science.*\n", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/R/summary-stats/execute-results/html.json b/_freeze/R/summary-stats/execute-results/html.json index bffbd2a62..18662dfed 100644 --- a/_freeze/R/summary-stats/execute-results/html.json +++ b/_freeze/R/summary-stats/execute-results/html.json @@ -1,9 +1,11 @@ { - "hash": "18713beaa8b157b9465cf09ffa118dc4", + "hash": "9ea1bec115e9fc83b0e8d39a33600269", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Deriving Quantiles or Percentiles in R\"\n---\n\nPercentiles can be calculated in R using the quantile function. The function has the argument `type` which allows for nine different percentile definitions to be used. 
The default is `type = 7`, which uses a piecewise-linear estimate of the cumulative distribution function to find percentiles.\n\nThis is how the 25th and 40th percentiles of `aval` could be calculated using the default `type`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nquantile(aval, probs = c(0.25, 0.4))\n```\n:::\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-23\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P cli 3.6.5 2025-04-23 [?] RSPM\n P digest 0.6.39 2025-11-19 [?] RSPM\n P evaluate 1.0.5 2025-08-27 [?] RSPM\n P fastmap 1.2.0 2024-05-15 [?] RSPM\n P htmltools 0.5.9 2025-12-04 [?] RSPM\n P htmlwidgets 1.6.4 2023-12-06 [?] RSPM\n P jsonlite 2.0.0 2025-03-27 [?] RSPM\n P knitr 1.51 2025-12-20 [?] RSPM\n P otel 0.2.0 2025-08-29 [?] RSPM\n renv 1.0.10 2024-10-05 [1] RSPM (R 4.5.2)\n P rlang 1.1.7 2026-01-09 [?] RSPM\n P rmarkdown 2.30 2025-09-28 [?] RSPM\n P sessioninfo 1.2.3 2025-02-05 [?] RSPM\n P xfun 0.56 2026-01-18 [?] RSPM\n P yaml 2.3.12 2025-12-10 [?] 
RSPM\n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n:::\n\n", - "supporting": [], + "markdown": "---\ntitle: \"Deriving Quantiles or Percentiles in R\"\n---\n\nPercentiles can be calculated in R using the quantile function. The function has the argument `type` which allows for nine different percentile definitions to be used. The default is `type = 7`, which uses a piecewise-linear estimate of the cumulative distribution function to find percentiles.\n\nThis is how the 25th and 40th percentiles of `aval` could be calculated using the default `type`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nquantile(aval, probs = c(0.25, 0.4))\n```\n:::\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.3 (2026-03-11)\n os Ubuntu 24.04.4 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-26\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P cli 3.6.5 2025-04-23 [?] RSPM (R 4.5.0)\n P digest 0.6.39 2025-11-19 [?] RSPM (R 4.5.0)\n P evaluate 1.0.5 2025-08-27 [?] RSPM (R 4.5.0)\n P fastmap 1.2.0 2024-05-15 [?] RSPM (R 4.5.0)\n P htmltools 0.5.9 2025-12-04 [?] RSPM (R 4.5.0)\n P htmlwidgets 1.6.4 2023-12-06 [?] RSPM (R 4.5.0)\n P jsonlite 2.0.0 2025-03-27 [?] RSPM (R 4.5.0)\n P knitr 1.51 2025-12-20 [?] 
RSPM (R 4.5.0)\n P otel 0.2.0 2025-08-29 [?] RSPM (R 4.5.0)\n renv 1.0.10 2024-10-05 [1] RSPM (R 4.5.3)\n P rlang 1.1.7 2026-01-09 [?] RSPM (R 4.5.0)\n P rmarkdown 2.30 2025-09-28 [?] RSPM (R 4.5.0)\n P sessioninfo 1.2.2 2021-12-06 [?] RSPM (R 4.5.0)\n P xfun 0.56 2026-01-18 [?] RSPM (R 4.5.0)\n P yaml 2.3.12 2025-12-10 [?] RSPM (R 4.5.0)\n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.3/lib/R/library\n\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n\nNote: `geom_boxplot` and `stats_boxplot` in `ggplot2` uses the default of `type = 7`. It is not possible to switch the type of algorithm used to compute the quantiles in these functions for `ggplot2`.\n\n:::\n\n", + "supporting": [ + "summary-stats_files" + ], "filters": [ "rmarkdown/pagebreak.lua" ], diff --git a/_freeze/R/summary_skew_kurt/execute-results/html.json b/_freeze/R/summary_skew_kurt/execute-results/html.json index d05c4bd1b..7a60db464 100644 --- a/_freeze/R/summary_skew_kurt/execute-results/html.json +++ b/_freeze/R/summary_skew_kurt/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "678540494b8bdb2ef146a6054da5dda6", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Skewness/Kurtosis\"\noutput: html_document\n---\n\n\n\n# **Skewness and Kurtosis in R**\n\nSkewness measures the the amount of asymmetry in a distribution, while Kurtosis describes the \"tailedness\" of the curve. These measures are frequently used to assess the normality of the data. There are several methods to calculate these measures. In R, there are at least four different packages that contain functions for Skewness and Kurtosis. 
This write-up will examine the following packages: **e1071**, **moments**, **procs**, and **sasLM**.\n\n## Data Used\n\nThe following data was used in this example.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Create sample data\ndat <- tibble::tribble(\n ~team, ~points, ~assists,\n \"A\", 10, 2,\n \"A\", 17, 5,\n \"A\", 17, 6,\n \"A\", 18, 3,\n \"A\", 15, 0,\n \"B\", 10, 2,\n \"B\", 14, 5,\n \"B\", 13, 4,\n \"B\", 29, 0,\n \"B\", 25, 2,\n \"C\", 12, 1,\n \"C\", 30, 1,\n \"C\", 34, 3,\n \"C\", 12, 4,\n \"C\", 11, 7 \n)\n```\n:::\n\n\n## Package Examination\n\nBase R and the **stats** package have no native functions for Skewness and Kurtosis. It is therefore necessary to use a packaged function to calculate these statistics. The packages examined use three different methods of calculating Skewness, and four different methods for calculating Kurtosis. Of the available packages, the functions in the **e1071** package provide the most flexibility, and have options for three of the different methodologies.\n\n### e1071 Package {#e1071}\n\nThe **e1071** package contains miscellaneous statistical functions from the Probability Theory Group at the Vienna University of Technology. The package includes functions for both Skewness and Kurtosis, and each function has a \"type\" parameter to specify the method. There are three available methods for Skewness, and three methods for Kurtosis. A portion of the documentation for these functions is included below:\n\n#### Skewness\n\nThe documentation for the `skewness()` function describes three types of skewness calculations: Joanes and Gill (1998) discusses three methods for estimating skewness:\n\n- Type 1: This is the typical definition used in many older textbooks\n\n$$g_1 = m_1/m_2^{3/2}$$\n\n- Type 2: Used in SAS and SPSS\n\n $$\n G_1 = g_1\\sqrt{n(n-1)}/(n-2)\n $$\n\n- Type 3: Used in MINITAB and BMDP\n\n $$\n b_1 = m_3/s^3 = g_1((n-1)/n)^{3/2}\n $$\n\nAll three skewness measures are unbiased under normality. 
The three methods are illustrated in the following code:\n\n\n::: {.cell}\n\n```{.r .cell-code}\ntype1 <- e1071::skewness(dat$points, type = 1)\nstringr::str_glue(\"Skewness - Type 1: {type1}\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nSkewness - Type 1: 0.905444204379853\n```\n\n\n:::\n\n```{.r .cell-code}\ntype2 <- e1071::skewness(dat$points, type = 2)\nstringr::str_glue(\"Skewness - Type 2: {type2}\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nSkewness - Type 2: 1.00931792987094\n```\n\n\n:::\n\n```{.r .cell-code}\ntype3 <- e1071::skewness(dat$points, type = 3)\nstringr::str_glue(\"Skewness - Type 3: {type3}\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nSkewness - Type 3: 0.816426058828937\n```\n\n\n:::\n:::\n\n\nThe default for the **e1071** `skewness()` function is Type 3.\n\n#### Kurtosis\n\nThe documentation for the `kurtosis()` function describes three types of kurtosis calculations: Joanes and Gill (1998) discuss three methods for estimating kurtosis:\n\n- Type 1: This is the typical definition used in many older textbooks\n\n$$g_2 = m_4/m_2^{2}-3$$\n\n- Type 2: Used in SAS and SPSS\n\n $$G_2 = ((n+1)g_2+6)*\\frac{(n-1)}{(n-2)(n-3)}$$\n\n- Type 3: Used in MINITAB and BMDP\n\n $$b_2 = m_4/s^4-3 = (g_2 + 3)(1-1/n)^2-3$$\n\nOnly $G_2$ (corresponding to type 2) is unbiased under normality. 
The three methods are illustrated in the following code:\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Kurtosis - Type 1\ntype1 <- e1071::kurtosis(dat$points, type = 1)\nstringr::str_glue(\"Kurtosis - Type 1: {type1}\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nKurtosis - Type 1: -0.583341077124784\n```\n\n\n:::\n\n```{.r .cell-code}\n# Kurtosis - Type 2\ntype2 <- e1071::kurtosis(dat$points, type = 2)\nstringr::str_glue(\"Kurtosis - Type 2: {type2}\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nKurtosis - Type 2: -0.299156418435587\n```\n\n\n:::\n\n```{.r .cell-code}\n# Kurtosis - Type 3\ntype3 <- e1071::kurtosis(dat$points, type = 3)\nstringr::str_glue(\"Kurtosis - Type 3: {type3}\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nKurtosis - Type 3: -0.894821560517589\n```\n\n\n:::\n:::\n\n\nThe default for the **e1071** `kurtosis()` function is Type 3.\n\n### Moments Package {#moments}\n\nThe **moments** package is a well-known package with a variety of statistical functions. The package contains functions for both Skewness and Kurtosis. But these functions provide no \"type\" option. The `skewness()` function in the **moments** package corresponds to Type 1 above. The `kurtosis()` function uses a Pearson's measure of Kurtosis, which corresponds to none of the three types in the **e1071** package.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(moments)\n\n# Skewness - Type 1\nmoments::skewness(dat$points)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 0.9054442\n```\n\n\n:::\n\n```{.r .cell-code}\n# Kurtosis - Pearson's measure\nmoments::kurtosis(dat$points)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 2.416659\n```\n\n\n:::\n:::\n\n\nNote that neither of the functions from the **moments** package match SAS.\n\n### Procs Package {#procs}\n\nThe **procs** package `proc_means()` function was written specifically to match SAS, and produces a Type 2 Skewness and Type 2 Kurtosis. 
This package also produces a data frame output, instead of a scalar value.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(procs)\n\n# Skewness and Kurtosis - Type 2\nprocs::proc_means(dat, var = points, stats = v(skew, kurt))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 5\n TYPE FREQ VAR SKEW KURT\n \n1 0 15 points 1.01 -0.299\n```\n\n\n:::\n:::\n\n\nViewer Output:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/summarystats/procs_skewness_kurtosis.png){fig-align='center' width=30%}\n:::\n:::\n\n\n### sasLM Package {#sasLM}\n\nThe **sasLM** package was also written specifically to match SAS. The `Skewness()` function produces a Type 2 Skewness, and the `Kurtosis()` function a Type 2 Kurtosis.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(sasLM)\n\n# Skewness - Type 2\nsasLM::Skewness(dat$points)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 1.009318\n```\n\n\n:::\n\n```{.r .cell-code}\n# Kurtosis - Type 2\nsasLM::Kurtosis(dat$points)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] -0.2991564\n```\n\n\n:::\n:::\n\n\n## Reference\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-23\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n class 7.3-23 2025-01-01 [2] CRAN (R 4.5.2)\n P cli 3.6.5 2025-04-23 [?] RSPM\n P common * 1.1.4 2025-12-08 [?] 
RSPM\n P crayon 1.5.3 2024-06-20 [?] RSPM\n P e1071 * 1.7-17 2025-12-18 [?] RSPM\n P evaluate 1.0.5 2025-08-27 [?] RSPM\n P fmtr 1.7.2 2026-01-25 [?] RSPM\n P glue 1.8.0 2024-09-30 [?] RSPM\n highr 0.11 2024-05-26 [1] RSPM\n P jpeg 0.1-11 2025-03-21 [?] RSPM\n P knitr 1.51 2025-12-20 [?] RSPM\n P lifecycle 1.0.5 2026-01-08 [?] RSPM\n P magrittr 2.0.4 2025-09-12 [?] RSPM\n MASS 7.3-65 2025-02-28 [2] CRAN (R 4.5.2)\n P moments * 0.14.1 2022-05-02 [?] RSPM\n P mvtnorm * 1.3-3 2025-01-10 [?] RSPM\n P pillar 1.11.1 2025-09-17 [?] RSPM\n P pkgconfig 2.0.3 2019-09-22 [?] RSPM\n P procs * 1.0.7 2025-07-27 [?] RSPM\n P proxy 0.4-29 2025-12-29 [?] RSPM\n P Rcpp 1.1.1 2026-01-10 [?] RSPM\n P reporter 1.4.6 2026-02-07 [?] RSPM\n P rlang 1.1.7 2026-01-09 [?] RSPM\n P sasLM * 0.10.7 2025-09-28 [?] RSPM\n P stringi 1.8.7 2025-03-27 [?] RSPM\n P stringr 1.6.0 2025-11-04 [?] RSPM\n P tibble * 3.3.1 2026-01-11 [?] RSPM\n P utf8 1.2.6 2025-06-08 [?] RSPM\n P vctrs 0.7.1 2026-01-23 [?] RSPM\n P withr 3.0.2 2024-10-28 [?] RSPM\n P xfun 0.56 2026-01-18 [?] RSPM\n P yaml 2.3.12 2025-12-10 [?] RSPM\n P zip 2.3.3 2025-05-13 [?] RSPM\n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n\n * ── Packages attached to the search path.\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n:::\n", + "markdown": "---\ntitle: \"Skewness/Kurtosis\"\noutput: html_document\n---\n\n\n\n# **Skewness and Kurtosis in R**\n\nSkewness measures the the amount of asymmetry in a distribution, while Kurtosis describes the \"tailedness\" of the curve. These measures are frequently used to assess the normality of the data. There are several methods to calculate these measures. In R, there are at least four different packages that contain functions for Skewness and Kurtosis. 
This write-up will examine the following packages: **e1071**, **moments**, **procs**, and **sasLM**.\n\n## Data Used\n\nThe following data was used in this example.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Create sample data\ndat <- tibble::tribble(\n ~team, ~points, ~assists,\n \"A\", 10, 2,\n \"A\", 17, 5,\n \"A\", 17, 6,\n \"A\", 18, 3,\n \"A\", 15, 0,\n \"B\", 10, 2,\n \"B\", 14, 5,\n \"B\", 13, 4,\n \"B\", 29, 0,\n \"B\", 25, 2,\n \"C\", 12, 1,\n \"C\", 30, 1,\n \"C\", 34, 3,\n \"C\", 12, 4,\n \"C\", 11, 7 \n)\n```\n:::\n\n\n## Package Examination\n\nBase R and the **stats** package have no native functions for Skewness and Kurtosis. It is therefore necessary to use a packaged function to calculate these statistics. The packages examined use three different methods of calculating Skewness, and four different methods for calculating Kurtosis. Of the available packages, the functions in the **e1071** package provide the most flexibility, and have options for three of the different methodologies.\n\n### e1071 Package {#e1071}\n\nThe **e1071** package contains miscellaneous statistical functions from the Probability Theory Group at the Vienna University of Technology. The package includes functions for both Skewness and Kurtosis, and each function has a \"type\" parameter to specify the method. There are three available methods for Skewness, and three methods for Kurtosis. A portion of the documentation for these functions is included below:\n\n#### Skewness\n\nThe documentation for the `skewness()` function describes three types of skewness calculations: Joanes and Gill (1998) discusses three methods for estimating skewness:\n\n- Type 1: This is the typical definition used in many older textbooks\n\n$$g_1 = m_1/m_2^{3/2}$$\n\n- Type 2: Used in SAS and SPSS\n\n $$\n G_1 = g_1\\sqrt{n(n-1)}/(n-2)\n $$\n\n- Type 3: Used in MINITAB and BMDP\n\n $$\n b_1 = m_3/s^3 = g_1((n-1)/n)^{3/2}\n $$\n\nAll three skewness measures are unbiased under normality. 
The three methods are illustrated in the following code:\n\n\n::: {.cell}\n\n```{.r .cell-code}\ntype1 <- e1071::skewness(dat$points, type = 1)\nstringr::str_glue(\"Skewness - Type 1: {type1}\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nSkewness - Type 1: 0.905444204379853\n```\n\n\n:::\n\n```{.r .cell-code}\ntype2 <- e1071::skewness(dat$points, type = 2)\nstringr::str_glue(\"Skewness - Type 2: {type2}\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nSkewness - Type 2: 1.00931792987094\n```\n\n\n:::\n\n```{.r .cell-code}\ntype3 <- e1071::skewness(dat$points, type = 3)\nstringr::str_glue(\"Skewness - Type 3: {type3}\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nSkewness - Type 3: 0.816426058828937\n```\n\n\n:::\n:::\n\n\nThe default for the **e1071** `skewness()` function is Type 3.\n\n#### Kurtosis\n\nThe documentation for the `kurtosis()` function describes three types of kurtosis calculations: Joanes and Gill (1998) discuss three methods for estimating kurtosis:\n\n- Type 1: This is the typical definition used in many older textbooks\n\n$$g_2 = m_4/m_2^{2}-3$$\n\n- Type 2: Used in SAS and SPSS\n\n $$G_2 = ((n+1)g_2+6)*\\frac{(n-1)}{(n-2)(n-3)}$$\n\n- Type 3: Used in MINITAB and BMDP\n\n $$b_2 = m_4/s^4-3 = (g_2 + 3)(1-1/n)^2-3$$\n\nOnly $G_2$ (corresponding to type 2) is unbiased under normality. 
The three methods are illustrated in the following code:\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Kurtosis - Type 1\ntype1 <- e1071::kurtosis(dat$points, type = 1)\nstringr::str_glue(\"Kurtosis - Type 1: {type1}\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nKurtosis - Type 1: -0.583341077124784\n```\n\n\n:::\n\n```{.r .cell-code}\n# Kurtosis - Type 2\ntype2 <- e1071::kurtosis(dat$points, type = 2)\nstringr::str_glue(\"Kurtosis - Type 2: {type2}\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nKurtosis - Type 2: -0.299156418435587\n```\n\n\n:::\n\n```{.r .cell-code}\n# Kurtosis - Type 3\ntype3 <- e1071::kurtosis(dat$points, type = 3)\nstringr::str_glue(\"Kurtosis - Type 3: {type3}\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nKurtosis - Type 3: -0.894821560517589\n```\n\n\n:::\n:::\n\n\nThe default for the **e1071** `kurtosis()` function is Type 3.\n\n### Moments Package {#moments}\n\nThe **moments** package is a well-known package with a variety of statistical functions. The package contains functions for both Skewness and Kurtosis. But these functions provide no \"type\" option. The `skewness()` function in the **moments** package corresponds to Type 1 above. The `kurtosis()` function uses a Pearson's measure of Kurtosis, which corresponds to none of the three types in the **e1071** package.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(moments)\n\n# Skewness - Type 1\nmoments::skewness(dat$points)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 0.9054442\n```\n\n\n:::\n\n```{.r .cell-code}\n# Kurtosis - Pearson's measure\nmoments::kurtosis(dat$points)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 2.416659\n```\n\n\n:::\n:::\n\n\nNote that neither of the functions from the **moments** package match SAS.\n\n### Procs Package {#procs}\n\nThe **procs** package `proc_means()` function was written specifically to match SAS, and produces a Type 2 Skewness and Type 2 Kurtosis. 
This package also produces a data frame output, instead of a scalar value.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(procs)\n\n# Skewness and Kurtosis - Type 2\nprocs::proc_means(dat, var = points, stats = v(skew, kurt))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 5\n TYPE FREQ VAR SKEW KURT\n \n1 0 15 points 1.01 -0.299\n```\n\n\n:::\n:::\n\n\nViewer Output:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/summarystats/procs_skewness_kurtosis.png){fig-align='center' width=30%}\n:::\n:::\n\n\n### sasLM Package {#sasLM}\n\nThe **sasLM** package was also written specifically to match SAS. The `Skewness()` function produces a Type 2 Skewness, and the `Kurtosis()` function a Type 2 Kurtosis.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(sasLM)\n\n# Skewness - Type 2\nsasLM::Skewness(dat$points)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 1.009318\n```\n\n\n:::\n\n```{.r .cell-code}\n# Kurtosis - Type 2\nsasLM::Kurtosis(dat$points)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] -0.2991564\n```\n\n\n:::\n:::\n\n\n## Reference\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os Ubuntu 24.04.3 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-17\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n class 7.3-23 2025-01-01 [2] CRAN (R 4.5.2)\n P cli 3.6.5 2025-04-23 [?] RSPM (R 4.5.0)\n P common * 1.1.4 2025-12-08 [?] RSPM (R 4.5.0)\n P crayon 1.5.3 2024-06-20 [?] 
RSPM (R 4.5.0)\n P e1071 * 1.7-17 2025-12-18 [?] RSPM (R 4.5.0)\n P evaluate 1.0.5 2025-08-27 [?] RSPM (R 4.5.0)\n P fmtr 1.7.2 2026-01-25 [?] RSPM (R 4.5.0)\n P glue 1.8.0 2024-09-30 [?] RSPM (R 4.5.0)\n highr 0.11 2024-05-26 [1] RSPM (R 4.5.0)\n P jpeg 0.1-11 2025-03-21 [?] RSPM (R 4.5.0)\n P knitr 1.51 2025-12-20 [?] RSPM (R 4.5.0)\n P lifecycle 1.0.5 2026-01-08 [?] RSPM (R 4.5.0)\n P magrittr 2.0.4 2025-09-12 [?] RSPM (R 4.5.0)\n MASS 7.3-65 2025-02-28 [2] CRAN (R 4.5.2)\n P moments * 0.14.1 2022-05-02 [?] RSPM (R 4.5.0)\n P mvtnorm * 1.3-3 2025-01-10 [?] RSPM (R 4.5.0)\n P pillar 1.11.1 2025-09-17 [?] RSPM (R 4.5.0)\n P pkgconfig 2.0.3 2019-09-22 [?] RSPM (R 4.5.0)\n P procs * 1.0.7 2025-07-27 [?] RSPM (R 4.5.0)\n P proxy 0.4-29 2025-12-29 [?] RSPM (R 4.5.0)\n P Rcpp 1.1.1 2026-01-10 [?] RSPM (R 4.5.0)\n P reporter 1.4.6 2026-02-07 [?] RSPM (R 4.5.0)\n P rlang 1.1.7 2026-01-09 [?] RSPM (R 4.5.0)\n P sasLM * 0.10.7 2025-09-28 [?] RSPM (R 4.5.0)\n P stringi 1.8.7 2025-03-27 [?] RSPM (R 4.5.0)\n P stringr 1.6.0 2025-11-04 [?] RSPM (R 4.5.0)\n P tibble * 3.3.1 2026-01-11 [?] RSPM (R 4.5.0)\n P utf8 1.2.6 2025-06-08 [?] RSPM (R 4.5.0)\n P vctrs 0.7.1 2026-01-23 [?] RSPM (R 4.5.0)\n P withr 3.0.2 2024-10-28 [?] RSPM (R 4.5.0)\n P xfun 0.56 2026-01-18 [?] RSPM (R 4.5.0)\n P yaml 2.3.12 2025-12-10 [?] RSPM (R 4.5.0)\n P zip 2.3.3 2025-05-13 [?] 
RSPM (R 4.5.0)\n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.2/lib/R/library\n\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n:::\n", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/R/survey-stats-summary/execute-results/html.json b/_freeze/R/survey-stats-summary/execute-results/html.json index eded5cf08..9ef19f0ea 100644 --- a/_freeze/R/survey-stats-summary/execute-results/html.json +++ b/_freeze/R/survey-stats-summary/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "7cdef1be6ecce507c0e9fc937f4f0c4f", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Survey Summary Statistics using R\"\nbibliography: ../Comp/survey-stats-summary.bib\n---\n\nWhen conducting large-scale trials on samples of the population, it can be necessary to use a more complex sampling design than a simple random sample.\n\n- **Weighting** – If smaller populations are sampled more heavily to increase precision, then it is necessary to weight these observations in the analysis.\n\n- **Finite population correction** – Larger samples of populations result in lower variability in comparison to smaller samples.\n\n- **Stratification** – Dividing a population into sub-groups and sampling from each group. This protects from obtaining a very poor sample (e.g. under or over-represented groups), can give samples of a known precision, and gives more precise estimates for population means and totals.\n\n- **Clustering** – Dividing a population into sub-groups, and only sampling certain groups. 
This gives a lower precision, however can be much more convenient and cheaper - for example if surveying school children you may only sample a subset of schools to avoid travelling to a school to interview a single child.\n\nAll of these designs need to be taken into account when calculating statistics, and when producing models. Only summary statistics are discussed in this document, and variances are calculated using the default Taylor series linearisation methods. For a more detailed introduction to survey statistics in R, see [@Lohr_2022] or [@tlumley_2004].\n\nWe will use the [`{survey}`](https://cran.r-project.org/web/packages/survey/index.html) package, which is the standard for survey statistics in R. Note that for those who prefer the tidyverse, the [`{srvyr}`](https://cran.r-project.org/web/packages/srvyr/index.html) package is a wrapper around `{survey}` with `{dplyr}` like syntax.\n\n# Simple Survey Designs\n\nWe will use the [API]((https://r-survey.r-forge.r-project.org/survey/html/api.html)) dataset [@API_2000], which contains a number of datasets based on different samples from a dataset of academic performance. Initially we will just cover the methodology with a simple random sample and a finite population correction to demonstrate functionality.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(survey)\n\ndata(\"api\")\n\nhead(apisrs) |>\n gt::gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n \n
cdsstypenamesnamesnumdnamednumcnamecnumflagpcttestapi00api99targetgrowthsch.widecomp.impbothawardsmealsellyr.rndmobilityacs.k3acs.46acs.corepct.respnot.hsghsgsome.colcol.gradgrad.schavg.edfullemerenrollapi.stupwfpc
15739081534155HMcFarland HighMcFarland High1039McFarland Unified432Kern14NA984624481814NoYesNoNo4431NA6NANA2482443412731.91713547742930.976194
19642126066716EStowers (Cecil Stowers (Cecil B.) Elementary1124ABC Unified1Los Angeles18NA100878831NA47YesYesYesYes825NA151930NA974102343213.66901047842030.976194
30664493030640HBrea-Olinda HigBrea-Olinda High2868Brea-Olinda Unified79Orange29NA987347423-8NoNoNoNo1010NA7NANA2895592141243.7183181410128730.976194
19644516012744EAlameda ElementAlameda Elementary1273Downey Unified187Los Angeles18NA997726577115YesYesYesYes7025NA2323NANA100374014811.96851834229130.976194
40688096043293ESunnyside ElemeSunnyside Elementary4926San Luis Coastal Unified640San Luis Obispo39NA99739719420YesYesYesYes4312NA122029NA918212734103.17100021718930.976194
19734456014278ELos Molinos EleLos Molinos Elementary2463Hacienda la Puente Unif284Los Angeles18NA93835822NA13YesYesYesNo1619NA131929NA71182038343.96752025821130.976194
\n
\n```\n\n:::\n:::\n\n\n## Mean\n\nIf we want to calculate a mean of a variable in a dataset which has been obtained from a **s**imple **r**andom **s**ample such as `apisrs`, in R we can create a design object using the `survey::svydesign` function (specifying that there is no PSU using `id = ~1` and the finite population correction using `fpc=~fpc`).\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsrs_design <- survey::svydesign(id = ~1, fpc = ~fpc, data = apisrs)\n```\n:::\n\n\nThis design object stores all metadata about the sample alongside the data, and is used by all subsequent functions in the `{survey}` package. To calculate the mean, standard error, and confidence intervals of the `growth` variable, we can use the `survey::svymean` and `confint` functions:\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Calculate mean and SE of growth. The standard error will be corrected by the finite population correction specified in the design\nsrs_means <- survey::svymean(~growth, srs_design)\n\nsrs_means\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n mean SE\ngrowth 31.9 2.0905\n```\n\n\n:::\n\n```{.r .cell-code}\n# Use degf() to get the degrees of freedom\nconfint(srs_means, df = survey::degf(srs_design))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n 2.5 % 97.5 %\ngrowth 27.77764 36.02236\n```\n\n\n:::\n:::\n\n\nNote that to obtain correct results, we had to specify the degrees of freedom using the design object.\n\n## Total\n\nCalculating population totals can be done using the `survey::svytotal` function in R.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsurvey::svytotal(~growth, srs_design)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n total SE\ngrowth 197589 12949\n```\n\n\n:::\n:::\n\n\n## Ratios\n\nTo perform ratio analysis for means or proportions of analysis variables in R, we can `survey::svyratio`, here requesting that we do not `separate` the ratio estimation per Strata as this design is not stratified.\n\n\n::: {.cell}\n\n```{.r 
.cell-code}\nsvy_ratio <- survey::svyratio(\n ~api00,\n ~api99,\n srs_design,\n se = TRUE,\n df = survey::degf(srs_design),\n separate = FALSE\n)\n\nsvy_ratio\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nRatio estimator: svyratio.survey.design2(~api00, ~api99, srs_design, se = TRUE, \n df = survey::degf(srs_design), separate = FALSE)\nRatios=\n api99\napi00 1.051066\nSEs=\n api99\napi00 0.003603991\n```\n\n\n:::\n\n```{.r .cell-code}\nconfint(svy_ratio, df = survey::degf(srs_design))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n 2.5 % 97.5 %\napi00/api99 1.043959 1.058173\n```\n\n\n:::\n:::\n\n\n## Proportions\n\nTo calculate a proportion in R, we use the `svymean` function on a factor or character column:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nprops <- survey::svymean(~sch.wide, srs_design)\n\nprops\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n mean SE\nsch.wideNo 0.185 0.0271\nsch.wideYes 0.815 0.0271\n```\n\n\n:::\n\n```{.r .cell-code}\nconfint(props, df = survey::degf(srs_design))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n 2.5 % 97.5 %\nsch.wideNo 0.1316041 0.2383959\nsch.wideYes 0.7616041 0.8683959\n```\n\n\n:::\n:::\n\n\nFor proportions close to 0, it can be that `survey::svyciprop` is more accurate at producing confidence intervals than `confint`.\n\n## Quantiles\n\nTo calculate quantiles in R, we can use the `survey::svyquantile` function. Note that this function was reworked in version 4.1 of `{survey}`, and prior to this had different arguments and results. The current version of `svyquantile` has an `qrule` which is similar to the `type` argument in `quantile`, and can be used to change how the quantiles are calculated. 
For more information, see `vignette(\"qrule\", package=\"survey\")`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsurvey::svyquantile(\n ~growth,\n srs_design,\n quantiles = c(0.025, 0.5, 0.975),\n ci = TRUE,\n se = TRUE\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n$growth\n quantile ci.2.5 ci.97.5 se\n0.025 -16 -21 -12 2.281998\n0.5 27 24 31 1.774887\n0.975 103 93 189 24.341307\n\nattr(,\"hasci\")\n[1] TRUE\nattr(,\"class\")\n[1] \"newsvyquantile\"\n```\n\n\n:::\n:::\n\n\n# Summary Statistics on Complex Survey Designs\n\nMuch of the previous examples and notes still stand for more complex survey designs, here we will demonstrate using a dataset from NHANES [@NHANES_2010], which uses both stratification and clustering:\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndata(\"nhanes\")\n\nhead(nhanes) |>\n gt::gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n \n
SDMVPSUSDMVSTRAWTMEC2YRHI_CHOLraceagecatRIAGENDR
18381528.7702(19,39]1
18414509.2803(0,19]1
28612041.6403(0,19]1
27521000.3403(59,Inf]2
18822633.5801(19,39]1
28574112.4912(39,59]2
\n
\n```\n\n:::\n:::\n\n\nTo produce means and standard quartiles for this sample, taking account of sample design, we can use the following:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nnhanes_design <- survey::svydesign(\n data = nhanes,\n id = ~SDMVPSU, # Specify the PSU/cluster column\n strata = ~SDMVSTRA, # The stratification column\n weights = ~WTMEC2YR, # The weighting column\n nest = TRUE # Allows for PSUs with the same name nested within different strata\n)\n\nsurvey::svymean(~HI_CHOL, nhanes_design, na.rm = TRUE)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n mean SE\nHI_CHOL 0.11214 0.0054\n```\n\n\n:::\n\n```{.r .cell-code}\nsurvey::svyquantile(\n ~HI_CHOL,\n nhanes_design,\n quantiles = c(0.25, 0.5, 0.75),\n na.rm = TRUE,\n ci = TRUE\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n$HI_CHOL\n quantile ci.2.5 ci.97.5 se\n0.25 0 0 1 0.2358596\n0.5 0 0 1 0.2358596\n0.75 0 0 1 0.2358596\n\nattr(,\"hasci\")\n[1] TRUE\nattr(,\"class\")\n[1] \"newsvyquantile\"\n```\n\n\n:::\n:::\n\n\nIn R, we can perform domain estimations of different sub-populations by passing our required survey function to the `svyby` function. 
`svyby` can also take additional options to pass to the function, for example here we pass `na.rm=TRUE` and `deff=TRUE` to `svymean`:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsurvey::svyby(\n ~HI_CHOL,\n ~race,\n nhanes_design,\n svymean,\n na.rm = TRUE,\n deff = TRUE\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n race HI_CHOL se DEff.HI_CHOL\n1 1 0.10149167 0.006245843 1.082805\n2 2 0.12164921 0.006604134 1.407850\n3 3 0.07864006 0.010384645 2.091258\n4 4 0.09967861 0.024666227 3.098368\n```\n\n\n:::\n:::\n\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-23\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P survey * 4.4-8 2025-08-28 [?] 
RSPM\n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n\n * ── Packages attached to the search path.\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n\n:::\n", + "markdown": "---\ntitle: \"Survey Summary Statistics using R\"\nbibliography: ../Comp/survey-stats-summary.bib\n---\n\nWhen conducting large-scale trials on samples of the population, it can be necessary to use a more complex sampling design than a simple random sample.\n\n- **Weighting** – If smaller populations are sampled more heavily to increase precision, then it is necessary to weight these observations in the analysis.\n\n- **Finite population correction** – Larger samples of populations result in lower variability in comparison to smaller samples.\n\n- **Stratification** – Dividing a population into sub-groups and sampling from each group. This protects from obtaining a very poor sample (e.g. under or over-represented groups), can give samples of a known precision, and gives more precise estimates for population means and totals.\n\n- **Clustering** – Dividing a population into sub-groups, and only sampling certain groups. This gives a lower precision, however can be much more convenient and cheaper - for example if surveying school children you may only sample a subset of schools to avoid travelling to a school to interview a single child.\n\nAll of these designs need to be taken into account when calculating statistics, and when producing models. Only summary statistics are discussed in this document, and variances are calculated using the default Taylor series linearisation methods. 
For a more detailed introduction to survey statistics in R, see [@Lohr_2022] or [@tlumley_2004].\n\nWe will use the [`{survey}`](https://cran.r-project.org/web/packages/survey/index.html) package, which is the standard for survey statistics in R. Note that for those who prefer the tidyverse, the [`{srvyr}`](https://cran.r-project.org/web/packages/srvyr/index.html) package is a wrapper around `{survey}` with `{dplyr}` like syntax.\n\n# Simple Survey Designs\n\nWe will use the [API]((https://r-survey.r-forge.r-project.org/survey/html/api.html)) dataset [@API_2000], which contains a number of datasets based on different samples from a dataset of academic performance. Initially we will just cover the methodology with a simple random sample and a finite population correction to demonstrate functionality.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(survey)\n\ndata(\"api\")\n\nhead(apisrs) |>\n gt::gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n \n
cdsstypenamesnamesnumdnamednumcnamecnumflagpcttestapi00api99targetgrowthsch.widecomp.impbothawardsmealsellyr.rndmobilityacs.k3acs.46acs.corepct.respnot.hsghsgsome.colcol.gradgrad.schavg.edfullemerenrollapi.stupwfpc
15739081534155HMcFarland HighMcFarland High1039McFarland Unified432Kern14NA984624481814NoYesNoNo4431NA6NANA2482443412731.91713547742930.976194
19642126066716EStowers (Cecil Stowers (Cecil B.) Elementary1124ABC Unified1Los Angeles18NA100878831NA47YesYesYesYes825NA151930NA974102343213.66901047842030.976194
30664493030640HBrea-Olinda HigBrea-Olinda High2868Brea-Olinda Unified79Orange29NA987347423-8NoNoNoNo1010NA7NANA2895592141243.7183181410128730.976194
19644516012744EAlameda ElementAlameda Elementary1273Downey Unified187Los Angeles18NA997726577115YesYesYesYes7025NA2323NANA100374014811.96851834229130.976194
40688096043293ESunnyside ElemeSunnyside Elementary4926San Luis Coastal Unified640San Luis Obispo39NA99739719420YesYesYesYes4312NA122029NA918212734103.17100021718930.976194
19734456014278ELos Molinos EleLos Molinos Elementary2463Hacienda la Puente Unif284Los Angeles18NA93835822NA13YesYesYesNo1619NA131929NA71182038343.96752025821130.976194
\n
\n```\n\n:::\n:::\n\n\n## Mean\n\nIf we want to calculate a mean of a variable in a dataset which has been obtained from a **s**imple **r**andom **s**ample such as `apisrs`, in R we can create a design object using the `survey::svydesign` function (specifying that there is no PSU using `id = ~1` and the finite population correction using `fpc=~fpc`).\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsrs_design <- survey::svydesign(id = ~1, fpc = ~fpc, data = apisrs)\n```\n:::\n\n\nThis design object stores all metadata about the sample alongside the data, and is used by all subsequent functions in the `{survey}` package. To calculate the mean, standard error, and confidence intervals of the `growth` variable, we can use the `survey::svymean` and `confint` functions:\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Calculate mean and SE of growth. The standard error will be corrected by the finite population correction specified in the design\nsrs_means <- survey::svymean(~growth, srs_design)\n\nsrs_means\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n mean SE\ngrowth 31.9 2.0905\n```\n\n\n:::\n\n```{.r .cell-code}\n# Use degf() to get the degrees of freedom\nconfint(srs_means, df = survey::degf(srs_design))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n 2.5 % 97.5 %\ngrowth 27.77764 36.02236\n```\n\n\n:::\n:::\n\n\nNote that to obtain correct results, we had to specify the degrees of freedom using the design object.\n\n## Total\n\nCalculating population totals can be done using the `survey::svytotal` function in R.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsurvey::svytotal(~growth, srs_design)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n total SE\ngrowth 197589 12949\n```\n\n\n:::\n:::\n\n\n## Ratios\n\nTo perform ratio analysis for means or proportions of analysis variables in R, we can `survey::svyratio`, here requesting that we do not `separate` the ratio estimation per Strata as this design is not stratified.\n\n\n::: {.cell}\n\n```{.r 
.cell-code}\nsvy_ratio <- survey::svyratio(\n ~api00,\n ~api99,\n srs_design,\n se = TRUE,\n df = survey::degf(srs_design),\n separate = FALSE\n)\n\nsvy_ratio\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nRatio estimator: svyratio.survey.design2(~api00, ~api99, srs_design, se = TRUE, \n df = survey::degf(srs_design), separate = FALSE)\nRatios=\n api99\napi00 1.051066\nSEs=\n api99\napi00 0.003603991\n```\n\n\n:::\n\n```{.r .cell-code}\nconfint(svy_ratio, df = survey::degf(srs_design))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n 2.5 % 97.5 %\napi00/api99 1.043959 1.058173\n```\n\n\n:::\n:::\n\n\n## Proportions\n\nTo calculate a proportion in R, we use the `svymean` function on a factor or character column:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nprops <- survey::svymean(~sch.wide, srs_design)\n\nprops\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n mean SE\nsch.wideNo 0.185 0.0271\nsch.wideYes 0.815 0.0271\n```\n\n\n:::\n\n```{.r .cell-code}\nconfint(props, df = survey::degf(srs_design))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n 2.5 % 97.5 %\nsch.wideNo 0.1316041 0.2383959\nsch.wideYes 0.7616041 0.8683959\n```\n\n\n:::\n:::\n\n\nFor proportions close to 0, it can be that `survey::svyciprop` is more accurate at producing confidence intervals than `confint`.\n\n## Quantiles\n\nTo calculate quantiles in R, we can use the `survey::svyquantile` function. Note that this function was reworked in version 4.1 of `{survey}`, and prior to this had different arguments and results. The current version of `svyquantile` has an `qrule` which is similar to the `type` argument in `quantile`, and can be used to change how the quantiles are calculated. 
For more information, see `vignette(\"qrule\", package=\"survey\")`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsurvey::svyquantile(\n ~growth,\n srs_design,\n quantiles = c(0.025, 0.5, 0.975),\n ci = TRUE,\n se = TRUE\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n$growth\n quantile ci.2.5 ci.97.5 se\n0.025 -16 -21 -12 2.281998\n0.5 27 24 31 1.774887\n0.975 99 84 189 26.623305\n\nattr(,\"hasci\")\n[1] TRUE\nattr(,\"class\")\n[1] \"newsvyquantile\"\n```\n\n\n:::\n:::\n\n\n# Summary Statistics on Complex Survey Designs\n\nMuch of the previous examples and notes still stand for more complex survey designs, here we will demonstrate using a dataset from NHANES [@NHANES_2010], which uses both stratification and clustering:\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndata(\"nhanes\")\n\nhead(nhanes) |>\n gt::gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n \n
SDMVPSUSDMVSTRAWTMEC2YRHI_CHOLraceagecatRIAGENDR
18381528.7702(19,39]1
18414509.2803(0,19]1
28612041.6403(0,19]1
27521000.3403(59,Inf]2
18822633.5801(19,39]1
28574112.4912(39,59]2
\n
\n```\n\n:::\n:::\n\n\nTo produce means and standard quartiles for this sample, taking account of sample design, we can use the following:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nnhanes_design <- survey::svydesign(\n data = nhanes,\n id = ~SDMVPSU, # Specify the PSU/cluster column\n strata = ~SDMVSTRA, # The stratification column\n weights = ~WTMEC2YR, # The weighting column\n nest = TRUE # Allows for PSUs with the same name nested within different strata\n)\n\nsurvey::svymean(~HI_CHOL, nhanes_design, na.rm = TRUE)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n mean SE\nHI_CHOL 0.11214 0.0054\n```\n\n\n:::\n\n```{.r .cell-code}\nsurvey::svyquantile(\n ~HI_CHOL,\n nhanes_design,\n quantiles = c(0.25, 0.5, 0.75),\n na.rm = TRUE,\n ci = TRUE\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n$HI_CHOL\n quantile ci.2.5 ci.97.5 se\n0.25 0 0 1 0.2358596\n0.5 0 0 1 0.2358596\n0.75 0 0 1 0.2358596\n\nattr(,\"hasci\")\n[1] TRUE\nattr(,\"class\")\n[1] \"newsvyquantile\"\n```\n\n\n:::\n:::\n\n\nIn R, we can perform domain estimations of different sub-populations by passing our required survey function to the `svyby` function. 
`svyby` can also take additional options to pass to the function, for example here we pass `na.rm=TRUE` and `deff=TRUE` to `svymean`:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsurvey::svyby(\n ~HI_CHOL,\n ~race,\n nhanes_design,\n svymean,\n na.rm = TRUE,\n deff = TRUE\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n race HI_CHOL se DEff.HI_CHOL\n1 1 0.10149167 0.006245843 1.082805\n2 2 0.12164921 0.006604134 1.407850\n3 3 0.07864006 0.010384645 2.091258\n4 4 0.09967861 0.024666227 3.098368\n```\n\n\n:::\n:::\n\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os Ubuntu 24.04.3 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-17\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P survey * 4.4-8 2025-08-28 [?] 
RSPM (R 4.5.0)\n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.2/lib/R/library\n\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n\n:::\n", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/R/survival/execute-results/html.json b/_freeze/R/survival/execute-results/html.json index f286a11e9..c492716bb 100644 --- a/_freeze/R/survival/execute-results/html.json +++ b/_freeze/R/survival/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "48b58d2031d3ac6b2d1a6f5f06020ef3", + "hash": "bdea5cf1e92f01eb25d5adf0aea95de0", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Survival Analysis Using R\"\n---\n\nThe most commonly used survival analysis methods in clinical trials include:\n\n- Kaplan-Meier (KM) estimators: non-parametric statistics utilized for estimating the survival function\n\n- Log-rank test: a non-parametric test for comparing the survival functions across two or more groups\n\n- Cox proportional hazards (PH) model: a semi-parametric model often used to assess the relationship between the survival time and explanatory variables\n\nAdditionally, other methods for analyzing time-to-event data are available, such as:\n\n- Parametric survival model\n\n- Accelerated failure time model\n\n- Competing risk model\n\n- Restricted mean survival time\n\n- Time-dependent Cox model\n\nWhile these models may be explored in a separate document, this particular document focuses solely on the three most prevalent methods: KM estimators, log-rank test and Cox PH model.\n\n# Analysis of Time-to-event Data\n\nBelow is a standard mock-up for survival analysis in clinical trials.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival/layout.png){fig-align='center' width=75%}\n:::\n:::\n\n\n## Example Data\n\nData source: 
https://stats.idre.ucla.edu/sas/seminars/sas-survival/\n\nThe data include 500 subjects from the Worcester Heart Attack Study. This study examined several factors, such as age, gender and BMI, that may influence survival time after heart attack. Follow up time for all participants begins at the time of hospital admission after heart attack and ends with death or loss to follow up (censoring). The variables used here are:\n\n- lenfol: length of followup, terminated either by death or censoring - time variable\n\n- fstat: loss to followup = 0, death = 1 - censoring variable\n\n- afb: atrial fibrillation, no = 0, 1 = yes - explanatory variable\n\n- gender: males = 0, females = 1 - stratification factor\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(tidyverse)\nlibrary(haven)\nlibrary(survival)\nlibrary(survminer)\nlibrary(ggsurvfit)\nlibrary(broom)\nlibrary(knitr)\nknitr::opts_chunk$set(echo = TRUE)\n\ndat <- haven::read_sas(file.path(\"../data/whas500.sas7bdat\")) |>\n mutate(\n LENFOLY = round(LENFOL / 365.25, 2), ## change follow-up days to years for better visualization\n AFB = factor(AFB, levels = c(1, 0))\n ) ## change AFB order to use \"Yes\" as the reference group to be consistent with SAS\n```\n:::\n\n\n## The Non-stratified Model\n\nFirst we try a non-stratified analysis following the mock-up above to describe the association between survival time and afb (atrial fibrillation).\n\nThe KM estimators are from `survival::survfit` function, the log-rank test uses `survminer::surv_pvalue`, and Cox PH model is conducted using `survival::coxph` function. Numerous R packages and functions are available for performing survival analysis. 
The author has selected `survival` and `survminer` for use in this context, but alternative options can also be employed for survival analysis.\n\n### KM estimators\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfit.km <- survival::survfit(survival::Surv(LENFOLY, FSTAT) ~ AFB, data = dat)\n\n## quantile estimates\nquantile(fit.km, probs = c(0.25, 0.5, 0.75))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n$quantile\n 25 50 75\nAFB=1 0.26 2.37 6.43\nAFB=0 0.94 5.91 6.44\n\n$lower\n 25 50 75\nAFB=1 0.05 1.27 4.24\nAFB=0 0.55 4.32 6.44\n\n$upper\n 25 50 75\nAFB=1 1.11 4.24 NA\nAFB=0 1.47 NA NA\n```\n\n\n:::\n\n```{.r .cell-code}\n## landmark estimates at 1, 3, 5-year\nsummary(fit.km, times = c(1, 3, 5))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nCall: survfit(formula = survival::Surv(LENFOLY, FSTAT) ~ AFB, data = dat)\n\n AFB=1 \n time n.risk n.event survival std.err lower 95% CI upper 95% CI\n 1 50 28 0.641 0.0543 0.543 0.757\n 3 27 12 0.455 0.0599 0.351 0.589\n 5 11 6 0.315 0.0643 0.211 0.470\n\n AFB=0 \n time n.risk n.event survival std.err lower 95% CI upper 95% CI\n 1 312 110 0.739 0.0214 0.699 0.782\n 3 199 33 0.642 0.0245 0.595 0.691\n 5 77 20 0.530 0.0311 0.472 0.595\n```\n\n\n:::\n:::\n\n\n### Log-rank test\n\nThere are multiple ways to output the log-rank test. The survdiff() function from {survival} package performs a log-rank test (or its weighted variants) to compare survival curves between two or more treatment groups. rho=0 is the default and gives the standard log-rank test. 
rho=1 would output the Peto-Peto test (which weights earliest events more heavily).\n\nYou can also use {survminer} package as shown below or {ggsurvfit} package using add_pvalue option if you want the p-value to be put into a KM plot - See example in Kaplan Meier section below.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n#survdiff() from survival package: unrounded pvalue=0.0009646027\nsurvdiff(Surv(LENFOLY, FSTAT) ~ AFB, data = dat, rho=0)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nCall:\nsurvdiff(formula = Surv(LENFOLY, FSTAT) ~ AFB, data = dat, rho = 0)\n\n N Observed Expected (O-E)^2/E (O-E)^2/V\nAFB=1 78 47 30.3 9.26 10.9\nAFB=0 422 168 184.7 1.52 10.9\n\n Chisq= 10.9 on 1 degrees of freedom, p= 0.001 \n```\n\n\n:::\n\n```{.r .cell-code}\n#surv_pvalue() from survminer\nsurvminer::surv_pvalue(fit.km, data = dat)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n variable pval method pval.txt\n1 AFB 0.0009646027 Log-rank p = 0.00096\n```\n\n\n:::\n:::\n\n\n### Cox PH model\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfit.cox <- survival::coxph(survival::Surv(LENFOLY, FSTAT) ~ AFB, data = dat)\nfit.cox |>\n tidy(exponentiate = TRUE, conf.int = TRUE, conf.level = 0.95) |>\n select(term, estimate, conf.low, conf.high)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 4\n term estimate conf.low conf.high\n \n1 AFB0 0.583 0.421 0.806\n```\n\n\n:::\n:::\n\n\n## The Stratified Model\n\nIn a stratified model, the Kaplan-Meier estimators remain the same as those in the non-stratified model. 
To implement stratified log-rank tests and Cox proportional hazards models, simply include the strata() function within the model formula.\n\n### Stratified Log-rank test\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfit.km.str <- survival::survfit(\n survival::Surv(LENFOLY, FSTAT) ~ AFB + survival::strata(GENDER),\n data = dat\n)\n\nsurvminer::surv_pvalue(fit.km.str, data = dat)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n variable pval method pval.txt\n1 AFB+survival::strata(GENDER) 0.001506607 Log-rank p = 0.0015\n```\n\n\n:::\n:::\n\n\n### Stratified Cox PH model\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfit.cox.str <- survival::coxph(\n survival::Surv(LENFOLY, FSTAT) ~ AFB + survival::strata(GENDER),\n data = dat\n)\nfit.cox.str |>\n tidy(exponentiate = TRUE, conf.int = TRUE, conf.level = 0.95) |>\n select(term, estimate, conf.low, conf.high)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 4\n term estimate conf.low conf.high\n \n1 AFB0 0.594 0.430 0.823\n```\n\n\n:::\n:::\n\n\n### Kaplan-Meier Graphs\n\nYou can use {survminer} or {ggsurvfit} packages to create kaplan-meier graphs including presentation of the number at risk and number of events under the graph. Both methods are highly customizable.\n\nIt is good practice to ensure your categorical factors are specified as such and are clearly labelled. 
{forcats} package is useful for recoding factors as shown below using fct_recode().\n\n{ggsurvfit} is shown here because the code coverage is higher for this package than for {survminer}.\n\nThe code below, fits the model, adds a log-rank test p-value, limits the X axis, controls the major scale and minor scale of Y and X axis, adds a risk table under the graph showing number at risk and the cumulative events, color codes the lines to allow easy identification of AFB and Gender and adds appropriate titles and axis labels.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat2<- dat %>%\n mutate(Treatment=fct_recode(AFB, 'Without AFB'='0','With AFB'='1')) %>% \n mutate(GENDER_F = factor(GENDER, labels=c('Female','Male')))\n\nsurvfit2(Surv(LENFOLY, FSTAT) ~ Treatment + strata(GENDER_F), data = dat2) %>% \n ggsurvfit() + \n add_pvalue(rho=0) +\n coord_cartesian(xlim = c(0, 6)) +\n scale_y_continuous(breaks = seq(0, 1, by = 0.1), minor_breaks=NULL) +\n scale_x_continuous(breaks = seq(0, 6, by = 1), minor_breaks=NULL) +\n add_risktable(risktable_stats='{n.risk}({cum.event})') +\n scale_color_manual(values=c('Blue','lightskyblue','red','hotpink')) +\n labs(y='Percentage Survival',\n x='Time (days)',\n title='Time to death for patients with or without AFB')\n```\n\n::: {.cell-output-display}\n![](survival_files/figure-html/unnamed-chunk-8-1.png){width=672}\n:::\n:::\n\n\n# Reference\n\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-23\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages 
───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P abind 1.4-8 2024-09-12 [?] RSPM\n askpass 1.2.1 2024-10-04 [1] RSPM\n P backports 1.5.0 2024-05-23 [?] RSPM\n base64enc 0.1-6 2026-02-02 [1] RSPM\n bit 4.6.0 2025-03-06 [1] RSPM\n bit64 4.6.0-1 2025-01-16 [1] RSPM\n blob 1.3.0 2026-01-14 [1] RSPM\n boot 1.3-32 2025-08-29 [2] CRAN (R 4.5.2)\n P broom * 1.0.12 2026-01-27 [?] RSPM\n bslib 0.10.0 2026-01-26 [1] RSPM\n cachem 1.1.0 2024-05-16 [1] RSPM\n callr 3.7.6 2024-03-25 [1] RSPM\n P car 3.1-5 2026-02-03 [?] RSPM\n P carData 3.0-6 2026-01-30 [?] RSPM\n cellranger 1.1.0 2016-07-27 [1] RSPM\n P cli 3.6.5 2025-04-23 [?] RSPM\n clipr 0.8.0 2022-02-22 [1] RSPM\n colorspace 2.1-2 2025-09-22 [1] RSPM\n commonmark 2.0.0 2025-07-07 [1] RSPM\n conflicted 1.2.0 2023-02-01 [1] RSPM\n corrplot 0.95 2024-10-14 [1] RSPM\n cowplot 1.2.0 2025-07-07 [1] RSPM\n cpp11 0.5.3 2026-01-20 [1] RSPM\n crayon 1.5.3 2024-06-20 [1] RSPM\n curl 7.0.0 2025-08-19 [1] RSPM\n P data.table 1.18.2.1 2026-01-27 [?] RSPM\n DBI 1.2.3 2024-06-02 [1] RSPM\n dbplyr 2.5.2 2026-02-13 [1] RSPM\n Deriv 4.2.0 2025-06-20 [1] RSPM\n P digest 0.6.39 2025-11-19 [?] RSPM\n doBy 4.7.1 2025-12-02 [1] RSPM\n P dplyr * 1.2.0 2026-02-03 [?] RSPM\n dtplyr 1.3.3 2026-02-11 [1] RSPM\n P evaluate 1.0.5 2025-08-27 [?] RSPM\n exactRankTests 0.8-35 2022-04-26 [1] RSPM\n P farver 2.1.2 2024-05-13 [?] RSPM\n P fastmap 1.2.0 2024-05-15 [?] RSPM\n fontawesome 0.5.3 2024-11-16 [1] RSPM\n P forcats * 1.0.1 2025-09-25 [?] RSPM\n forecast 9.0.1 2026-02-14 [1] RSPM\n P Formula 1.2-5 2023-02-24 [?] RSPM\n fracdiff 1.5-3 2024-02-01 [1] RSPM\n fs 1.6.6 2025-04-12 [1] RSPM\n gargle 1.6.1 2026-01-29 [1] RSPM\n P generics 0.1.4 2025-05-09 [?] RSPM\n P ggplot2 * 4.0.2 2026-02-03 [?] RSPM\n P ggpubr * 0.6.2 2025-10-17 [?] RSPM\n ggrepel 0.9.6 2024-09-07 [1] RSPM\n ggsci 4.2.0 2025-12-17 [1] RSPM\n P ggsignif 0.6.4 2022-10-13 [?] RSPM\n P ggsurvfit * 1.2.0 2025-09-13 [?] 
RSPM\n ggtext 0.1.2 2022-09-16 [1] RSPM\n P glue 1.8.0 2024-09-30 [?] RSPM\n googledrive 2.1.2 2025-09-10 [1] RSPM\n googlesheets4 1.1.2 2025-09-03 [1] RSPM\n P gridExtra 2.3 2017-09-09 [?] RSPM\n gridtext 0.1.5 2022-09-16 [1] RSPM\n P gtable 0.3.6 2024-10-25 [?] RSPM\n P haven * 2.5.5 2025-05-30 [?] RSPM\n highr 0.11 2024-05-26 [1] RSPM\n P hms 1.1.4 2025-10-17 [?] RSPM\n P htmltools 0.5.9 2025-12-04 [?] RSPM\n httr 1.4.8 2026-02-13 [1] RSPM\n ids 1.0.1 2017-05-31 [1] RSPM\n isoband 0.3.0 2025-12-07 [1] RSPM\n jpeg 0.1-11 2025-03-21 [1] RSPM\n jquerylib 0.1.4 2021-04-26 [1] RSPM\n P jsonlite 2.0.0 2025-03-27 [?] RSPM\n P km.ci 0.5-6 2022-04-06 [?] RSPM\n P KMsurv 0.1-6 2025-05-20 [?] RSPM\n P knitr * 1.51 2025-12-20 [?] RSPM\n P labeling 0.4.3 2023-08-29 [?] RSPM\n lattice 0.22-7 2025-04-02 [2] CRAN (R 4.5.2)\n P lifecycle 1.0.5 2026-01-08 [?] RSPM\n litedown 0.9 2025-12-18 [1] RSPM\n lme4 1.1-38 2025-12-02 [1] RSPM\n lmtest 0.9-40 2022-03-21 [1] RSPM\n P lubridate * 1.9.5 2026-02-04 [?] RSPM\n P magrittr 2.0.4 2025-09-12 [?] RSPM\n markdown 2.0 2025-03-23 [1] RSPM\n MASS 7.3-65 2025-02-28 [2] CRAN (R 4.5.2)\n Matrix 1.7-4 2025-08-28 [2] CRAN (R 4.5.2)\n MatrixModels 0.5-4 2025-03-26 [1] RSPM\n maxstat 0.7-26 2025-05-02 [1] RSPM\n memoise 2.0.1 2021-11-26 [1] RSPM\n mgcv 1.9-3 2025-04-04 [2] CRAN (R 4.5.2)\n microbenchmark 1.5.0 2024-09-04 [1] RSPM\n mime 0.13 2025-03-17 [1] RSPM\n minqa 1.2.8 2024-08-17 [1] RSPM\n modelr 0.1.11 2023-03-22 [1] RSPM\n mvtnorm 1.3-3 2025-01-10 [1] RSPM\n nlme 3.1-168 2025-03-31 [2] CRAN (R 4.5.2)\n nloptr 2.2.1 2025-03-17 [1] RSPM\n nnet 7.3-20 2025-01-01 [2] CRAN (R 4.5.2)\n numDeriv 2016.8-1.1 2019-06-06 [1] RSPM\n openssl 2.3.4 2025-09-30 [1] RSPM\n P patchwork 1.3.2 2025-08-25 [?] RSPM\n pbkrtest 0.5.5 2025-07-18 [1] RSPM\n P pillar 1.11.1 2025-09-17 [?] RSPM\n P pkgconfig 2.0.3 2019-09-22 [?] 
RSPM\n png 0.1-8 2022-11-29 [1] RSPM\n polynom 1.4-1 2022-04-11 [1] RSPM\n prettyunits 1.2.0 2023-09-24 [1] RSPM\n processx 3.8.6 2025-02-21 [1] RSPM\n progress 1.2.3 2023-12-06 [1] RSPM\n ps 1.9.1 2025-04-12 [1] RSPM\n P purrr * 1.2.1 2026-01-09 [?] RSPM\n quantreg 6.1 2025-03-10 [1] RSPM\n P R6 2.6.1 2025-02-15 [?] RSPM\n ragg 1.5.0 2025-09-02 [1] RSPM\n rappdirs 0.3.4 2026-01-17 [1] RSPM\n rbibutils 2.4.1 2026-01-21 [1] RSPM\n P RColorBrewer 1.1-3 2022-04-03 [?] RSPM\n Rcpp 1.1.1 2026-01-10 [1] RSPM\n RcppArmadillo 15.2.3-1 2025-12-17 [1] RSPM\n RcppEigen 0.3.4.0.2 2024-08-24 [1] RSPM\n Rdpack 2.6.6 2026-02-08 [1] RSPM\n P readr * 2.1.6 2025-11-14 [?] RSPM\n readxl 1.4.5 2025-03-07 [1] RSPM\n reformulas 0.4.4 2026-02-02 [1] RSPM\n rematch 2.0.0 2023-08-30 [1] RSPM\n rematch2 2.1.2 2020-05-01 [1] RSPM\n reprex 2.1.1 2024-07-06 [1] RSPM\n P rlang 1.1.7 2026-01-09 [?] RSPM\n P rmarkdown 2.30 2025-09-28 [?] RSPM\n P rstatix 0.7.3 2025-10-18 [?] RSPM\n rstudioapi 0.18.0 2026-01-16 [1] RSPM\n rvest 1.0.5 2025-08-29 [1] RSPM\n P S7 0.2.1 2025-11-14 [?] RSPM\n sass 0.4.10 2025-04-11 [1] RSPM\n P scales 1.4.0 2025-04-24 [?] RSPM\n selectr 0.5-1 2025-12-17 [1] RSPM\n SparseM 1.84-2 2024-07-17 [1] RSPM\n P stringi 1.8.7 2025-03-27 [?] RSPM\n P stringr * 1.6.0 2025-11-04 [?] RSPM\n survival * 3.8-3 2024-12-17 [2] CRAN (R 4.5.2)\n P survminer * 0.5.1 2025-09-02 [?] RSPM\n P survMisc 0.5.6 2022-04-07 [?] RSPM\n sys 3.4.3 2024-10-04 [1] RSPM\n systemfonts 1.3.1 2025-10-01 [1] RSPM\n textshaping 1.0.4 2025-10-10 [1] RSPM\n P tibble * 3.3.1 2026-01-11 [?] RSPM\n P tidyr * 1.3.2 2025-12-19 [?] RSPM\n P tidyselect 1.2.1 2024-03-11 [?] RSPM\n P tidyverse * 2.0.0 2023-02-22 [?] RSPM\n P timechange 0.4.0 2026-01-29 [?] RSPM\n timeDate 4052.112 2026-01-28 [1] RSPM\n tinytex 0.58 2025-11-19 [1] RSPM\n P tzdb 0.5.0 2025-03-15 [?] RSPM\n urca 1.3-4 2024-05-27 [1] RSPM\n P utf8 1.2.6 2025-06-08 [?] RSPM\n uuid 1.2-2 2026-01-23 [1] RSPM\n P vctrs 0.7.1 2026-01-23 [?] 
RSPM\n viridisLite 0.4.3 2026-02-04 [1] RSPM\n vroom 1.7.0 2026-01-27 [1] RSPM\n P withr 3.0.2 2024-10-28 [?] RSPM\n P xfun 0.56 2026-01-18 [?] RSPM\n xml2 1.5.2 2026-01-17 [1] RSPM\n P xtable 1.8-4 2019-04-21 [?] RSPM\n P yaml 2.3.12 2025-12-10 [?] RSPM\n P zoo 1.8-15 2025-12-15 [?] RSPM\n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n\n * ── Packages attached to the search path.\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n", + "markdown": "---\ntitle: \"Survival Analysis Using R\"\n---\n\nThe most commonly used survival analysis methods in clinical trials include:\n\n- Kaplan-Meier (KM) estimators: non-parametric statistics utilized for estimating the survival function\n\n- Log-rank test: a non-parametric test for comparing the survival functions across two or more groups\n\n- Cox proportional hazards (PH) model: a semi-parametric model often used to assess the relationship between the survival time and explanatory variables\n\nAdditionally, other methods for analyzing time-to-event data are available, such as:\n\n- Parametric survival model\n\n- Accelerated failure time model\n\n- Competing risk model\n\n- Restricted mean survival time\n\n- Time-dependent Cox model\n\nWhile these models may be explored in a separate document, this particular document focuses solely on the three most prevalent methods: KM estimators, log-rank test and Cox PH model.\n\n# Analysis of Time-to-event Data\n\nBelow is a standard mock-up for survival analysis in clinical trials.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival/layout.png){fig-align='center' width=75%}\n:::\n:::\n\n\n## Example Data\n\nData source: https://stats.idre.ucla.edu/sas/seminars/sas-survival/\n\nThe data include 500 subjects from the 
Worcester Heart Attack Study. This study examined several factors, such as age, gender and BMI, that may influence survival time after heart attack. Follow up time for all participants begins at the time of hospital admission after heart attack and ends with death or loss to follow up (censoring). The variables used here are:\n\n- lenfol: length of followup, terminated either by death or censoring - time variable\n\n- fstat: loss to followup = 0, death = 1 - censoring variable\n\n- afb: atrial fibrillation, no = 0, 1 = yes - explanatory variable\n\n- gender: males = 0, females = 1 - stratification factor\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(tidyverse)\nlibrary(haven)\nlibrary(survival)\nlibrary(survminer)\nlibrary(ggsurvfit)\nlibrary(broom)\nlibrary(knitr)\nknitr::opts_chunk$set(echo = TRUE)\n\ndat <- haven::read_sas(file.path(\"../data/whas500.sas7bdat\")) |>\n mutate(\n LENFOLY = round(LENFOL / 365.25, 2), ## change follow-up days to years for better visualization\n AFB = factor(AFB, levels = c(1, 0))\n ) ## change AFB order to use \"Yes\" as the reference group to be consistent with SAS\n```\n:::\n\n\n## The Non-stratified Model\n\nFirst we try a non-stratified analysis following the mock-up above to describe the association between survival time and afb (atrial fibrillation).\n\nThe KM estimators are from `survival::survfit` function, the log-rank test uses `survminer::surv_pvalue`, and Cox PH model is conducted using `survival::coxph` function. Numerous R packages and functions are available for performing survival analysis. 
The author has selected `survival` and `survminer` for use in this context, but alternative options can also be employed for survival analysis.\n\n### KM estimators\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfit.km <- survival::survfit(survival::Surv(LENFOLY, FSTAT) ~ AFB, data = dat)\n\n## quantile estimates\nquantile(fit.km, probs = c(0.25, 0.5, 0.75))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n$quantile\n 25 50 75\nAFB=1 0.26 2.37 6.43\nAFB=0 0.94 5.91 6.44\n\n$lower\n 25 50 75\nAFB=1 0.05 1.27 4.24\nAFB=0 0.55 4.32 6.44\n\n$upper\n 25 50 75\nAFB=1 1.11 4.24 NA\nAFB=0 1.47 NA NA\n```\n\n\n:::\n\n```{.r .cell-code}\n## landmark estimates at 1, 3, 5-year\nsummary(fit.km, times = c(1, 3, 5))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nCall: survfit(formula = survival::Surv(LENFOLY, FSTAT) ~ AFB, data = dat)\n\n AFB=1 \n time n.risk n.event survival std.err lower 95% CI upper 95% CI\n 1 50 28 0.641 0.0543 0.543 0.757\n 3 27 12 0.455 0.0599 0.351 0.589\n 5 11 6 0.315 0.0643 0.211 0.470\n\n AFB=0 \n time n.risk n.event survival std.err lower 95% CI upper 95% CI\n 1 312 110 0.739 0.0214 0.699 0.782\n 3 199 33 0.642 0.0245 0.595 0.691\n 5 77 20 0.530 0.0311 0.472 0.595\n```\n\n\n:::\n:::\n\n\n### Log-rank test\n\nThere are multiple ways to output the log-rank test. The survdiff() function from {survival} package performs a log-rank test (or its weighted variants) to compare survival curves between two or more treatment groups. rho=0 is the default and gives the standard log-rank test. 
rho=1 would output the Peto-Peto test (which weights earliest events more heavily).\n\nYou can also use {survminer} package as shown below or {ggsurvfit} package using add_pvalue option if you want the p-value to be put into a KM plot - See example in Kaplan Meier section below.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n#survdiff() from survival package: unrounded pvalue=0.0009646027\nsurvdiff(Surv(LENFOLY, FSTAT) ~ AFB, data = dat, rho=0)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nCall:\nsurvdiff(formula = Surv(LENFOLY, FSTAT) ~ AFB, data = dat, rho = 0)\n\n N Observed Expected (O-E)^2/E (O-E)^2/V\nAFB=1 78 47 30.3 9.26 10.9\nAFB=0 422 168 184.7 1.52 10.9\n\n Chisq= 10.9 on 1 degrees of freedom, p= 0.001 \n```\n\n\n:::\n\n```{.r .cell-code}\n#surv_pvalue() from survminer\nsurvminer::surv_pvalue(fit.km, data = dat)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n variable pval method pval.txt\n1 AFB 0.0009646027 Log-rank p = 0.00096\n```\n\n\n:::\n:::\n\n\n### Cox PH model\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfit.cox <- survival::coxph(survival::Surv(LENFOLY, FSTAT) ~ AFB, data = dat)\nfit.cox |>\n tidy(exponentiate = TRUE, conf.int = TRUE, conf.level = 0.95) |>\n select(term, estimate, conf.low, conf.high)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 4\n term estimate conf.low conf.high\n \n1 AFB0 0.583 0.421 0.806\n```\n\n\n:::\n:::\n\n\n## The Stratified Model\n\nIn a stratified model, the Kaplan-Meier estimators remain the same as those in the non-stratified model. 
To implement stratified log-rank tests and Cox proportional hazards models, simply include the strata() function within the model formula.\n\n### Stratified Log-rank test\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfit.km.str <- survival::survfit(\n survival::Surv(LENFOLY, FSTAT) ~ AFB + survival::strata(GENDER),\n data = dat\n)\n\nsurvminer::surv_pvalue(fit.km.str, data = dat)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n variable pval method pval.txt\n1 AFB+survival::strata(GENDER) 0.001506607 Log-rank p = 0.0015\n```\n\n\n:::\n:::\n\n\n### Stratified Cox PH model\n\nNOTE: if using {survival} package version 3.8.0 the below code works correctly (specifically with using survival::strata), however see [news](https://cran.r-project.org/web/packages/survival/news.html), changes were made to version 3.7-3 and 3.8.0 to ensure survival::strata() works the same as strata(). If using a {survival} package version prior to 3.8.0 **NEVER** use survival::strata() (instead use just strata(), otherwise the stratification variable isn't fitted as you expect. You can see this as it appears as a covariate in the output with hazard ratio estimated. Whereas, when fitted as a strata, it should not.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfit.cox.str <- survival::coxph(\n survival::Surv(LENFOLY, FSTAT) ~ AFB + survival::strata(GENDER),\n data = dat\n)\nfit.cox.str |>\n tidy(exponentiate = TRUE, conf.int = TRUE, conf.level = 0.95) |>\n select(term, estimate, conf.low, conf.high)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 1 × 4\n term estimate conf.low conf.high\n \n1 AFB0 0.594 0.430 0.823\n```\n\n\n:::\n:::\n\n\n### Kaplan-Meier Graphs\n\nYou can use {survminer} or {ggsurvfit} packages to create kaplan-meier graphs including presentation of the number at risk and number of events under the graph. Both methods are highly customizable.\n\nIt is good practice to ensure your categorical factors are specified as such and are clearly labelled. 
{forcats} package is useful for recoding factors as shown below using fct_recode().\n\n{ggsurvfit} is shown here because the code coverage is higher for this package than for {survminer}.\n\nThe code below, fits the model, adds a log-rank test p-value, limits the X axis, controls the major scale and minor scale of Y and X axis, adds a risk table under the graph showing number at risk and the cumulative events, color codes the lines to allow easy identification of AFB and Gender and adds appropriate titles and axis labels.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat2<- dat %>%\n mutate(Treatment=fct_recode(AFB, 'Without AFB'='0','With AFB'='1')) %>% \n mutate(GENDER_F = factor(GENDER, labels=c('Female','Male')))\n\nsurvfit2(Surv(LENFOLY, FSTAT) ~ Treatment + strata(GENDER_F), data = dat2) %>% \n ggsurvfit() + \n add_pvalue(rho=0) +\n coord_cartesian(xlim = c(0, 6)) +\n scale_y_continuous(breaks = seq(0, 1, by = 0.1), minor_breaks=NULL) +\n scale_x_continuous(breaks = seq(0, 6, by = 1), minor_breaks=NULL) +\n add_risktable(risktable_stats='{n.risk}({cum.event})') +\n scale_color_manual(values=c('Blue','lightskyblue','red','hotpink')) +\n labs(y='Percentage Survival',\n x='Time (days)',\n title='Time to death for patients with or without AFB')\n```\n\n::: {.cell-output-display}\n![](survival_files/figure-html/unnamed-chunk-8-1.png){width=672}\n:::\n:::\n\n\n# Reference\n\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.3 (2026-03-11)\n os Ubuntu 24.04.4 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-26\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! 
package * version date (UTC) lib source\n P abind 1.4-8 2024-09-12 [?] RSPM (R 4.5.0)\n askpass 1.2.1 2024-10-04 [1] RSPM (R 4.5.0)\n P backports 1.5.0 2024-05-23 [?] RSPM (R 4.5.0)\n base64enc 0.1-6 2026-02-02 [1] RSPM (R 4.5.0)\n bit 4.6.0 2025-03-06 [1] RSPM (R 4.5.0)\n bit64 4.6.0-1 2025-01-16 [1] RSPM (R 4.5.0)\n blob 1.3.0 2026-01-14 [1] RSPM (R 4.5.0)\n boot 1.3-32 2025-08-29 [2] CRAN (R 4.5.3)\n P broom * 1.0.12 2026-01-27 [?] RSPM (R 4.5.0)\n bslib 0.10.0 2026-01-26 [1] RSPM (R 4.5.0)\n cachem 1.1.0 2024-05-16 [1] RSPM (R 4.5.0)\n callr 3.7.6 2024-03-25 [1] RSPM (R 4.5.0)\n P car 3.1-5 2026-02-03 [?] RSPM (R 4.5.0)\n P carData 3.0-6 2026-01-30 [?] RSPM (R 4.5.0)\n cellranger 1.1.0 2016-07-27 [1] RSPM (R 4.5.0)\n P cli 3.6.5 2025-04-23 [?] RSPM (R 4.5.0)\n clipr 0.8.0 2022-02-22 [1] RSPM (R 4.5.0)\n colorspace 2.1-2 2025-09-22 [1] RSPM (R 4.5.0)\n commonmark 2.0.0 2025-07-07 [1] RSPM (R 4.5.0)\n conflicted 1.2.0 2023-02-01 [1] RSPM (R 4.5.0)\n corrplot 0.95 2024-10-14 [1] RSPM (R 4.5.0)\n cowplot 1.2.0 2025-07-07 [1] RSPM (R 4.5.0)\n cpp11 0.5.3 2026-01-20 [1] RSPM (R 4.5.0)\n crayon 1.5.3 2024-06-20 [1] RSPM (R 4.5.0)\n curl 7.0.0 2025-08-19 [1] RSPM (R 4.5.0)\n P data.table 1.18.2.1 2026-01-27 [?] RSPM (R 4.5.0)\n DBI 1.2.3 2024-06-02 [1] RSPM (R 4.5.0)\n dbplyr 2.5.2 2026-02-13 [1] RSPM (R 4.5.0)\n Deriv 4.2.0 2025-06-20 [1] RSPM (R 4.5.0)\n P digest 0.6.39 2025-11-19 [?] RSPM (R 4.5.0)\n doBy 4.7.1 2025-12-02 [1] RSPM (R 4.5.0)\n P dplyr * 1.2.0 2026-02-03 [?] RSPM (R 4.5.0)\n dtplyr 1.3.3 2026-02-11 [1] RSPM (R 4.5.0)\n P evaluate 1.0.5 2025-08-27 [?] RSPM (R 4.5.0)\n exactRankTests 0.8-35 2022-04-26 [1] RSPM (R 4.5.0)\n P farver 2.1.2 2024-05-13 [?] RSPM (R 4.5.0)\n P fastmap 1.2.0 2024-05-15 [?] RSPM (R 4.5.0)\n fontawesome 0.5.3 2024-11-16 [1] RSPM (R 4.5.0)\n P forcats * 1.0.1 2025-09-25 [?] RSPM (R 4.5.0)\n forecast 9.0.1 2026-02-14 [1] RSPM (R 4.5.0)\n P Formula 1.2-5 2023-02-24 [?] 
RSPM (R 4.5.0)\n fracdiff 1.5-3 2024-02-01 [1] RSPM (R 4.5.0)\n fs 1.6.6 2025-04-12 [1] RSPM (R 4.5.0)\n gargle 1.6.1 2026-01-29 [1] RSPM (R 4.5.0)\n P generics 0.1.4 2025-05-09 [?] RSPM (R 4.5.0)\n P ggplot2 * 4.0.2 2026-02-03 [?] RSPM (R 4.5.0)\n P ggpubr * 0.6.2 2025-10-17 [?] RSPM (R 4.5.0)\n ggrepel 0.9.6 2024-09-07 [1] RSPM (R 4.5.0)\n ggsci 4.2.0 2025-12-17 [1] RSPM (R 4.5.0)\n P ggsignif 0.6.4 2022-10-13 [?] RSPM (R 4.5.0)\n P ggsurvfit * 1.2.0 2025-09-13 [?] RSPM (R 4.5.0)\n ggtext 0.1.2 2022-09-16 [1] RSPM (R 4.5.0)\n P glue 1.8.0 2024-09-30 [?] RSPM (R 4.5.0)\n googledrive 2.1.2 2025-09-10 [1] RSPM (R 4.5.0)\n googlesheets4 1.1.2 2025-09-03 [1] RSPM (R 4.5.0)\n P gridExtra 2.3 2017-09-09 [?] RSPM (R 4.5.0)\n gridtext 0.1.5 2022-09-16 [1] RSPM (R 4.5.0)\n P gtable 0.3.6 2024-10-25 [?] RSPM (R 4.5.0)\n P haven * 2.5.5 2025-05-30 [?] RSPM (R 4.5.0)\n highr 0.11 2024-05-26 [1] RSPM (R 4.5.0)\n P hms 1.1.4 2025-10-17 [?] RSPM (R 4.5.0)\n P htmltools 0.5.9 2025-12-04 [?] RSPM (R 4.5.0)\n httr 1.4.8 2026-02-13 [1] RSPM (R 4.5.0)\n ids 1.0.1 2017-05-31 [1] RSPM (R 4.5.0)\n isoband 0.3.0 2025-12-07 [1] RSPM (R 4.5.0)\n jpeg 0.1-11 2025-03-21 [1] RSPM (R 4.5.0)\n jquerylib 0.1.4 2021-04-26 [1] RSPM (R 4.5.0)\n P jsonlite 2.0.0 2025-03-27 [?] RSPM (R 4.5.0)\n P km.ci 0.5-6 2022-04-06 [?] RSPM (R 4.5.0)\n P KMsurv 0.1-6 2025-05-20 [?] RSPM (R 4.5.0)\n P knitr * 1.51 2025-12-20 [?] RSPM (R 4.5.0)\n P labeling 0.4.3 2023-08-29 [?] RSPM (R 4.5.0)\n P lattice 0.22-7 2025-04-02 [?] RSPM (R 4.5.0)\n P lifecycle 1.0.5 2026-01-08 [?] RSPM (R 4.5.0)\n litedown 0.9 2025-12-18 [1] RSPM (R 4.5.0)\n lme4 1.1-38 2025-12-02 [1] RSPM (R 4.5.0)\n lmtest 0.9-40 2022-03-21 [1] RSPM (R 4.5.0)\n P lubridate * 1.9.5 2026-02-04 [?] RSPM (R 4.5.0)\n P magrittr 2.0.4 2025-09-12 [?] 
RSPM (R 4.5.0)\n markdown 2.0 2025-03-23 [1] RSPM (R 4.5.0)\n MASS 7.3-65 2025-02-28 [2] CRAN (R 4.5.3)\n Matrix 1.7-4 2025-08-28 [2] CRAN (R 4.5.3)\n MatrixModels 0.5-4 2025-03-26 [1] RSPM (R 4.5.0)\n maxstat 0.7-26 2025-05-02 [1] RSPM (R 4.5.0)\n memoise 2.0.1 2021-11-26 [1] RSPM (R 4.5.0)\n mgcv 1.9-3 2025-04-04 [1] RSPM (R 4.5.0)\n microbenchmark 1.5.0 2024-09-04 [1] RSPM (R 4.5.0)\n mime 0.13 2025-03-17 [1] RSPM (R 4.5.0)\n minqa 1.2.8 2024-08-17 [1] RSPM (R 4.5.0)\n modelr 0.1.11 2023-03-22 [1] RSPM (R 4.5.0)\n mvtnorm 1.3-3 2025-01-10 [1] RSPM (R 4.5.0)\n nlme 3.1-168 2025-03-31 [2] CRAN (R 4.5.3)\n nloptr 2.2.1 2025-03-17 [1] RSPM (R 4.5.0)\n nnet 7.3-20 2025-01-01 [2] CRAN (R 4.5.3)\n numDeriv 2016.8-1.1 2019-06-06 [1] RSPM (R 4.5.0)\n openssl 2.3.4 2025-09-30 [1] RSPM (R 4.5.0)\n P patchwork 1.3.2 2025-08-25 [?] RSPM (R 4.5.0)\n pbkrtest 0.5.5 2025-07-18 [1] RSPM (R 4.5.0)\n P pillar 1.11.1 2025-09-17 [?] RSPM (R 4.5.0)\n P pkgconfig 2.0.3 2019-09-22 [?] RSPM (R 4.5.0)\n png 0.1-8 2022-11-29 [1] RSPM (R 4.5.0)\n polynom 1.4-1 2022-04-11 [1] RSPM (R 4.5.0)\n prettyunits 1.2.0 2023-09-24 [1] RSPM (R 4.5.0)\n processx 3.8.6 2025-02-21 [1] RSPM (R 4.5.0)\n progress 1.2.3 2023-12-06 [1] RSPM (R 4.5.0)\n ps 1.9.1 2025-04-12 [1] RSPM (R 4.5.0)\n P purrr * 1.2.1 2026-01-09 [?] RSPM (R 4.5.0)\n quantreg 6.1 2025-03-10 [1] RSPM (R 4.5.0)\n P R6 2.6.1 2025-02-15 [?] RSPM (R 4.5.0)\n ragg 1.5.0 2025-09-02 [1] RSPM (R 4.5.0)\n rappdirs 0.3.4 2026-01-17 [1] RSPM (R 4.5.0)\n rbibutils 2.4.1 2026-01-21 [1] RSPM (R 4.5.0)\n P RColorBrewer 1.1-3 2022-04-03 [?] RSPM (R 4.5.0)\n Rcpp 1.1.1 2026-01-10 [1] RSPM (R 4.5.0)\n RcppArmadillo 15.2.3-1 2025-12-17 [1] RSPM (R 4.5.0)\n RcppEigen 0.3.4.0.2 2024-08-24 [1] RSPM (R 4.5.0)\n Rdpack 2.6.6 2026-02-08 [1] RSPM (R 4.5.0)\n P readr * 2.1.6 2025-11-14 [?] 
RSPM (R 4.5.0)\n readxl 1.4.5 2025-03-07 [1] RSPM (R 4.5.0)\n reformulas 0.4.4 2026-02-02 [1] RSPM (R 4.5.0)\n rematch 2.0.0 2023-08-30 [1] RSPM (R 4.5.0)\n rematch2 2.1.2 2020-05-01 [1] RSPM (R 4.5.0)\n reprex 2.1.1 2024-07-06 [1] RSPM (R 4.5.0)\n P rlang 1.1.7 2026-01-09 [?] RSPM (R 4.5.0)\n P rmarkdown 2.30 2025-09-28 [?] RSPM (R 4.5.0)\n P rstatix 0.7.3 2025-10-18 [?] RSPM (R 4.5.0)\n rstudioapi 0.18.0 2026-01-16 [1] RSPM (R 4.5.0)\n rvest 1.0.5 2025-08-29 [1] RSPM (R 4.5.0)\n P S7 0.2.1 2025-11-14 [?] RSPM (R 4.5.0)\n sass 0.4.10 2025-04-11 [1] RSPM (R 4.5.0)\n P scales 1.4.0 2025-04-24 [?] RSPM (R 4.5.0)\n selectr 0.5-1 2025-12-17 [1] RSPM (R 4.5.0)\n SparseM 1.84-2 2024-07-17 [1] RSPM (R 4.5.0)\n P stringi 1.8.7 2025-03-27 [?] RSPM (R 4.5.0)\n P stringr * 1.6.0 2025-11-04 [?] RSPM (R 4.5.0)\n P survival * 3.8-3 2024-12-17 [?] RSPM (R 4.5.0)\n P survminer * 0.5.1 2025-09-02 [?] RSPM (R 4.5.0)\n P survMisc 0.5.6 2022-04-07 [?] RSPM (R 4.5.0)\n sys 3.4.3 2024-10-04 [1] RSPM (R 4.5.0)\n systemfonts 1.3.1 2025-10-01 [1] RSPM (R 4.5.0)\n textshaping 1.0.4 2025-10-10 [1] RSPM (R 4.5.0)\n P tibble * 3.3.1 2026-01-11 [?] RSPM (R 4.5.0)\n P tidyr * 1.3.2 2025-12-19 [?] RSPM (R 4.5.0)\n P tidyselect 1.2.1 2024-03-11 [?] RSPM (R 4.5.0)\n P tidyverse * 2.0.0 2023-02-22 [?] RSPM (R 4.5.0)\n P timechange 0.4.0 2026-01-29 [?] RSPM (R 4.5.0)\n timeDate 4052.112 2026-01-28 [1] RSPM (R 4.5.0)\n tinytex 0.58 2025-11-19 [1] RSPM (R 4.5.0)\n P tzdb 0.5.0 2025-03-15 [?] RSPM (R 4.5.0)\n urca 1.3-4 2024-05-27 [1] RSPM (R 4.5.0)\n P utf8 1.2.6 2025-06-08 [?] RSPM (R 4.5.0)\n uuid 1.2-2 2026-01-23 [1] RSPM (R 4.5.0)\n P vctrs 0.7.1 2026-01-23 [?] RSPM (R 4.5.0)\n viridisLite 0.4.3 2026-02-04 [1] RSPM (R 4.5.0)\n vroom 1.7.0 2026-01-27 [1] RSPM (R 4.5.0)\n P withr 3.0.2 2024-10-28 [?] RSPM (R 4.5.0)\n P xfun 0.56 2026-01-18 [?] RSPM (R 4.5.0)\n xml2 1.5.2 2026-01-17 [1] RSPM (R 4.5.0)\n P xtable 1.8-4 2019-04-21 [?] RSPM (R 4.5.0)\n P yaml 2.3.12 2025-12-10 [?] 
RSPM (R 4.5.0)\n P zoo 1.8-15 2025-12-15 [?] RSPM (R 4.5.0)\n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.3/lib/R/library\n\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n", "supporting": [ "survival_files" ], diff --git a/_freeze/R/survival/figure-html/unnamed-chunk-8-1.png b/_freeze/R/survival/figure-html/unnamed-chunk-8-1.png index 1f08b70a5..545e0bb8f 100644 Binary files a/_freeze/R/survival/figure-html/unnamed-chunk-8-1.png and b/_freeze/R/survival/figure-html/unnamed-chunk-8-1.png differ diff --git a/_freeze/R/survival_cif/execute-results/html.json b/_freeze/R/survival_cif/execute-results/html.json index 25661bb5e..f1d2047b9 100644 --- a/_freeze/R/survival_cif/execute-results/html.json +++ b/_freeze/R/survival_cif/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "4fcf793735b4f78f44011c9ec89a118e", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Estimating Cumulative Incidence Functions Using R\"\n---\n\n## Objective\n\nIn this document we present how to estimate the cumulative incidence function (CIF) in R. We focus on the competing risks model where each subject experiences only one out of *k* possible events as depicted in the figure below.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(ggsurvfit)\nlibrary(survival)\nlibrary(tidycmprsk)\nlibrary(tidyverse)\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival_competing_risks/cr.jpg){fig-align='center' width=25%}\n:::\n:::\n\n\n## R Packages\n\nWe identify three packages:\n\n- `cmprsk` \n\n- `tidycmprsk` \n\n- `survival` \n\nThe `cmprsk` package implements the methods described in Gray (1988) for testing CIFs across different groups. The `tidycmprsk` package is a wrapper for `cmprsk`. It uses syntax similar to other survival analysis packages, and returns `survival` objects. 
In this document, we illustrate how to use the `tidycmprsk` package for estimating and testing CIFs. More details and other functionalities can be found [here](https://mskcc-epi-bio.github.io/tidycmprsk/ \"tidycmprsk\").\n\nThe `survival` package is a general purpose survival analysis package. Its scope is far beyond the competing risks model. We will demonstrate how to estimate the CIFs using this package.\n\n### Data used\n\nThe bone marrow transplant (BTM) dataset as presented by Guo & So (2018) is used. The dataset has the following variables:\n\n- `Group` has three levels, indicating three disease groups: ALL, AML-Low Risk, AML-High Risk.\n\n- `T` is the disease-free survival time in days. A derived variable `TYears = T/365.25` is used in the analysis.\n\n- `Status` has value 0 if `T` is censored; 1 if `T` is time to relapse; 2 if `T` is time to death.\n\n- `WaitTime` is the waiting time to transplant in days. This variable is not used here.\n\n- A new variable `ID` is created.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbmt <- haven::read_sas(file.path(\"../data/bmt.sas7bdat\")) |>\n mutate(\n Group = factor(\n Group,\n levels = c(1, 2, 3),\n labels = c('ALL', 'AML-Low Risk', 'AML-High Risk')\n ),\n Status = factor(\n Status,\n levels = c(0, 1, 2),\n labels = c('Censored', 'Relapse', 'Death')\n ),\n TYears = T / 365.25,\n ID = row_number()\n )\n```\n:::\n\n\n## Estimating CIFs\n\n### The `tidycmprsk` Package\n\n#### CIF Estimates and Gray's Test\n\nThe `tidycmprsk::cuminc()` function requires a `Surv` object. 
Therefore, the first level of the event status variable (in this example `Status`) must represent censoring.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncif.1 <- tidycmprsk::cuminc(Surv(TYears, Status) ~ Group, data = bmt)\n```\n:::\n\n\nGray's test statistics and p-value:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nknitr::kable(\n glance(cif.1) |>\n pivot_longer(\n everything(),\n names_to = c(\".value\", \"outcome_id\"),\n names_pattern = \"(.*)_(.*)\"\n )\n)\n```\n\n::: {.cell-output-display}\n\n\n|outcome_id |outcome | statistic| df| p.value|\n|:----------|:-------|----------:|--:|---------:|\n|1 |Relapse | 11.9228820| 2| 0.0025762|\n|2 |Death | 0.1374108| 2| 0.9336017|\n\n\n:::\n:::\n\n\nCIF estimates for time to relapse at selected timepoints for 'AML-Low Risk' patients:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nknitr::kable(\n cif.1 |>\n tidy(times = c(0.5, 1, 1.5, 2, 3)) |>\n select(time, outcome, strata, estimate, std.error, conf.low, conf.high) |>\n filter(outcome == 'Relapse' & strata == 'AML-Low Risk') |>\n mutate(\n time = as.character(time),\n across(where(is.numeric), ~ num(., digits = 4))\n )\n)\n```\n\n::: {.cell-output-display}\n\n\n|time |outcome |strata | estimate| std.error| conf.low| conf.high|\n|:----|:-------|:------------|--------:|---------:|--------:|---------:|\n|0.5 |Relapse |AML-Low Risk | 0.0000| 0.0000| NA| NA|\n|1 |Relapse |AML-Low Risk | 0.0741| 0.0360| 0.0234| 0.1646|\n|1.5 |Relapse |AML-Low Risk | 0.1296| 0.0463| 0.0563| 0.2344|\n|2 |Relapse |AML-Low Risk | 0.1481| 0.0489| 0.0685| 0.2565|\n|3 |Relapse |AML-Low Risk | 0.1667| 0.0514| 0.0813| 0.2783|\n\n\n:::\n:::\n\n\nTwo points to note:\n\n1. The current version of `cmprsk`, and hence `tidycmprsk,` estimates the variance of the CIF estimates asymptotically as in Aalen (1978). There is no option to change it to other methods.\n\n2. `tidycmprsk::cuminc()` offers pointwise CIs for the CIF estimates using the log-log transforms. 
There is no other options.\n\n#### CIF Plots\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncif.1 |>\n ggsurvfit::ggcuminc(outcome = 'Death') +\n ## add_confidence_interval() +\n ggsurvfit::add_risktable() +\n xlab('Time (years) to death')\n```\n\n::: {.cell-output-display}\n![](survival_cif_files/figure-html/cr.plot-1.png){width=672}\n:::\n\n```{.r .cell-code}\ncif.1 |>\n ggsurvfit::ggcuminc(outcome = 'Relapse') +\n ## add_confidence_interval() +\n ggsurvfit::add_risktable() +\n xlab('Time (years) to relapse')\n```\n\n::: {.cell-output-display}\n![](survival_cif_files/figure-html/cr.plot-2.png){width=672}\n:::\n:::\n\n\n### The `survival` Package\n\n#### CIF Estimates\n\nUsing the bone marrow transplant example, the following code shows how to estimate the CIF for time to relapse or to death:\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncif.2 <- survival::survfit(\n survival::Surv(TYears, Status) ~ Group,\n data = bmt,\n se.fit = TRUE,\n conf.type = 'log-log', ## default is 'log'\n id = ID,\n robust = TRUE ## default for multi-state model\n)\n## summary(cif.2)\n```\n:::\n\n\nA few points to note:\n\n1. `survfit()` returns the probability of being in a state (`pstate`). The CIF is its complement, i.e., `CIF = 1 - pstate`.\n\n2. Gray's test for testing equality across groups is not performed.\n\n3. In this example `survfit()` recognizes that the input are in multi-state model format; therefore, it estimates the variances of the CIFs with an infinitesimal jackknife (see Therneau (2024)). The CIs, as a result, are different from that estimated based on Aalen's or the delta method as done in SAS PROC LIFETEST.\n\n4. The survival package also offers a different syntax for estimating CIFs. Users need to first call `finegray()` separately for each event to reformat the data, then apply `survfit()` for each event. 
The CIF estimates are identical since the same estimation method is used; the variances can be different from directly calling `survfit()` with data in multi-state model format, since `finegray()` artificially extends the observed time for the competing events (see Therneau (2024)).\n\n#### CIF Plots\n\nThe same `ggcuminc()` syntax can be applied to the `survfit()` output `cif.2`.\n\n## Summary\n\nBoth `tidycmprsk::cminc()` (as inherited from `cmprsk::cuminc()`) and `survival::survfit()` implement Aalen-Johansen estimator. For competing risks it reduces to CIF, which has a closed form formulation. The results are identical as produced by SAS PROC LIFETEST.\n\nCertain options in SAS, e.g., the delta method for variance estimation or other types of transformation for calculating the CIs, are not available in the current versions of `tidycmprsk::cuminc()` or `survival::survfit()`. However, the outputs from both contain enough information that these options can be manually implemented by users. For example, Pintilie (2006) provides R code for deriving the variances for the estimated CIFs based on the delta method.\n\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-23\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P cmprsk 2.2-12 2024-05-19 [?] RSPM\n survival * 3.8-3 2024-12-17 [2] CRAN (R 4.5.2)\n P tidycmprsk * 1.1.1 2025-11-14 [?] 
RSPM\n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n\n * ── Packages attached to the search path.\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n\n:::\n\n## Reference\n\nAalen O. (1978). Nonparametric Estimation of Partial Transition Probabilities in Multiple Decrement Models, *Annals of Statistics*, 6:534-545.\n\nGray R. (1988). A Class of K-Sample Tests for Comparing the Cumulative Incidence of a Competing Risk, *Annals of Statistics*, 16:1141-1154.\n\nGray R. (2024). *cmprsk: Subdistribution Analysis of Competing Risks*. R package version 2.2-12. \n\nGuo C and So Y. (2018). Cause-Specific Analysis of Competing Risks Using the PHREG Procedure. In *Proceedings of the SAS Global Forum 2018 Conference*. Cary, NC: SAS Institute Inc. .\n\nPintilie M. (2006). *Competing Risks: A Practical Perspectiv*e. Wiley.\\\n\n\nSjoberg D and Fei T. (2023). *tidycmprsk: Competing Risks Estimation*. \n\nTherneau T. (2024). *A Package for Survival Analysis in R*. R package version 3.7-0, [https://CRAN.R-project.org/package=survival](https://cran.r-project.org/package=survival).\n", + "markdown": "---\ntitle: \"Estimating Cumulative Incidence Functions Using R\"\n---\n\n## Objective\n\nIn this document we present how to estimate the cumulative incidence function (CIF) in R. 
We focus on the competing risks model where each subject experiences only one out of *k* possible events as depicted in the figure below.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(ggsurvfit)\nlibrary(survival)\nlibrary(tidycmprsk)\nlibrary(tidyverse)\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival_competing_risks/cr.jpg){fig-align='center' width=25%}\n:::\n:::\n\n\n## R Packages\n\nWe identify three packages:\n\n- `cmprsk` \n\n- `tidycmprsk` \n\n- `survival` \n\nThe `cmprsk` package implements the methods described in Gray (1988) for testing CIFs across different groups. The `tidycmprsk` package is a wrapper for `cmprsk`. It uses syntax similar to other survival analysis packages, and returns `survival` objects. In this document, we illustrate how to use the `tidycmprsk` package for estimating and testing CIFs. More details and other functionalities can be found [here](https://mskcc-epi-bio.github.io/tidycmprsk/ \"tidycmprsk\").\n\nThe `survival` package is a general purpose survival analysis package. Its scope is far beyond the competing risks model. We will demonstrate how to estimate the CIFs using this package.\n\n### Data used\n\nThe bone marrow transplant (BTM) dataset as presented by Guo & So (2018) is used. The dataset has the following variables:\n\n- `Group` has three levels, indicating three disease groups: ALL, AML-Low Risk, AML-High Risk.\n\n- `T` is the disease-free survival time in days. A derived variable `TYears = T/365.25` is used in the analysis.\n\n- `Status` has value 0 if `T` is censored; 1 if `T` is time to relapse; 2 if `T` is time to death.\n\n- `WaitTime` is the waiting time to transplant in days. 
This variable is not used here.\n\n- A new variable `ID` is created.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbmt <- haven::read_sas(file.path(\"../data/bmt.sas7bdat\")) |>\n mutate(\n Group = factor(\n Group,\n levels = c(1, 2, 3),\n labels = c('ALL', 'AML-Low Risk', 'AML-High Risk')\n ),\n Status = factor(\n Status,\n levels = c(0, 1, 2),\n labels = c('Censored', 'Relapse', 'Death')\n ),\n TYears = T / 365.25,\n ID = row_number()\n )\n```\n:::\n\n\n## Estimating CIFs\n\n### The `tidycmprsk` Package\n\n#### CIF Estimates and Gray's Test\n\nThe `tidycmprsk::cuminc()` function requires a `Surv` object. Therefore, the first level of the event status variable (in this example `Status`) must represent censoring.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncif.1 <- tidycmprsk::cuminc(Surv(TYears, Status) ~ Group, data = bmt)\n```\n:::\n\n\nGray's test statistics and p-value:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nknitr::kable(\n glance(cif.1) |>\n pivot_longer(\n everything(),\n names_to = c(\".value\", \"outcome_id\"),\n names_pattern = \"(.*)_(.*)\"\n )\n)\n```\n\n::: {.cell-output-display}\n\n\n|outcome_id |outcome | statistic| df| p.value|\n|:----------|:-------|----------:|--:|---------:|\n|1 |Relapse | 11.9228820| 2| 0.0025762|\n|2 |Death | 0.1374108| 2| 0.9336017|\n\n\n:::\n:::\n\n\nCIF estimates for time to relapse at selected timepoints for 'AML-Low Risk' patients:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nknitr::kable(\n cif.1 |>\n tidy(times = c(0.5, 1, 1.5, 2, 3)) |>\n select(time, outcome, strata, estimate, std.error, conf.low, conf.high) |>\n filter(outcome == 'Relapse' & strata == 'AML-Low Risk') |>\n mutate(\n time = as.character(time),\n across(where(is.numeric), ~ num(., digits = 4))\n )\n)\n```\n\n::: {.cell-output-display}\n\n\n|time |outcome |strata | estimate| std.error| conf.low| conf.high|\n|:----|:-------|:------------|--------:|---------:|--------:|---------:|\n|0.5 |Relapse |AML-Low Risk | 0.0000| 0.0000| NA| NA|\n|1 |Relapse |AML-Low Risk | 0.0741| 
0.0360| 0.0234| 0.1646|\n|1.5 |Relapse |AML-Low Risk | 0.1296| 0.0463| 0.0563| 0.2344|\n|2 |Relapse |AML-Low Risk | 0.1481| 0.0489| 0.0685| 0.2565|\n|3 |Relapse |AML-Low Risk | 0.1667| 0.0514| 0.0813| 0.2783|\n\n\n:::\n:::\n\n\nTwo points to note:\n\n1. The current version of `cmprsk`, and hence `tidycmprsk,` estimates the variance of the CIF estimates asymptotically as in Aalen (1978). There is no option to change it to other methods.\n\n2. `tidycmprsk::cuminc()` offers pointwise CIs for the CIF estimates using the log-log transforms. There is no other options.\n\n#### CIF Plots\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncif.1 |>\n ggsurvfit::ggcuminc(outcome = 'Death') +\n ## add_confidence_interval() +\n ggsurvfit::add_risktable() +\n xlab('Time (years) to death')\n```\n\n::: {.cell-output-display}\n![](survival_cif_files/figure-html/cr.plot-1.png){width=672}\n:::\n\n```{.r .cell-code}\ncif.1 |>\n ggsurvfit::ggcuminc(outcome = 'Relapse') +\n ## add_confidence_interval() +\n ggsurvfit::add_risktable() +\n xlab('Time (years) to relapse')\n```\n\n::: {.cell-output-display}\n![](survival_cif_files/figure-html/cr.plot-2.png){width=672}\n:::\n:::\n\n\n### The `survival` Package\n\n#### CIF Estimates\n\nUsing the bone marrow transplant example, the following code shows how to estimate the CIF for time to relapse or to death:\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncif.2 <- survival::survfit(\n survival::Surv(TYears, Status) ~ Group,\n data = bmt,\n se.fit = TRUE,\n conf.type = 'log-log', ## default is 'log'\n id = ID,\n robust = TRUE ## default for multi-state model\n)\n## summary(cif.2)\n```\n:::\n\n\nA few points to note:\n\n1. `survfit()` returns the probability of being in a state (`pstate`). The CIF is its complement, i.e., `CIF = 1 - pstate`.\n\n2. Gray's test for testing equality across groups is not performed.\n\n3. 
In this example `survfit()` recognizes that the inputs are in multi-state model format; therefore, it estimates the variances of the CIFs with an infinitesimal jackknife (see Therneau (2024)). The CIs, as a result, are different from that estimated based on Aalen's or the delta method as done in SAS PROC LIFETEST.\n\n4. The survival package also offers a different syntax for estimating CIFs. Users need to first call `finegray()` separately for each event to reformat the data, then apply `survfit()` for each event. The CIF estimates are identical since the same estimation method is used; the variances can be different from directly calling `survfit()` with data in multi-state model format, since `finegray()` artificially extends the observed time for the competing events (see Therneau (2024)).\n\n#### CIF Plots\n\nThe same `ggcuminc()` syntax can be applied to the `survfit()` output `cif.2`.\n\n## Summary\n\nBoth `tidycmprsk::cuminc()` (as inherited from `cmprsk::cuminc()`) and `survival::survfit()` implement the Aalen-Johansen estimator. For competing risks it reduces to CIF, which has a closed form formulation. The results are identical as produced by SAS PROC LIFETEST.\n\nCertain options in SAS, e.g., the delta method for variance estimation or other types of transformation for calculating the CIs, are not available in the current versions of `tidycmprsk::cuminc()` or `survival::survfit()`. However, the outputs from both contain enough information that these options can be manually implemented by users. 
For example, Pintilie (2006) provides R code for deriving the variances for the estimated CIFs based on the delta method.\n\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os Ubuntu 24.04.3 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-17\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P cmprsk 2.2-12 2024-05-19 [?] RSPM (R 4.5.0)\n survival * 3.8-3 2024-12-17 [2] CRAN (R 4.5.2)\n P tidycmprsk * 1.1.1 2025-11-14 [?] RSPM (R 4.5.0)\n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.2/lib/R/library\n\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n\n:::\n\n## Reference\n\nAalen O. (1978). Nonparametric Estimation of Partial Transition Probabilities in Multiple Decrement Models, *Annals of Statistics*, 6:534-545.\n\nGray R. (1988). A Class of K-Sample Tests for Comparing the Cumulative Incidence of a Competing Risk, *Annals of Statistics*, 16:1141-1154.\n\nGray R. (2024). *cmprsk: Subdistribution Analysis of Competing Risks*. R package version 2.2-12. \n\nGuo C and So Y. (2018). Cause-Specific Analysis of Competing Risks Using the PHREG Procedure. In *Proceedings of the SAS Global Forum 2018 Conference*. Cary, NC: SAS Institute Inc. .\n\nPintilie M. (2006). *Competing Risks: A Practical Perspectiv*e. Wiley.\\\n\n\nSjoberg D and Fei T. (2023). *tidycmprsk: Competing Risks Estimation*. \n\nTherneau T. 
(2024). *A Package for Survival Analysis in R*. R package version 3.7-0, [https://CRAN.R-project.org/package=survival](https://cran.r-project.org/package=survival).\n", "supporting": [ "survival_cif_files" ], diff --git a/_freeze/R/survival_cif/figure-html/cr.plot-1.png b/_freeze/R/survival_cif/figure-html/cr.plot-1.png index f4af3fde8..6efa99d86 100644 Binary files a/_freeze/R/survival_cif/figure-html/cr.plot-1.png and b/_freeze/R/survival_cif/figure-html/cr.plot-1.png differ diff --git a/_freeze/R/survival_cif/figure-html/cr.plot-2.png b/_freeze/R/survival_cif/figure-html/cr.plot-2.png index a5a3eaec9..4cf63e296 100644 Binary files a/_freeze/R/survival_cif/figure-html/cr.plot-2.png and b/_freeze/R/survival_cif/figure-html/cr.plot-2.png differ diff --git a/_freeze/R/survival_csh/execute-results/html.json b/_freeze/R/survival_csh/execute-results/html.json index 8ca212bfb..d33c4cfac 100644 --- a/_freeze/R/survival_csh/execute-results/html.json +++ b/_freeze/R/survival_csh/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "5c82cfc66011c3ea510f222905222d4d", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Estimating and Testing Cause Specific Hazard Ratio Using R\"\n---\n\n## Objective\n\nIn this document we present how to estimate and test cause specific hazard ratio for the probability of experiencing a certain event at a given time in a competing risks model in R. We focus on the basic model where each subject experiences only one out of *k* possible events as depicted in the figure below.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(survival)\nlibrary(tidyverse)\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival_competing_risks/cr.jpg){fig-align='center' width=25%}\n:::\n:::\n\n\nAs this document aims to provide syntax for estimating and testing cause-specific hazard ratios using Cox's PH model for competing risks, we assume that readers have working knowledge of a competing risks framework. 
The [Reference] below list a few literature for a quick refresher on this topic.\n\nThe syntax given here produce results match that produced by the default settings of SAS PROC PHREG (see the companion SAS document). This is usually necessary if validating results from the two software is the objective.\n\n## R Package\n\nWe use the `survival` package in this document. \n\n### Data used\n\nThe bone marrow transplant (BTM) dataset as presented by Guo & So (2018) is used. The dataset has the following variables:\n\n- `Group` has three levels, indicating three disease groups.\n\n- `T` is the disease-free survival time in days. A derived variable `TYears = T/365.25` is used in the analysis.\n\n- `Status` has value 0 if `T` is censored; 1 if `T` is time to relapse; 2 if `T` is time to death.\n\n- `WaitTime` is the waiting time to transplant in days.\n\n- For illustration, a categorical variable `waitCat` is created from `waitTime` as `waitCat = TRUE` if `waitTime > 200`, and `FALSE` otherwise.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbmt <- haven::read_sas(file.path(\"../data/bmt.sas7bdat\")) %>%\n mutate(\n Group = factor(\n Group,\n levels = c(1, 2, 3),\n labels = c('ALL', 'AML-Low Risk', 'AML-High Risk')\n ),\n Status = factor(\n Status,\n levels = c(0, 1, 2),\n labels = c('Censored', 'Relapse', 'Death')\n ),\n TYears = T / 365.25,\n waitCat = (WaitTime > 200),\n ID = row_number()\n )\n```\n:::\n\n\n## Estimating and testing the cause specific hazard ratio\n\nSyntax-wise there are two ways to generate the estimates and related outputs using `survival::coxph()`. 
They produce essentially the same results except that the global null hypotheses are different.\n\n### Syntax 1: All competing events in one go\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncsh.1 <- survival::coxph(\n survival::Surv(TYears, Status) ~ Group + survival::strata(waitCat),\n data = bmt,\n id = ID,\n ties = 'breslow', ## default is 'efron'\n robust = FALSE ## default is TRUE\n)\nsummary(csh.1)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nCall:\nsurvival::coxph(formula = survival::Surv(TYears, Status) ~ Group + \n survival::strata(waitCat), data = bmt, ties = \"breslow\", \n robust = FALSE, id = ID)\n\n n= 137, number of events= 83 \n\n coef exp(coef) se(coef) z Pr(>|z|) \nGroupAML-Low Risk_1:2 -0.9453 0.3886 0.4508 -2.097 0.0360 *\nGroupAML-High Risk_1:2 0.6162 1.8519 0.3636 1.695 0.0901 .\nGroupAML-Low Risk_1:3 -0.3987 0.6712 0.3946 -1.010 0.3123 \nGroupAML-High Risk_1:3 0.1273 1.1358 0.4031 0.316 0.7521 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n exp(coef) exp(-coef) lower .95 upper .95\nGroupAML-Low Risk_1:2 0.3886 2.5736 0.1606 0.940\nGroupAML-High Risk_1:2 1.8519 0.5400 0.9081 3.777\nGroupAML-Low Risk_1:3 0.6712 1.4899 0.3097 1.455\nGroupAML-High Risk_1:3 1.1358 0.8804 0.5154 2.503\n\nConcordance= 0.619 (se = 0.031 )\nLikelihood ratio test= 17.36 on 4 df, p=0.002\nWald test = 16.13 on 4 df, p=0.003\nScore (logrank) test = 17.83 on 4 df, p=0.001\n```\n\n\n:::\n:::\n\n\nIn the output, rows with suffix `1:2` are for `Status = 2`, or `Relapse`; and `1:3` are for `Status = 3`, or `Death`. 
As usual, censoring must be the lowest level in `Status`, which in this example is coded `0`.\n\nSince both events (`Relapse` and `Death`) are model together, the global tests have 4 degrees of freedom, with the null hypothesis that there is no difference among different levels of `Group` in either `Relapse` or `Death`.\n\n### Syntax 2: One event at a time\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncsh.2 <- survival::coxph(\n survival::Surv(TYears, Status == 'Relapse') ~\n Group + survival::strata(waitCat),\n data = bmt,\n id = ID,\n ties = 'breslow', ## default is 'efron'\n robust = FALSE ## default is TRUE\n)\nsummary(csh.2)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nCall:\nsurvival::coxph(formula = survival::Surv(TYears, Status == \"Relapse\") ~ \n Group + survival::strata(waitCat), data = bmt, ties = \"breslow\", \n robust = FALSE, id = ID)\n\n n= 137, number of events= 42 \n\n coef exp(coef) se(coef) z Pr(>|z|) \nGroupAML-Low Risk -0.9453 0.3886 0.4508 -2.097 0.0360 *\nGroupAML-High Risk 0.6162 1.8519 0.3636 1.695 0.0901 .\n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n exp(coef) exp(-coef) lower .95 upper .95\nGroupAML-Low Risk 0.3886 2.574 0.1606 0.940\nGroupAML-High Risk 1.8519 0.540 0.9081 3.777\n\nConcordance= 0.672 (se = 0.039 )\nLikelihood ratio test= 15.37 on 2 df, p=5e-04\nWald test = 14.16 on 2 df, p=8e-04\nScore (logrank) test = 15.83 on 2 df, p=4e-04\n```\n\n\n:::\n:::\n\n\nThe results are identical to those labeled with `1:2` in the earlier outputs under Syntax 1 above. However, since only `Relapse` is modeled, the global tests have only 2 degrees of freedom with the null hypothesis that there is no difference among different levels of `Group` for `Relapse`.\n\n## Summary\n\n- In `survival::coxph()` the default method for handling ties is `ties = 'efron'`. To match results with SAS, this needs to be changed to `ties = `breslow`. 
\n\n- For multi-state models such as a competing risk analysis, `survival::coxph()` by default estimate the standard errors of parameter estimates with a robust sandwich estimator. To match default results with SAS, this needs to be set to `robust = FALSE`. \n\n- Due to differences in internal numerical estimation methods of R and SAS, results only match up to the 4th decimal places. However, overall consistency can be established between the two for estimating and testing cause-specific hazard ratio using Cox's PH model.\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-23\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages ───────────────────────────────────────────────────────────────────\n package * version date (UTC) lib source\n cmprsk 2.2-12 2024-05-19 [1] RSPM\n survival * 3.8-3 2024-12-17 [2] CRAN (R 4.5.2)\n tidycmprsk 1.1.1 2025-11-14 [1] RSPM\n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n * ── Packages attached to the search path.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n\n:::\n\n## Reference\n\nGuo C and So Y. (2018). \"Cause-specific analysis of competing risks using the PHREG procedure.\" In *Proceedings of the SAS Global Forum 2018 Conference*. Cary, NC: SAS Institute Inc. .\n\nPintilie M. (2006). *Competing Risks: A Practical Perspective*. Wiley. 
\n\nTherneau T, Crowson C, and Atkinson E. (2024). \"Multi-state models and competing risks.\" \n", + "markdown": "---\ntitle: \"Estimating and Testing Cause Specific Hazard Ratio Using R\"\n---\n\n## Objective\n\nIn this document we present how to estimate and test cause specific hazard ratio for the probability of experiencing a certain event at a given time in a competing risks model in R. We focus on the basic model where each subject experiences only one out of *k* possible events as depicted in the figure below.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(survival)\nlibrary(tidyverse)\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival_competing_risks/cr.jpg){fig-align='center' width=25%}\n:::\n:::\n\n\nAs this document aims to provide syntax for estimating and testing cause-specific hazard ratios using Cox's PH model for competing risks, we assume that readers have working knowledge of a competing risks framework. The [Reference] below list a few literature for a quick refresher on this topic.\n\nThe syntax given here produce results match that produced by the default settings of SAS PROC PHREG (see the companion SAS document). This is usually necessary if validating results from the two software is the objective.\n\n## R Package\n\nWe use the `survival` package in this document. \n\n### Data used\n\nThe bone marrow transplant (BTM) dataset as presented by Guo & So (2018) is used. The dataset has the following variables:\n\n- `Group` has three levels, indicating three disease groups.\n\n- `T` is the disease-free survival time in days. 
A derived variable `TYears = T/365.25` is used in the analysis.\n\n- `Status` has value 0 if `T` is censored; 1 if `T` is time to relapse; 2 if `T` is time to death.\n\n- `WaitTime` is the waiting time to transplant in days.\n\n- For illustration, a categorical variable `waitCat` is created from `waitTime` as `waitCat = TRUE` if `waitTime > 200`, and `FALSE` otherwise.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbmt <- haven::read_sas(file.path(\"../data/bmt.sas7bdat\")) %>%\n mutate(\n Group = factor(\n Group,\n levels = c(1, 2, 3),\n labels = c('ALL', 'AML-Low Risk', 'AML-High Risk')\n ),\n Status = factor(\n Status,\n levels = c(0, 1, 2),\n labels = c('Censored', 'Relapse', 'Death')\n ),\n TYears = T / 365.25,\n waitCat = (WaitTime > 200),\n ID = row_number()\n )\n```\n:::\n\n\n## Estimating and testing the cause specific hazard ratio\n\nSyntax-wise there are two ways to generate the estimates and related outputs using `survival::coxph()`. They produce essentially the same results except that the global null hypotheses are different.\n\n### Syntax 1: All competing events in one go\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncsh.1 <- survival::coxph(\n survival::Surv(TYears, Status) ~ Group + survival::strata(waitCat),\n data = bmt,\n id = ID,\n ties = 'breslow', ## default is 'efron'\n robust = FALSE ## default is TRUE\n)\nsummary(csh.1)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nCall:\nsurvival::coxph(formula = survival::Surv(TYears, Status) ~ Group + \n survival::strata(waitCat), data = bmt, ties = \"breslow\", \n robust = FALSE, id = ID)\n\n n= 137, number of events= 83 \n\n coef exp(coef) se(coef) z Pr(>|z|) \nGroupAML-Low Risk_1:2 -0.9453 0.3886 0.4508 -2.097 0.0360 *\nGroupAML-High Risk_1:2 0.6162 1.8519 0.3636 1.695 0.0901 .\nGroupAML-Low Risk_1:3 -0.3987 0.6712 0.3946 -1.010 0.3123 \nGroupAML-High Risk_1:3 0.1273 1.1358 0.4031 0.316 0.7521 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n\n exp(coef) exp(-coef) lower .95 upper .95\nGroupAML-Low Risk_1:2 0.3886 2.5736 0.1606 0.940\nGroupAML-High Risk_1:2 1.8519 0.5400 0.9081 3.777\nGroupAML-Low Risk_1:3 0.6712 1.4899 0.3097 1.455\nGroupAML-High Risk_1:3 1.1358 0.8804 0.5154 2.503\n\nConcordance= 0.619 (se = 0.031 )\nLikelihood ratio test= 17.36 on 4 df, p=0.002\nWald test = 16.13 on 4 df, p=0.003\nScore (logrank) test = 17.83 on 4 df, p=0.001\n```\n\n\n:::\n:::\n\n\nIn the output, rows with suffix `1:2` are for `Status = 2`, or `Relapse`; and `1:3` are for `Status = 3`, or `Death`. As usual, censoring must be the lowest level in `Status`, which in this example is coded `0`.\n\nSince both events (`Relapse` and `Death`) are model together, the global tests have 4 degrees of freedom, with the null hypothesis that there is no difference among different levels of `Group` in either `Relapse` or `Death`.\n\n### Syntax 2: One event at a time\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncsh.2 <- survival::coxph(\n survival::Surv(TYears, Status == 'Relapse') ~\n Group + survival::strata(waitCat),\n data = bmt,\n id = ID,\n ties = 'breslow', ## default is 'efron'\n robust = FALSE ## default is TRUE\n)\nsummary(csh.2)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nCall:\nsurvival::coxph(formula = survival::Surv(TYears, Status == \"Relapse\") ~ \n Group + survival::strata(waitCat), data = bmt, ties = \"breslow\", \n robust = FALSE, id = ID)\n\n n= 137, number of events= 42 \n\n coef exp(coef) se(coef) z Pr(>|z|) \nGroupAML-Low Risk -0.9453 0.3886 0.4508 -2.097 0.0360 *\nGroupAML-High Risk 0.6162 1.8519 0.3636 1.695 0.0901 .\n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n\n exp(coef) exp(-coef) lower .95 upper .95\nGroupAML-Low Risk 0.3886 2.574 0.1606 0.940\nGroupAML-High Risk 1.8519 0.540 0.9081 3.777\n\nConcordance= 0.672 (se = 0.039 )\nLikelihood ratio test= 15.37 on 2 df, p=5e-04\nWald test = 14.16 on 2 df, p=8e-04\nScore (logrank) test = 15.83 on 2 df, p=4e-04\n```\n\n\n:::\n:::\n\n\nThe results are identical to those labeled with `1:2` in the earlier outputs under Syntax 1 above. However, since only `Relapse` is modeled, the global tests have only 2 degrees of freedom with the null hypothesis that there is no difference among different levels of `Group` for `Relapse`.\n\n## Summary\n\n- In `survival::coxph()` the default method for handling ties is `ties = 'efron'`. To match results with SAS, this needs to be changed to `ties = `breslow`. \n\n- For multi-state models such as a competing risk analysis, `survival::coxph()` by default estimate the standard errors of parameter estimates with a robust sandwich estimator. To match default results with SAS, this needs to be set to `robust = FALSE`. \n\n- Due to differences in internal numerical estimation methods of R and SAS, results only match up to the 4th decimal places. 
However, overall consistency can be established between the two for estimating and testing cause-specific hazard ratio using Cox's PH model.\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os Ubuntu 24.04.3 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-17\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n package * version date (UTC) lib source\n cmprsk 2.2-12 2024-05-19 [1] RSPM (R 4.5.0)\n survival * 3.8-3 2024-12-17 [2] CRAN (R 4.5.2)\n tidycmprsk 1.1.1 2025-11-14 [1] RSPM (R 4.5.0)\n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.2/lib/R/library\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n\n:::\n\n## Reference\n\nGuo C and So Y. (2018). \"Cause-specific analysis of competing risks using the PHREG procedure.\" In *Proceedings of the SAS Global Forum 2018 Conference*. Cary, NC: SAS Institute Inc. .\n\nPintilie M. (2006). *Competing Risks: A Practical Perspective*. Wiley. \n\nTherneau T, Crowson C, and Atkinson E. (2024). 
\"Multi-state models and competing risks.\" \n", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/R/tipping_point/execute-results/html.json b/_freeze/R/tipping_point/execute-results/html.json index e6ef79535..2a4418940 100644 --- a/_freeze/R/tipping_point/execute-results/html.json +++ b/_freeze/R/tipping_point/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "5b71f59904e85b71561a5ceb7998ad13", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"R Tipping Point (Delta Adjustment): Continuous Data\"\n---\n\n\n\n# Tipping Point / Delta Adjustment\n\n## Setup\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# General libraries\nlibrary(mice)\nlibrary(dplyr)\nlibrary(tidyr)\nlibrary(gt)\nlibrary(labelled)\nlibrary(purrr)\nlibrary(ggplot2)\nlibrary(gridExtra)\n\n# Methodology specific libraries\nlibrary(emmeans)\nlibrary(mmrm)\nlibrary(rstan)\nlibrary(rbmi)\n\n# Paralleisation libraries\nlibrary(future)\nlibrary(furrr)\nlibrary(parallelly)\n```\n:::\n\n\n### Random seed\n\n\n::: {.cell}\n\n```{.r .cell-code}\nset.seed(12345)\n```\n:::\n\n\n## Reference-based multiple imputation (rbmi)\n\n### Methodology introduction\n\nThe concept of delta adjustment and tipping point analysis builds on the framework of reference-based multiple imputation (rbmi) as seen on its respective [CAMIS webpage](../R/rbmi_continuous_joint.html). The use of the `rbmi` package in R ([Gower-Page et al. 2022](https://joss.theoj.org/papers/10.21105/joss.04251)) for the following standard and reference-based multiple imputation approaches are introduced there:\n\n- Missing At Random (MAR)\n\n- Jump to Reference (JR)\n\n- Copy Reference (CR)\n\n- Copy Increment from Reference (CIR)\n\nPlease make sure to familiarize yourself with these functionalities of the `rbmi` package before checking this tutorial. 
The outline of this page generally follows the [rbmi advanced functionality vignette](https://cran.r-project.org/web/packages/rbmi/vignettes/advanced.html#sensitivity-analyses-delta-adjustments-and-tipping-point-analyses).\n\n### Data\n\nThe same publicly available [dataset](https://r-packages.io/datasets/antidepressant_data) from an antidepressant clinical trial that was used to illustrate `rbmi` is again used for this tutorial. This dataset is also used in the [rbmi quickstart vignette](https://cran.r-project.org/web/packages/rbmi/vignettes/quickstart.html).\n\nThe relevant endpoint for the antidepressant trial was assessed using the Hamilton 17-item depression rating scale (HAMD17), which was measured at baseline and subsequently at weeks 1, 2, 3, 4 and 6 (visits 4-7). Study drug discontinuation occurred in 24% (20/84) of subjects in the active drug group, compared to 26% (23/88) of subjects in the placebo group. Importantly, all data after study drug discontinuation are missing and there is a single intermittent missing observation.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndata(\"antidepressant_data\")\n\ndat <- antidepressant_data |>\n dplyr::select(\n PATIENT,\n GENDER,\n THERAPY,\n RELDAYS,\n VISIT,\n BASVAL,\n HAMDTL17,\n CHANGE\n ) |>\n dplyr::mutate(THERAPY = factor(THERAPY, levels = c(\"PLACEBO\", \"DRUG\"))) |>\n labelled::remove_labels()\n\ngt(head(dat, n = 10))\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGE
1503FDRUG743221-11
1503FDRUG1453220-12
1503FDRUG2863219-13
1503FDRUG4273217-15
1507FPLACEBO741411-3
1507FPLACEBO15514140
1507FPLACEBO296149-5
1507FPLACEBO427145-9
1509FDRUG742120-1
1509FDRUG1452118-3
\n
\n```\n\n:::\n:::\n\n\nThe number of patients per visit and treatment group are:\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat |>\n dplyr::summarise(N = n(), .by = c(VISIT, THERAPY))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 8 × 3\n VISIT THERAPY N\n \n1 4 DRUG 84\n2 5 DRUG 77\n3 6 DRUG 73\n4 7 DRUG 64\n5 4 PLACEBO 88\n6 5 PLACEBO 81\n7 6 PLACEBO 76\n8 7 PLACEBO 65\n```\n\n\n:::\n:::\n\n\nThe mean change from baseline of the HAMD17 endpoint per visit and treatment group using only the complete cases are:\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat |>\n dplyr::summarise(N = n(), MEAN = mean(CHANGE), .by = c(VISIT, THERAPY))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 8 × 4\n VISIT THERAPY N MEAN\n \n1 4 DRUG 84 -1.82\n2 5 DRUG 77 -4.71\n3 6 DRUG 73 -6.79\n4 7 DRUG 64 -8.34\n5 4 PLACEBO 88 -1.51\n6 5 PLACEBO 81 -2.70\n7 6 PLACEBO 76 -4.07\n8 7 PLACEBO 65 -5.14\n```\n\n\n:::\n:::\n\n\nThe missingness pattern is:\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat_wide = dat |>\n dplyr::select(PATIENT, VISIT, CHANGE) |>\n pivot_wider(\n id_cols = PATIENT,\n names_from = VISIT,\n names_prefix = \"VISIT_\",\n values_from = CHANGE\n )\n\ndat_wide |>\n dplyr::select(starts_with(\"VISIT_\")) |>\n mice::md.pattern(plot = TRUE, rotate.names = TRUE)\n```\n\n::: {.cell-output-display}\n![](tipping_point_files/figure-html/data_exploration_4-1.png){width=672}\n:::\n\n::: {.cell-output .cell-output-stdout}\n\n```\n VISIT_4 VISIT_5 VISIT_6 VISIT_7 \n128 1 1 1 1 0\n20 1 1 1 0 1\n10 1 1 0 0 2\n1 1 0 1 1 1\n13 1 0 0 0 3\n 0 14 23 43 80\n```\n\n\n:::\n:::\n\n\nThere is a single patient with an intermittent missing observation at visit 5, which is patient 3618. 
Special considerations need to be taken when applying delta adjustments to intermittent missing observations like this one (more on this later).\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat_expand <- rbmi::expand_locf(\n dat,\n PATIENT = levels(dat$PATIENT),\n VISIT = levels(dat$VISIT),\n vars = c(\"BASVAL\", \"THERAPY\", \"GENDER\"),\n group = c(\"PATIENT\"),\n order = c(\"PATIENT\", \"VISIT\")\n)\n\ndat_expand |>\n dplyr::filter(PATIENT == \"3618\") |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGE
3618MDRUG848157
3618MDRUGNA58NANA
3618MDRUG2868146
3618MDRUG4278102
\n
\n```\n\n:::\n:::\n\n\n### Preparation\n\nThis tutorial will focus on tipping point analysis and delta adjustment. We assume the user used the `rbmi` package to create an imputation object called `imputeObj` (see [CAMIS webpage](../R/rbmi_continuous_joint.html)).\n\n\n\n\n\n\n\n## Tipping point analysis and delta adjustment\n\n### Methodology introduction\n\nWhen analyses for endpoints are performed under MAR or MNAR assumptions for missing data, it is important to perform sensitivity analyses to assess the impact of deviations from these assumptions. Tipping point analysis (or delta adjustment method) is an example of a sensitivity analysis that can be used to assess the robustness of a clinical trial when its result is based on imputed missing data.\n\nGenerally, tipping point analysis explores the influence of missingness on the overall conclusion of the treatment difference by shifting imputed missing values in the treatment group towards the reference group until the result becomes non-significant. The tipping point is the minimum shift needed to make the result non-significant. If the minimum shift needed to make the result non-significant is implausible, then greater confidence in the primary results can be inferred.\n\nTipping point analysis generally happens by adjusting imputing values by so-called delta values. The observed tipping point is the minimum delta needed to make the result non-significant. Mostly a range of delta values is explored and only imputed values from the active treatment group are adjusted by the delta value. However, delta adjustments in the control group are possible as well. Naturally, the range of acceptable values for delta should be agreed a priori, before taking this approach.\n\nFor an extensive discussion on delta adjustment methods, we refer to [Cro et al. 
2020](https://pubmed.ncbi.nlm.nih.gov/32419182/).\n\n## Simple delta adjustments\n\n### Generate delta's\n\nIn the `rbmi` package, the `delta` argument of the `analyse()` function allows users to adjust the imputed datasets prior to the analysis stage. This `delta` argument requires a data frame created by `delta_template()`, which includes a column called `delta` that specifies the delta values to be added.\n\nBy default, `delta_template()` will set `delta` to 0 for all observations.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# delta template\ndat_delta_0 <- delta_template(imputations = imputeObj)\n\n\ndat_delta_0 |>\n dplyr::filter(PATIENT %in% c(\"1513\", \"1514\")) |>\n head(8) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n
PATIENTVISITTHERAPYis_maris_missingis_post_icestrategydelta
15134DRUGTRUEFALSEFALSENA0
15135DRUGTRUETRUETRUEMAR0
15136DRUGTRUETRUETRUEMAR0
15137DRUGTRUETRUETRUEMAR0
15144PLACEBOTRUEFALSEFALSENA0
15145PLACEBOTRUETRUETRUEMAR0
15146PLACEBOTRUETRUETRUEMAR0
15147PLACEBOTRUETRUETRUEMAR0
\n
\n```\n\n:::\n:::\n\n\nYou can add the delta values to the outcome variable (CHANGE) of one of the imputed datasets by using the `apply_delta()` function. Of course, nothing is changed here as delta = 0.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nimputed_dfs = rbmi::extract_imputed_dfs(imputeObj)\nMI_10 = imputed_dfs[[10]]\nMI_10$PATIENT2 = MI_10$PATIENT\nMI_10$PATIENT = dat_expand$PATIENT\n```\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# imputed dataset\ngt(MI_10 |> dplyr::filter(PATIENT %in% c(\"1513\", \"1514\")) |> head(8))\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGEPATIENT2
1513MDRUG7419245.000000new_pt_5
1513MDRUGNA519NA-2.973035new_pt_5
1513MDRUGNA619NA-7.597362new_pt_5
1513MDRUGNA719NA-4.555403new_pt_5
1514FPLACEBO7421232.000000new_pt_6
1514FPLACEBONA521NA5.442236new_pt_6
1514FPLACEBONA621NA1.908540new_pt_6
1514FPLACEBONA721NA5.571921new_pt_6
\n
\n```\n\n:::\n\n```{.r .cell-code}\n# delta adjusted dataset\nrbmi:::apply_delta(\n MI_10,\n delta = dat_delta_0,\n group = c(\"PATIENT\", \"VISIT\", \"THERAPY\"),\n outcome = \"CHANGE\"\n) |>\n dplyr::filter(PATIENT %in% c(\"1513\", \"1514\")) |>\n head(8) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGEPATIENT2
1513MDRUG7419245.000000new_pt_5
1513MDRUGNA519NA-2.973035new_pt_5
1513MDRUGNA619NA-7.597362new_pt_5
1513MDRUGNA719NA-4.555403new_pt_5
1514FPLACEBO7421232.000000new_pt_6
1514FPLACEBONA521NA5.442236new_pt_6
1514FPLACEBONA621NA1.908540new_pt_6
1514FPLACEBONA721NA5.571921new_pt_6
\n
\n```\n\n:::\n:::\n\n\nYou may have noticed that the `is_missing` and `is_post_ice` columns of the delta data frame lend themselves perfectly to adjust the delta values, as the boolean variables `TRUE` and `FALSE` are regarded as 1 and 0 by R. If you want to set delta to 5 for all missing values, you can do so by multiplying the `is_missing` column by 5. In our case, this addition assumes a \"worsening\" of the imputed outcome variable, CHANGE, which is measured on the HAMD17 scale.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# delta template\ndat_delta_5_v1 <- delta_template(imputations = imputeObj) |>\n mutate(delta = is_missing * 5)\n\ndat_delta_5_v1 |>\n dplyr::filter(PATIENT %in% c(\"1513\", \"1514\")) |>\n head(8) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n
PATIENTVISITTHERAPYis_maris_missingis_post_icestrategydelta
15134DRUGTRUEFALSEFALSENA0
15135DRUGTRUETRUETRUEMAR5
15136DRUGTRUETRUETRUEMAR5
15137DRUGTRUETRUETRUEMAR5
15144PLACEBOTRUEFALSEFALSENA0
15145PLACEBOTRUETRUETRUEMAR5
15146PLACEBOTRUETRUETRUEMAR5
15147PLACEBOTRUETRUETRUEMAR5
\n
\n```\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# imputed dataset\ngt(MI_10 |> dplyr::filter(PATIENT %in% c(\"1513\", \"1514\")) |> head(8))\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGEPATIENT2
1513MDRUG7419245.000000new_pt_5
1513MDRUGNA519NA-2.973035new_pt_5
1513MDRUGNA619NA-7.597362new_pt_5
1513MDRUGNA719NA-4.555403new_pt_5
1514FPLACEBO7421232.000000new_pt_6
1514FPLACEBONA521NA5.442236new_pt_6
1514FPLACEBONA621NA1.908540new_pt_6
1514FPLACEBONA721NA5.571921new_pt_6
\n
\n```\n\n:::\n\n```{.r .cell-code}\n# delta adjusted dataset\nrbmi:::apply_delta(\n MI_10,\n delta = dat_delta_5_v1,\n group = c(\"PATIENT\", \"VISIT\", \"THERAPY\"),\n outcome = \"CHANGE\"\n) |>\n dplyr::filter(PATIENT %in% c(\"1513\", \"1514\")) |>\n head(8) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGEPATIENT2
1513MDRUG7419245.0000000new_pt_5
1513MDRUGNA519NA2.0269651new_pt_5
1513MDRUGNA619NA-2.5973620new_pt_5
1513MDRUGNA719NA0.4445972new_pt_5
1514FPLACEBO7421232.0000000new_pt_6
1514FPLACEBONA521NA10.4422359new_pt_6
1514FPLACEBONA621NA6.9085397new_pt_6
1514FPLACEBONA721NA10.5719214new_pt_6
\n
\n```\n\n:::\n:::\n\n\nImportantly, if you multiply the `is_missing` column only, you apply the delta adjustment to **all** imputed missing values, including intermittent missing values. This can be checked by looking at patient 3618, which has an intermittent missing value at visit 5.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# delta template\ngt(dat_delta_5_v1 |> dplyr::filter(PATIENT == \"3618\"))\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n
PATIENTVISITTHERAPYis_maris_missingis_post_icestrategydelta
36184DRUGTRUEFALSEFALSENA0
36185DRUGTRUETRUEFALSEMAR5
36186DRUGTRUEFALSEFALSENA0
36187DRUGTRUEFALSEFALSENA0
\n
\n```\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# imputed dataset\ngt(MI_10 |> dplyr::filter(PATIENT == \"3618\"))\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGEPATIENT2
3618MDRUG848157.00000new_pt_99
3618MDRUGNA58NA1.28834new_pt_99
3618MDRUG2868146.00000new_pt_99
3618MDRUG4278102.00000new_pt_99
\n
\n```\n\n:::\n\n```{.r .cell-code}\n# delta adjusted dataset\nrbmi:::apply_delta(\n MI_10,\n delta = dat_delta_5_v1,\n group = c(\"PATIENT\", \"VISIT\", \"THERAPY\"),\n outcome = \"CHANGE\"\n) |>\n dplyr::filter(PATIENT == \"3618\") |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGEPATIENT2
3618MDRUG848157.00000new_pt_99
3618MDRUGNA58NA6.28834new_pt_99
3618MDRUG2868146.00000new_pt_99
3618MDRUG4278102.00000new_pt_99
\n
\n```\n\n:::\n:::\n\n\nIf you consider the `is_post_ice` column too, you can restrict the delta adjustment to missing values that occur after study drug discontinuation due to an intercurrent event (ICE). By multiplying both the `is_missing` and `is_post_ice` columns by your chosen delta, the delta value will only be added when both columns are `TRUE`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# delta template\ndat_delta_5_v2 <- delta_template(imputations = imputeObj) |>\n mutate(delta = is_missing * is_post_ice * 5)\n\ndat_delta_5_v2 |>\n dplyr::filter(PATIENT == \"3618\") |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n
PATIENTVISITTHERAPYis_maris_missingis_post_icestrategydelta
36184DRUGTRUEFALSEFALSENA0
36185DRUGTRUETRUEFALSEMAR0
36186DRUGTRUEFALSEFALSENA0
36187DRUGTRUEFALSEFALSENA0
\n
\n```\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# imputed dataset\ngt(MI_10 |> dplyr::filter(PATIENT == \"3618\"))\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGEPATIENT2
3618MDRUG848157.00000new_pt_99
3618MDRUGNA58NA1.28834new_pt_99
3618MDRUG2868146.00000new_pt_99
3618MDRUG4278102.00000new_pt_99
\n
\n```\n\n:::\n\n```{.r .cell-code}\n# delta adjusted dataset\nrbmi:::apply_delta(\n MI_10,\n delta = dat_delta_5_v2,\n group = c(\"PATIENT\", \"VISIT\", \"THERAPY\"),\n outcome = \"CHANGE\"\n) |>\n dplyr::filter(PATIENT == \"3618\") |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGEPATIENT2
3618MDRUG848157.00000new_pt_99
3618MDRUGNA58NA1.28834new_pt_99
3618MDRUG2868146.00000new_pt_99
3618MDRUG4278102.00000new_pt_99
\n
\n```\n\n:::\n:::\n\n\nBesides choosing which missing data to apply the delta adjustment to, you may also want to apply different delta adjustments to imputed data from the different groups. As an example, let's set delta = 0 for the control group, and delta = 5 for the intervention group. Here, we consider the `is_missing` column only, so that we apply the delta's to **all** imputed missing data.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# delta template\ndelta_control = 0\ndelta_intervention = 5\n\ndat_delta_0_5 <- rbmi::delta_template(imputations = imputeObj) |>\n mutate(\n delta_ctl = (THERAPY == \"PLACEBO\") * is_missing * delta_control,\n delta_int = (THERAPY == \"DRUG\") * is_missing * delta_intervention,\n delta = delta_ctl + delta_int\n )\n\ndat_delta_0_5 |>\n dplyr::filter(PATIENT %in% c(\"1513\", \"1514\")) |>\n head(8) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n \n
PATIENTVISITTHERAPYis_maris_missingis_post_icestrategydeltadelta_ctldelta_int
15134DRUGTRUEFALSEFALSENA000
15135DRUGTRUETRUETRUEMAR505
15136DRUGTRUETRUETRUEMAR505
15137DRUGTRUETRUETRUEMAR505
15144PLACEBOTRUEFALSEFALSENA000
15145PLACEBOTRUETRUETRUEMAR000
15146PLACEBOTRUETRUETRUEMAR000
15147PLACEBOTRUETRUETRUEMAR000
\n
\n```\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# delta adjusted dataset\nrbmi:::apply_delta(\n MI_10,\n delta = dat_delta_0_5,\n group = c(\"PATIENT\", \"VISIT\", \"THERAPY\"),\n outcome = \"CHANGE\"\n) |>\n dplyr::filter(PATIENT %in% c(\"1513\", \"1514\")) |>\n head(8) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGEPATIENT2
1513MDRUG7419245.0000000new_pt_5
1513MDRUGNA519NA2.0269651new_pt_5
1513MDRUGNA619NA-2.5973620new_pt_5
1513MDRUGNA719NA0.4445972new_pt_5
1514FPLACEBO7421232.0000000new_pt_6
1514FPLACEBONA521NA5.4422359new_pt_6
1514FPLACEBONA621NA1.9085397new_pt_6
1514FPLACEBONA721NA5.5719214new_pt_6
\n
\n```\n\n:::\n:::\n\n\nThe `delta_template()` function has two additional arguments, `delta` and `dlag`, that can be used to define the delta adjustments. We explain these arguments in more detail in the *flexible delta adjustments* section below.\n\n### Run analysis model with delta adjustment\n\nAs mentioned, delta adjustments are implemented via the `delta` argument of the `analyse()` function. The adjustment happens right after data imputation under MAR or MNAR (using reference-based imputation approaches), but before implementing the analysis model. Sensitivity analyses can therefore be performed without having to refit the imputation model, which is computationally efficient. This approach is considered a *marginal* delta adjustment approach, because the delta is simply added to the mean of the conditional multivariate normal distribution (conditional on the observed values and the covariates) for the imputation model ([Roger 2017](https://www.lshtm.ac.uk/research/centres-projects-groups/missing-data#dia-missing-data)).\n\nHere, we apply the delta adjustment of 5 to **all** imputed values of the outcome variable (CHANGE) in the intervention group. The estimated treatment effect at visit 7 is presented below.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nanaObj <- rbmi::analyse(\n imputations = imputeObj,\n fun = ancova,\n delta = dat_delta_0_5,\n vars = vars_analyse\n)\n\npoolObj <- rbmi::pool(anaObj)\n\npoolObj |>\n data.frame() |>\n dplyr::filter(grepl(\"7\", parameter)) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n \n
parameterestselciucipval
trt_7-1.5735521.1603874-3.8670890.71998581.771976e-01
lsm_ref_7-4.8478520.8057825-6.440495-3.25520901.396893e-08
lsm_alt_7-6.4214040.8301986-8.062492-4.78031491.714042e-12
\n
\n```\n\n:::\n:::\n\n\n## Tipping point analysis: MAR approach\n\n### Generate delta's: sequential delta adjustment for intervention arm\n\nTo perform a tipping point analysis under the MAR assumption, we must create a range of delta values. In this section, we only specify a range of delta's for the intervention group.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndelta_df1 <- expand.grid(\n delta_control = 0,\n delta_intervention = seq(-3, 8, by = 1)\n) |>\n as_tibble()\n```\n:::\n\n\n### Perform tipping point analysis\n\nTo enable a tipping point analysis within a single function, we create `perform_tipp_analysis()`. This custom function requires a stratified delta for `delta_control` and `delta_intervention`, alongside `cl` as set in the previous step.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nperform_tipp_analysis <- function(delta_control, delta_intervention) {\n dat_delta <- rbmi::delta_template(imputeObj) |>\n mutate(\n delta_ctl = (THERAPY == \"PLACEBO\") * is_missing * delta_control,\n delta_int = (THERAPY == \"DRUG\") * is_missing * delta_intervention,\n delta = delta_ctl + delta_int\n )\n\n anaObj <- rbmi::analyse(\n imputations = imputeObj,\n fun = ancova,\n delta = dat_delta,\n vars = vars_analyse\n )\n\n poolObj <- as.data.frame(pool(anaObj)) |>\n dplyr::filter(grepl(\"trt_7\", parameter))\n\n list(\n trt_7 = poolObj[[\"est\"]],\n pval_7 = poolObj[[\"pval\"]],\n lci_7 = poolObj[[\"lci\"]],\n uci_7 = poolObj[[\"uci\"]]\n )\n}\n```\n:::\n\n\nNow, let's apply this function to the antidepressant data as follows:\n\n**Note:** here we are adding some parallelisation using {furrr} to speed things up.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nworkers <- parallelly::availableCores(omit = 1)\nfuture::plan(multisession, workers = workers)\n\nMAR_tipp_df1 <- delta_df1 |> \n furrr::future_pmap(perform_tipp_analysis) |>\n purrr::reduce(bind_rows) \n\nMAR_tipp_df1 <- dplyr::bind_cols(delta_df1, MAR_tipp_df1)\n```\n:::\n\n\nThe results of the tipping point analysis under MAR 
with p-value $\\geq$ 0.05 are:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nMAR_tipp_df1 |>\n filter(pval_7 >= 0.05) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n \n
delta_controldelta_interventiontrt_7pval_7lci_7uci_7
03-2.0595760.07254522-4.3100130.1908596
04-1.8165640.11593824-4.0868040.4536758
05-1.5735520.17719757-3.8670890.7199858
06-1.3305390.25891287-3.6507620.9896844
07-1.0875270.36193374-3.4377101.2626565
08-0.8445140.48484698-3.2278071.5387788
\n
\n```\n\n:::\n:::\n\n\nThe results of the tipping point analysis under MAR with p-value $<$ 0.05 are:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nMAR_tipp_df1 |>\n filter(pval_7 < 0.05) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n \n
delta_controldelta_interventiontrt_7pval_7lci_7uci_7
0-3-3.5176510.002004271-5.726732-1.3085707
0-2-3.2746390.003905366-5.481171-1.0681066
0-1-3.0316260.007459998-5.239415-0.8238383
00-2.7886140.013881767-5.001455-0.5757725
01-2.5456010.025021537-4.767268-0.3239349
02-2.3025890.043476237-4.536808-0.0683701
\n
\n```\n\n:::\n:::\n\n\nWe can derive an **exact** tipping point by linearly interpolating between the last \"significant\" delta and the first \"non-significant\" delta using the `approx()` function.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndelta_tp <- approx(\n x = MAR_tipp_df1$pval_7,\n y = MAR_tipp_df1$delta_intervention,\n xout = 0.05\n)$y\n\ntrt_tp <- approx(\n x = MAR_tipp_df1$delta_intervention,\n y = MAR_tipp_df1$trt_7,\n xout = delta_tp\n)$y\n\nlci_tp <- approx(\n x = MAR_tipp_df1$delta_intervention,\n y = MAR_tipp_df1$lci_7,\n xout = delta_tp\n)$y\n\nuci_tp <- approx(\n x = MAR_tipp_df1$delta_intervention,\n y = MAR_tipp_df1$uci_7,\n xout = delta_tp\n)$y\n\ndata.frame(\n delta_control = 0,\n delta_intervention = delta_tp,\n trt_7 = trt_tp,\n pval_7 = 0.05,\n lci_7 = lci_tp,\n uci_7 = uci_tp\n) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n \n
delta_controldelta_interventiontrt_7pval_7lci_7uci_7
02.224423-2.2480510.05-4.48591-0.01019287
\n
\n```\n\n:::\n:::\n\n\n### Visualize results\n\nA nice visualization of this tipping point analysis for the MAR approach is shown below. The dashed horizontal line indicates a p-value of 0.05 in the left plot and no treatment effect in the right plot.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nMAR_est <- ggplot(MAR_tipp_df1, aes(delta_intervention, trt_7)) +\n geom_line() +\n geom_point() +\n geom_ribbon(\n aes(delta_intervention, ymin = lci_7, ymax = uci_7),\n alpha = 0.25\n ) +\n geom_hline(yintercept = 0.0, linetype = 2) +\n geom_vline(xintercept = delta_tp, linetype = 2) +\n scale_x_continuous(breaks = seq(-6, 10, 2)) +\n labs(x = \"Delta (intervention)\", y = \"Treatment effect (95% CI)\")\n\nMAR_pval <- ggplot(MAR_tipp_df1, aes(delta_intervention, pval_7)) +\n geom_line() +\n geom_point() +\n geom_hline(yintercept = 0.05, linetype = 2) +\n geom_vline(xintercept = delta_tp, linetype = 2) +\n scale_x_continuous(breaks = seq(-6, 10, 2)) +\n labs(x = \"Delta (intervention)\", y = \"P-value\")\n\ngrid.arrange(MAR_pval, MAR_est, nrow = 1)\n```\n\n::: {.cell-output-display}\n![](tipping_point_files/figure-html/MAR_v1_tipping_point_visualization-1.png){width=672}\n:::\n:::\n\n\nWe clearly see that the p-value under MAR reaches a tipping point from 3 onward in the range of delta's considered.\n\n### Delta adjustment for control and intervention arms\n\nLet's now create a sequence of delta's for the control group too, and carry out a second tipping point analysis under the MAR assumption.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndelta_df2 <- expand.grid(\n delta_control = seq(-3, 8, by = 1),\n delta_intervention = seq(-3, 8, by = 1)\n) |>\n as_tibble()\n```\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nMAR_tipp_df2 <- delta_df2 |>\n furrr::future_pmap(perform_tipp_analysis) |>\n purrr::reduce(bind_rows) \n\n# Adding back the delta's used for reference \nMAR_tipp_df2 <- dplyr::bind_cols(delta_df2, MAR_tipp_df2) |>\n mutate(\n pval = cut(\n pval_7,\n c(0, 0.001, 0.01, 0.05, 
0.2, 1),\n right = FALSE,\n labels = c(\n \"<0.001\",\n \"0.001 - <0.01\",\n \"0.01 - <0.05\",\n \"0.05 - <0.20\",\n \">= 0.20\"\n )\n )\n ) \n```\n:::\n\n\nWe can visualize the result of this tipping point analysis using a heatmap. Here, the **(0,0)** point corresponds to the original result without any delta adjustment (p \\~ 0.0125).\n\n\n::: {.cell}\n\n```{.r .cell-code}\nMAR_heat <- ggplot(\n MAR_tipp_df2,\n aes(delta_control, delta_intervention, fill = pval)\n) +\n geom_raster() +\n scale_fill_manual(\n values = c(\"darkgreen\", \"lightgreen\", \"lightyellow\", \"orange\", \"red\")\n ) +\n scale_x_continuous(breaks = seq(-5, 10, 1)) +\n scale_y_continuous(breaks = seq(-5, 10, 1)) +\n labs(x = \"Delta (control)\", y = \"Delta (intervention)\", fill = \"P-value\")\nMAR_heat\n```\n\n::: {.cell-output-display}\n![](tipping_point_files/figure-html/MAR_v2_tipping_point_visualization-1.png){width=672}\n:::\n:::\n\n\n## Comparison with rbmi MNAR approaches\n\n### Summary of results\n\nIn the table below we present the results of the different imputation strategies with varying number of multiple imputation draws, M = 500 and M = 5000. Note that the results can be slightly different from the results above due to a possible different seed. The estimates show the contrast at visit 7 between DRUG and PLACEBO (DRUG - PLACEBO). 
Delta adjustments were applied to **all** imputed missing data in the intervention group only.\n\n| Method | Delta control | Delta intervention at TP | Estimate at TP | 95% CI | P-value | Original estimate | Original p-value |\n|---------|---------|---------|---------|---------|---------|---------|---------|\n| MI - MAR (M=500) | 0 | 3 | -2.074 | -4.324 to 0.176 | 0.0709 | -2.798 | 0.0135 |\n| MI - MAR (M=5000) | 0 | 3 | -2.100 | -4.354 to 0.154 | 0.0675 | -2.829 | 0.0128 |\n| MI - MNAR JR (M=500) | 0 | -1 | -2.380 | -4.595 to -0.165 | 0.0354 | -2.137 | 0.0602 |\n| MI - MNAR JR (M=5000) | 0 | -1 | -2.383 | -4.608 to -0.157 | 0.0361 | -2.140 | 0.0611 |\n| MI - MNAR CR (M=500) | 0 | 1 | -2.151 | -4.359 to 0.057 | 0.0561 | -2.394 | 0.0326 |\n| MI - MNAR CR (M=5000) | 0 | 1 | -2.162 | -4.377 to 0.054 | 0.0558 | -2.405 | 0.0324 |\n| MI - MNAR CIR (M=500) | 0 | 2 | -1.986 | -4.211 to 0.239 | 0.0798 | -2.472 | 0.0274 |\n| MI - MNAR CIR (M=5000) | 0 | 2 | -1.994 | -4.227 to 0.239 | 0.0796 | -2.480 | 0.0274 |\n\nOf all considered approaches, the MAR approach yields the largest delta adjustment at its tipping point, with a delta intervention of 3 at both M = 500 and M = 5000. This indicates that the MAR assumption is the most robust against slight deviations of its conditions. Notice that for the MNAR JR approach we included, for completeness, tipping point analyses to know when the results switch from non-significant to significant. Correspondingly, two negative delta's (-1) are found at the tipping point for M = 500 and M = 5000. 
This is expected, given that the original analyses are non-significant (p \\~ 0.0602 and p \\~ 0.0611) and a tipping point analysis here aims to find the point at which the analysis turns to be significant, instead of non-significant.\n\n### Visual comparison\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/tipping_point/R_comparison_est.png){fig-align='center' width=80%}\n:::\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/tipping_point/R_comparison_pval.png){fig-align='center' width=80%}\n:::\n:::\n\n\n## Flexible delta adjustments\n\nSo far, we have only considered simple delta adjustments that add the same value to all imputed missing data. However, you may want to implement more flexible delta adjustments for post-ICE missing data, where the magnitude of the delta varies depending on the distance of the visit from the ICE visit.\n\nTo facilitate the creation of such flexible delta adjustments, the `delta_template()` function has two additional arguments `delta` and `dlag`:\n\n- `delta`: specifies the default amount of delta that should be applied to each post-ICE visit (default is NULL)\n- `dlag`: specifies the scaling coefficient to be applied based upon the visits proximity to the first visit affected by the ICE (default is NULL)\n\nThe usage of the `delta` and `dlag` arguments is best illustrated with a few examples from the [rbmi advanced functionality vignette](https://cran.r-project.org/web/packages/rbmi/vignettes/advanced.html#sensitivity-analyses-delta-adjustments-and-tipping-point-analyses).\n\n### Scaling delta by visit\n\nAssume a setting with 4 visits and the user specified `delta = c(5, 6, 7, 8)` and `dlag = c(1, 2, 3, 4)`. 
For a subject for whom the first visit affected by the ICE is visit 2, these values of `delta` and `dlag` would imply the following delta offset:\n\n| | Visit 1 | Visit 2 | Visit 3 | Visit 4 |\n|----------------|---------|---------|---------|---------|\n| Delta | 5 | 6 | 7 | 8 |\n| Dlag | 0 | 1 | 2 | 3 |\n| Delta \\* dlag | 0 | 6 | 14 | 24 |\n| Cumulative sum | 0 | 6 | 20 | 44 |\n\nThat is, the subject would have a delta adjustment of 0 applied to visit 1, 6 for visit 2, 20 for visit 3 and 44 for visit 4.\n\nAssume instead, that the subject’s first visit affected by the ICE was visit 3. Then, the above values of `delta` and `dlag` would imply the following delta adjustment:\n\n| | Visit 1 | Visit 2 | Visit 3 | Visit 4 |\n|----------------|---------|---------|---------|---------|\n| Delta | 5 | 6 | 7 | 8 |\n| Dlag | 0 | 0 | 1 | 2 |\n| Delta \\* dlag | 0 | 0 | 7 | 16 |\n| Cumulative sum | 0 | 0 | 7 | 23 |\n\nAnd thus the subject would have a delta adjustment of 0 applied to visits 1 and 2, 7 for visit 3 and 23 for visit 4.\n\nAnother way of using these arguments is to set `delta` to the difference in time between visits and `dlag` to be the amount of delta per unit of time. For example, let’s say that visits occur on weeks 1, 5, 6 and 9 and that we want a delta of 3 to be applied for each week after an ICE. For simplicity, we assume that the ICE occurs immediately after the subject’s last visit which is not affected by the ICE. This could be achieved by setting `delta = c(1, 4, 1, 3)`, i.e. 
the difference in weeks between each visit, and `dlag = c(3, 3, 3, 3)`.\n\nAssume a subject’s first visit affected by the ICE was visit 2, then these values of `delta` and `dlag` would imply the following delta adjustment:\n\n| | Visit 1 | Visit 2 | Visit 3 | Visit 4 |\n|----------------|---------|---------|---------|---------|\n| Delta | 1 | 4 | 1 | 3 |\n| Dlag | 0 | 3 | 3 | 3 |\n| Delta \\* dlag | 0 | 12 | 3 | 9 |\n| Cumulative sum | 0 | 12 | 15 | 24 |\n\nLet's now consider the antidepressant data again. Suppose we apply a delta adjustment of 2 for each week following an ICE in the intervention group only. For example, if the ICE took place immediately after visit 4, then the cumulative delta applied to a missing value from visit 5 would be 2, from visit 6 would be 4, and from visit 7 would be 6.\n\nTo program this, we first use the `delta` and `dlag` arguments of `delta_template()`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat_delta <- rbmi::delta_template(\n imputeObj,\n delta = c(2, 2, 2, 2),\n dlag = c(1, 1, 1, 1)\n)\n\ndat_delta |>\n dplyr::filter(PATIENT %in% c(\"1513\", \"1514\")) |>\n head(8) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n
PATIENTVISITTHERAPYis_maris_missingis_post_icestrategydelta
15134DRUGTRUEFALSEFALSENA0
15135DRUGTRUETRUETRUEMAR2
15136DRUGTRUETRUETRUEMAR4
15137DRUGTRUETRUETRUEMAR6
15144PLACEBOTRUEFALSEFALSENA0
15145PLACEBOTRUETRUETRUEMAR2
15146PLACEBOTRUETRUETRUEMAR4
15147PLACEBOTRUETRUETRUEMAR6
\n
\n```\n\n:::\n:::\n\n\nThen, we use some metadata variables provided by `delta_template()` to manually reset the delta values for the control group back to 0.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat_delta <- dat_delta |>\n mutate(delta = if_else(THERAPY == \"PLACEBO\", 0, delta))\n\ndat_delta |>\n dplyr::filter(PATIENT %in% c(\"1513\", \"1514\")) |>\n head(8) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n
PATIENTVISITTHERAPYis_maris_missingis_post_icestrategydelta
15134DRUGTRUEFALSEFALSENA0
15135DRUGTRUETRUETRUEMAR2
15136DRUGTRUETRUETRUEMAR4
15137DRUGTRUETRUETRUEMAR6
15144PLACEBOTRUEFALSEFALSENA0
15145PLACEBOTRUETRUETRUEMAR0
15146PLACEBOTRUETRUETRUEMAR0
15147PLACEBOTRUETRUETRUEMAR0
\n
\n```\n\n:::\n:::\n\n\nAnd lastly we use `dat_delta` to apply the desired delta offset to our analysis model under the `delta` argument of the `analyse()` function.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nanaObj <- rbmi::analyse(\n imputations = imputeObj,\n fun = ancova,\n delta = dat_delta,\n vars = vars_analyse\n)\n\npoolObj <- rbmi::pool(anaObj)\n```\n:::\n\n\n### Fixed delta\n\nYou may also add a simple, fixed delta using the `delta` and `dlag` arguments. To do this, `delta` should be specified as a vector of length equal to the amount of visits, e.g. `c(5, 5, 5, 5)`, while `dlag` should be `c(1, 0, 0, 0)`. This ensures a delta of 5 is added to each imputed missing value following an ICE, which we here assume to occur at the visit 2:\n\n| | Visit 1 | Visit 2 | Visit 3 | Visit 4 |\n|----------------|---------|---------|---------|---------|\n| Delta | 5 | 5 | 5 | 5 |\n| Dlag | 0 | 1 | 0 | 0 |\n| Delta \\* dlag | 0 | 0 | 0 | 0 |\n| Cumulative sum | 0 | 5 | 5 | 5 |\n\nAdding a fixed delta in this way seems similar to what we explained in the *simple delta adjustments* section above, but there are some crucial differences. 
Remember the first case where we added delta = 5 to all imputed `is_missing` values:\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# 1) mutate delta = is_missing * 5\ndat_delta_5_v1 <- delta_template(imputations = imputeObj) |>\n mutate(delta = is_missing * 5)\n```\n:::\n\n\nAnd remember the second case where we added delta = 5 to all imputed `is_missing` and `is_post_ice` values:\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# 2) mutate delta = is_missing * is_post_ice * 5\ndat_delta_5_v2 <- delta_template(imputations = imputeObj) |>\n mutate(delta = is_missing * is_post_ice * 5)\n```\n:::\n\n\nSimilarly, we now set `delta = c(5, 5, 5, 5)` and `dlag = c(1, 0, 0, 0)`:\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# 3) delta = c(5, 5, 5, 5), dlag = c(1, 0, 0, 0)\ndat_delta_5_v3 <- delta_template(\n imputeObj,\n delta = c(5, 5, 5, 5),\n dlag = c(1, 0, 0, 0)\n)\n```\n:::\n\n\nThe difference between these three approaches lies in how they treat intermittent missing values that do not correspond to study drug discontinuation due to an ICE.\n\nIf we consider patient 3618 again, we see that its intermittent missing value at visit 5 has delta = 5 added in the first approach (using `is_missing * 5`), while this missing value is not considered at all to receive a delta adjustment in the second or third approach (using `is_missing * is_post_ice * 5`, or `delta = c(5, 5, 5, 5)` and `dlag = c(1, 0, 0, 0)`). Thus by default, the `delta` and `dlag` arguments of `delta_template()` (third approach) only add delta adjustments to post-ICE missing values.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# imputed dataset without delta adjustment\ngt(MI_10 |> dplyr::filter(PATIENT == \"3618\"))\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGEPATIENT2
3618MDRUG848157.00000new_pt_99
3618MDRUGNA58NA1.28834new_pt_99
3618MDRUG2868146.00000new_pt_99
3618MDRUG4278102.00000new_pt_99
\n
\n```\n\n:::\n\n```{.r .cell-code}\n# 1) mutate delta = is_missing * 5\nrbmi:::apply_delta(\n MI_10,\n delta = dat_delta_5_v1,\n group = c(\"PATIENT\", \"VISIT\", \"THERAPY\"),\n outcome = \"CHANGE\"\n) |>\n dplyr::filter(PATIENT == 3618) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGEPATIENT2
3618MDRUG848157.00000new_pt_99
3618MDRUGNA58NA6.28834new_pt_99
3618MDRUG2868146.00000new_pt_99
3618MDRUG4278102.00000new_pt_99
\n
\n```\n\n:::\n\n```{.r .cell-code}\n# 2) mutate delta = is_missing * is_post_ice * 5\nrbmi:::apply_delta(\n MI_10,\n delta = dat_delta_5_v2,\n group = c(\"PATIENT\", \"VISIT\", \"THERAPY\"),\n outcome = \"CHANGE\"\n) |>\n dplyr::filter(PATIENT == 3618) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGEPATIENT2
3618MDRUG848157.00000new_pt_99
3618MDRUGNA58NA1.28834new_pt_99
3618MDRUG2868146.00000new_pt_99
3618MDRUG4278102.00000new_pt_99
\n
\n```\n\n:::\n\n```{.r .cell-code}\n# 3) delta = c(5, 5, 5, 5), dlag = c(1, 0, 0, 0)\nrbmi:::apply_delta(\n MI_10,\n delta = dat_delta_5_v3,\n group = c(\"PATIENT\", \"VISIT\", \"THERAPY\"),\n outcome = \"CHANGE\"\n) |>\n dplyr::filter(PATIENT == 3618) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGEPATIENT2
3618MDRUG848157.00000new_pt_99
3618MDRUGNA58NA1.28834new_pt_99
3618MDRUG2868146.00000new_pt_99
3618MDRUG4278102.00000new_pt_99
\n
\n```\n\n:::\n:::\n\n\nOne should be aware of this discrepancy when using the `rbmi` package for delta adjustments. For all tipping point analyses performed under MAR and MNAR in this tutorial, we adopted the first approach and applied delta adjustments to **all** imputed missing data. In contrast, we note that the `five macros` in SAS uses the second `delta` and `dlag` approach as discussed here, i.e. it does not apply delta adjustments to intermittent missing values. This could have important implications for datasets with high proportions of intermittent missing values, as it could alter the results of the tipping point analysis substantially.\n\n## References\n\n[Cro et al. 2020](https://pubmed.ncbi.nlm.nih.gov/32419182/). Sensitivity analysis for clinical trials with missing continuous outcome data using controlled multiple imputation: A practical guide. *Statistics in Medicine*. 2020;39(21):2815-2842.\n\n[Gower-Page et al. 2022](https://joss.theoj.org/papers/10.21105/joss.04251). rbmi: A R package for standard and reference-based multiple imputation methods. *Journal of Open Source Software* 7(74):4251.\n\n[rbmi: Advanced Functionality](https://cran.r-project.org/web/packages/rbmi/vignettes/advanced.html#sensitivity-analyses-delta-adjustments-and-tipping-point-analyses)\n\n[rbmi: Quickstart](https://cran.r-project.org/web/packages/rbmi/vignettes/quickstart.html)\n\n[rbmi: Reference-Based Multiple Imputation](https://cran.r-project.org/web/packages/rbmi/index.html)\n\n[Roger 2022](https://baselbiometrics.github.io/home/docs/talks/20221208/5_JamesRoger%2020121118.pdf). Other statistical software for continuous longitudinal endpoints: SAS macros for multiple imputation. Addressing intercurrent events: Treatment policy and hypothetical strategies. *Joint EFSPI and BBS virtual event.*\n\n[Roger 2017](https://www.lshtm.ac.uk/research/centres-projects-groups/missing-data#dia-missing-data). 
Fitting reference-based models for missing data to longitudinal repeated-measures Normal data. User guide five macros.\n\n::: {.callout-note collapse=\"true\" title=\"Session info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-23\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P assertthat 0.2.1 2019-03-21 [?] RSPM\n P backports 1.5.0 2024-05-23 [?] RSPM\n boot 1.3-32 2025-08-29 [2] CRAN (R 4.5.2)\n P broom 1.0.12 2026-01-27 [?] RSPM\n P callr 3.7.6 2024-03-25 [?] RSPM\n P checkmate 2.3.4 2026-02-03 [?] RSPM\n P cli 3.6.5 2025-04-23 [?] RSPM\n codetools 0.2-20 2024-03-31 [2] CRAN (R 4.5.2)\n P curl 7.0.0 2025-08-19 [?] RSPM\n P digest 0.6.39 2025-11-19 [?] RSPM\n P dplyr * 1.2.0 2026-02-03 [?] RSPM\n P emmeans * 2.0.1 2025-12-16 [?] RSPM\n P estimability 1.5.1 2024-05-12 [?] RSPM\n P evaluate 1.0.5 2025-08-27 [?] RSPM\n P farver 2.1.2 2024-05-13 [?] RSPM\n P fastmap 1.2.0 2024-05-15 [?] RSPM\n P forcats 1.0.1 2025-09-25 [?] RSPM\n P foreach 1.5.2 2022-02-02 [?] RSPM\n P fs 1.6.6 2025-04-12 [?] RSPM\n P furrr * 0.3.1 2022-08-15 [?] RSPM\n P future * 1.69.0 2026-01-16 [?] RSPM\n P generics 0.1.4 2025-05-09 [?] RSPM\n P ggplot2 * 4.0.2 2026-02-03 [?] RSPM\n P glmnet 4.1-10 2025-07-17 [?] RSPM\n P globals 0.19.0 2026-02-02 [?] RSPM\n P glue 1.8.0 2024-09-30 [?] RSPM\n P gridExtra * 2.3 2017-09-09 [?] RSPM\n P gt * 1.3.0 2026-01-22 [?] RSPM\n P gtable 0.3.6 2024-10-25 [?] 
RSPM\n P haven 2.5.5 2025-05-30 [?] RSPM\n P hms 1.1.4 2025-10-17 [?] RSPM\n P htmltools 0.5.9 2025-12-04 [?] RSPM\n P htmlwidgets 1.6.4 2023-12-06 [?] RSPM\n P inline 0.3.21 2025-01-09 [?] RSPM\n P iterators 1.0.14 2022-02-05 [?] RSPM\n P jinjar 0.3.2 2025-03-13 [?] RSPM\n P jomo 2.7-6 2023-04-15 [?] RSPM\n P jsonlite 2.0.0 2025-03-27 [?] RSPM\n P knitr 1.51 2025-12-20 [?] RSPM\n P labeling 0.4.3 2023-08-29 [?] RSPM\n P labelled * 2.16.0 2025-10-22 [?] RSPM\n lattice 0.22-7 2025-04-02 [2] CRAN (R 4.5.2)\n P lifecycle 1.0.5 2026-01-08 [?] RSPM\n P listenv 0.10.0 2025-11-02 [?] RSPM\n P lme4 1.1-38 2025-12-02 [?] RSPM\n P loo 2.9.0 2025-12-23 [?] RSPM\n P magrittr 2.0.4 2025-09-12 [?] RSPM\n MASS 7.3-65 2025-02-28 [2] CRAN (R 4.5.2)\n Matrix 1.7-4 2025-08-28 [2] CRAN (R 4.5.2)\n P matrixStats 1.5.0 2025-01-07 [?] RSPM\n P mice * 3.19.0 2025-12-10 [?] RSPM\n P minqa 1.2.8 2024-08-17 [?] RSPM\n P mitml 0.4-5 2023-03-08 [?] RSPM\n P mmrm * 0.3.17 2026-01-08 [?] RSPM\n P multcomp 1.4-29 2025-10-20 [?] RSPM\n P mvtnorm 1.3-3 2025-01-10 [?] RSPM\n nlme 3.1-168 2025-03-31 [2] CRAN (R 4.5.2)\n P nloptr 2.2.1 2025-03-17 [?] RSPM\n nnet 7.3-20 2025-01-01 [2] CRAN (R 4.5.2)\n P otel 0.2.0 2025-08-29 [?] RSPM\n P pan 1.9 2023-12-07 [?] RSPM\n P parallelly * 1.46.1 2026-01-08 [?] RSPM\n P pillar 1.11.1 2025-09-17 [?] RSPM\n P pkgbuild 1.4.8 2025-05-26 [?] RSPM\n P pkgconfig 2.0.3 2019-09-22 [?] RSPM\n P processx 3.8.6 2025-02-21 [?] RSPM\n P ps 1.9.1 2025-04-12 [?] RSPM\n P purrr * 1.2.1 2026-01-09 [?] RSPM\n P QuickJSR 1.9.0 2026-01-25 [?] RSPM\n P R6 2.6.1 2025-02-15 [?] RSPM\n P rbibutils 2.4.1 2026-01-21 [?] RSPM\n P rbmi * 1.6.0 2026-01-23 [?] RSPM\n P RColorBrewer 1.1-3 2022-04-03 [?] RSPM\n P Rcpp 1.1.1 2026-01-10 [?] RSPM\n P RcppParallel 5.1.11-1 2025-08-27 [?] RSPM\n P Rdpack 2.6.6 2026-02-08 [?] RSPM\n P reformulas 0.4.4 2026-02-02 [?] RSPM\n renv 1.0.10 2024-10-05 [1] RSPM (R 4.5.2)\n P rlang 1.1.7 2026-01-09 [?] RSPM\n P rmarkdown 2.30 2025-09-28 [?] 
RSPM\n rpart 4.1.24 2025-01-07 [2] CRAN (R 4.5.2)\n P rstan * 2.32.7 2025-03-10 [?] RSPM\n P S7 0.2.1 2025-11-14 [?] RSPM\n P sandwich 3.1-1 2024-09-15 [?] RSPM\n P sass 0.4.10 2025-04-11 [?] RSPM\n P scales 1.4.0 2025-04-24 [?] RSPM\n P sessioninfo 1.2.3 2025-02-05 [?] RSPM\n P shape 1.4.6.1 2024-02-23 [?] RSPM\n P StanHeaders * 2.32.10 2024-07-15 [?] RSPM\n P stringi 1.8.7 2025-03-27 [?] RSPM\n P stringr 1.6.0 2025-11-04 [?] RSPM\n survival 3.8-3 2024-12-17 [2] CRAN (R 4.5.2)\n P TH.data 1.1-5 2025-11-17 [?] RSPM\n P tibble 3.3.1 2026-01-11 [?] RSPM\n P tidyr * 1.3.2 2025-12-19 [?] RSPM\n P tidyselect 1.2.1 2024-03-11 [?] RSPM\n P TMB 1.9.19 2025-12-15 [?] RSPM\n P utf8 1.2.6 2025-06-08 [?] RSPM\n P V8 8.0.1 2025-10-10 [?] RSPM\n P vctrs 0.7.1 2026-01-23 [?] RSPM\n P withr 3.0.2 2024-10-28 [?] RSPM\n P xfun 0.56 2026-01-18 [?] RSPM\n P xml2 1.5.2 2026-01-17 [?] RSPM\n P xtable 1.8-4 2019-04-21 [?] RSPM\n P yaml 2.3.12 2025-12-10 [?] RSPM\n P zoo 1.8-15 2025-12-15 [?] RSPM\n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n\n * ── Packages attached to the search path.\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n:::\n", + "markdown": "---\ntitle: \"R Tipping Point (Delta Adjustment): Continuous Data\"\n---\n\n\n\n# Tipping Point / Delta Adjustment\n\n## Setup\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# General libraries\nlibrary(mice)\nlibrary(dplyr)\nlibrary(tidyr)\nlibrary(gt)\nlibrary(labelled)\nlibrary(purrr)\nlibrary(ggplot2)\nlibrary(gridExtra)\n\n# Methodology specific libraries\nlibrary(emmeans)\nlibrary(mmrm)\nlibrary(rstan)\nlibrary(rbmi)\n\n# Paralleisation libraries\nlibrary(future)\nlibrary(furrr)\nlibrary(parallelly)\n```\n:::\n\n\n### Random seed\n\n\n::: {.cell}\n\n```{.r .cell-code}\nset.seed(12345)\n```\n:::\n\n\n## 
Reference-based multiple imputation (rbmi)\n\n### Methodology introduction\n\nThe concept of delta adjustment and tipping point analysis builds on the framework of reference-based multiple imputation (rbmi) as seen on its respective [CAMIS webpage](../R/rbmi_continuous_joint.html). The use of the `rbmi` package in R ([Gower-Page et al. 2022](https://joss.theoj.org/papers/10.21105/joss.04251)) for the following standard and reference-based multiple imputation approaches are introduced there:\n\n- Missing At Random (MAR)\n\n- Jump to Reference (JR)\n\n- Copy Reference (CR)\n\n- Copy Increment from Reference (CIR)\n\nPlease make sure to familiarize yourself with these functionalities of the `rbmi` package before checking this tutorial. The outline of this page generally follows the [rbmi advanced functionality vignette](https://cran.r-project.org/web/packages/rbmi/vignettes/advanced.html#sensitivity-analyses-delta-adjustments-and-tipping-point-analyses).\n\n### Data\n\nThe same publicly available [dataset](https://r-packages.io/datasets/antidepressant_data) from an antidepressant clinical trial that was used to illustrate `rbmi` is again used for this tutorial. This dataset is also used in the [rbmi quickstart vignette](https://cran.r-project.org/web/packages/rbmi/vignettes/quickstart.html).\n\nThe relevant endpoint for the antidepressant trial was assessed using the Hamilton 17-item depression rating scale (HAMD17), which was measured at baseline and subsequently at weeks 1, 2, 3, 4 and 6 (visits 4-7). Study drug discontinuation occurred in 24% (20/84) of subjects in the active drug group, compared to 26% (23/88) of subjects in the placebo group. 
Importantly, all data after study drug discontinuation are missing and there is a single intermittent missing observation.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndata(\"antidepressant_data\")\n\ndat <- antidepressant_data |>\n dplyr::select(\n PATIENT,\n GENDER,\n THERAPY,\n RELDAYS,\n VISIT,\n BASVAL,\n HAMDTL17,\n CHANGE\n ) |>\n dplyr::mutate(THERAPY = factor(THERAPY, levels = c(\"PLACEBO\", \"DRUG\"))) |>\n labelled::remove_labels()\n\ngt(head(dat, n = 10))\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGE
1503FDRUG743221-11
1503FDRUG1453220-12
1503FDRUG2863219-13
1503FDRUG4273217-15
1507FPLACEBO741411-3
1507FPLACEBO15514140
1507FPLACEBO296149-5
1507FPLACEBO427145-9
1509FDRUG742120-1
1509FDRUG1452118-3
\n
\n```\n\n:::\n:::\n\n\nThe number of patients per visit and treatment group are:\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat |>\n dplyr::summarise(N = n(), .by = c(VISIT, THERAPY))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 8 × 3\n VISIT THERAPY N\n \n1 4 DRUG 84\n2 5 DRUG 77\n3 6 DRUG 73\n4 7 DRUG 64\n5 4 PLACEBO 88\n6 5 PLACEBO 81\n7 6 PLACEBO 76\n8 7 PLACEBO 65\n```\n\n\n:::\n:::\n\n\nThe mean change from baseline of the HAMD17 endpoint per visit and treatment group using only the complete cases are:\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat |>\n dplyr::summarise(N = n(), MEAN = mean(CHANGE), .by = c(VISIT, THERAPY))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 8 × 4\n VISIT THERAPY N MEAN\n \n1 4 DRUG 84 -1.82\n2 5 DRUG 77 -4.71\n3 6 DRUG 73 -6.79\n4 7 DRUG 64 -8.34\n5 4 PLACEBO 88 -1.51\n6 5 PLACEBO 81 -2.70\n7 6 PLACEBO 76 -4.07\n8 7 PLACEBO 65 -5.14\n```\n\n\n:::\n:::\n\n\nThe missingness pattern is:\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat_wide = dat |>\n dplyr::select(PATIENT, VISIT, CHANGE) |>\n pivot_wider(\n id_cols = PATIENT,\n names_from = VISIT,\n names_prefix = \"VISIT_\",\n values_from = CHANGE\n )\n\ndat_wide |>\n dplyr::select(starts_with(\"VISIT_\")) |>\n mice::md.pattern(plot = TRUE, rotate.names = TRUE)\n```\n\n::: {.cell-output-display}\n![](tipping_point_files/figure-html/data_exploration_4-1.png){width=672}\n:::\n\n::: {.cell-output .cell-output-stdout}\n\n```\n VISIT_4 VISIT_5 VISIT_6 VISIT_7 \n128 1 1 1 1 0\n20 1 1 1 0 1\n10 1 1 0 0 2\n1 1 0 1 1 1\n13 1 0 0 0 3\n 0 14 23 43 80\n```\n\n\n:::\n:::\n\n\nThere is a single patient with an intermittent missing observation at visit 5, which is patient 3618. 
Special considerations need to be taken when applying delta adjustments to intermittent missing observations like this one (more on this later).\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat_expand <- rbmi::expand_locf(\n dat,\n PATIENT = levels(dat$PATIENT),\n VISIT = levels(dat$VISIT),\n vars = c(\"BASVAL\", \"THERAPY\", \"GENDER\"),\n group = c(\"PATIENT\"),\n order = c(\"PATIENT\", \"VISIT\")\n)\n\ndat_expand |>\n dplyr::filter(PATIENT == \"3618\") |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGE
3618MDRUG848157
3618MDRUGNA58NANA
3618MDRUG2868146
3618MDRUG4278102
\n
\n```\n\n:::\n:::\n\n\n### Preparation\n\nThis tutorial will focus on tipping point analysis and delta adjustment. We assume the user used the `rbmi` package to create an imputation object called `imputeObj` (see [CAMIS webpage](../R/rbmi_continuous_joint.html)).\n\n\n\n\n\n\n\n## Tipping point analysis and delta adjustment\n\n### Methodology introduction\n\nWhen analyses for endpoints are performed under MAR or MNAR assumptions for missing data, it is important to perform sensitivity analyses to assess the impact of deviations from these assumptions. Tipping point analysis (or delta adjustment method) is an example of a sensitivity analysis that can be used to assess the robustness of a clinical trial when its result is based on imputed missing data.\n\nGenerally, tipping point analysis explores the influence of missingness on the overall conclusion of the treatment difference by shifting imputed missing values in the treatment group towards the reference group until the result becomes non-significant. The tipping point is the minimum shift needed to make the result non-significant. If the minimum shift needed to make the result non-significant is implausible, then greater confidence in the primary results can be inferred.\n\nTipping point analysis generally happens by adjusting imputed values by so-called delta values. The observed tipping point is the minimum delta needed to make the result non-significant. Usually, a range of delta values is explored and only imputed values from the active treatment group are adjusted by the delta value. However, delta adjustments in the control group are possible as well. Naturally, the range of acceptable values for delta should be agreed a priori, before taking this approach.\n\nFor an extensive discussion on delta adjustment methods, we refer to [Cro et al. 
2020](https://pubmed.ncbi.nlm.nih.gov/32419182/).\n\n## Simple delta adjustments\n\n### Generate delta's\n\nIn the `rbmi` package, the `delta` argument of the `analyse()` function allows users to adjust the imputed datasets prior to the analysis stage. This `delta` argument requires a data frame created by `delta_template()`, which includes a column called `delta` that specifies the delta values to be added.\n\nBy default, `delta_template()` will set `delta` to 0 for all observations.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# delta template\ndat_delta_0 <- delta_template(imputations = imputeObj)\n\n\ndat_delta_0 |>\n dplyr::filter(PATIENT %in% c(\"1513\", \"1514\")) |>\n head(8) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n
PATIENTVISITTHERAPYis_maris_missingis_post_icestrategydelta
15134DRUGTRUEFALSEFALSENA0
15135DRUGTRUETRUETRUEMAR0
15136DRUGTRUETRUETRUEMAR0
15137DRUGTRUETRUETRUEMAR0
15144PLACEBOTRUEFALSEFALSENA0
15145PLACEBOTRUETRUETRUEMAR0
15146PLACEBOTRUETRUETRUEMAR0
15147PLACEBOTRUETRUETRUEMAR0
\n
\n```\n\n:::\n:::\n\n\nYou can add the delta values to the outcome variable (CHANGE) of one of the imputed datasets by using the `apply_delta()` function. Of course, nothing is changed here as delta = 0.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nimputed_dfs = rbmi::extract_imputed_dfs(imputeObj)\nMI_10 = imputed_dfs[[10]]\nMI_10$PATIENT2 = MI_10$PATIENT\nMI_10$PATIENT = dat_expand$PATIENT\n```\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# imputed dataset\ngt(MI_10 |> dplyr::filter(PATIENT %in% c(\"1513\", \"1514\")) |> head(8))\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGEPATIENT2
1513MDRUG7419245.000000new_pt_5
1513MDRUGNA519NA-2.297931new_pt_5
1513MDRUGNA619NA-6.307404new_pt_5
1513MDRUGNA719NA-3.354081new_pt_5
1514FPLACEBO7421232.000000new_pt_6
1514FPLACEBONA521NA4.878866new_pt_6
1514FPLACEBONA621NA2.342392new_pt_6
1514FPLACEBONA721NA7.052081new_pt_6
\n
\n```\n\n:::\n\n```{.r .cell-code}\n# delta adjusted dataset\nrbmi:::apply_delta(\n MI_10,\n delta = dat_delta_0,\n group = c(\"PATIENT\", \"VISIT\", \"THERAPY\"),\n outcome = \"CHANGE\"\n) |>\n dplyr::filter(PATIENT %in% c(\"1513\", \"1514\")) |>\n head(8) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGEPATIENT2
1513MDRUG7419245.000000new_pt_5
1513MDRUGNA519NA-2.297931new_pt_5
1513MDRUGNA619NA-6.307404new_pt_5
1513MDRUGNA719NA-3.354081new_pt_5
1514FPLACEBO7421232.000000new_pt_6
1514FPLACEBONA521NA4.878866new_pt_6
1514FPLACEBONA621NA2.342392new_pt_6
1514FPLACEBONA721NA7.052081new_pt_6
\n
\n```\n\n:::\n:::\n\n\nYou may have noticed that the `is_missing` and `is_post_ice` columns of the delta data frame lend themselves perfectly to adjust the delta values, as the boolean variables `TRUE` and `FALSE` are regarded as 1 and 0 by R. If you want to set delta to 5 for all missing values, you can do so by multiplying the `is_missing` column by 5. In our case, this addition assumes a \"worsening\" of the imputed outcome variable, CHANGE, which is measured on the HAMD17 scale.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# delta template\ndat_delta_5_v1 <- delta_template(imputations = imputeObj) |>\n mutate(delta = is_missing * 5)\n\ndat_delta_5_v1 |>\n dplyr::filter(PATIENT %in% c(\"1513\", \"1514\")) |>\n head(8) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n
PATIENTVISITTHERAPYis_maris_missingis_post_icestrategydelta
15134DRUGTRUEFALSEFALSENA0
15135DRUGTRUETRUETRUEMAR5
15136DRUGTRUETRUETRUEMAR5
15137DRUGTRUETRUETRUEMAR5
15144PLACEBOTRUEFALSEFALSENA0
15145PLACEBOTRUETRUETRUEMAR5
15146PLACEBOTRUETRUETRUEMAR5
15147PLACEBOTRUETRUETRUEMAR5
\n
\n```\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# imputed dataset\ngt(MI_10 |> dplyr::filter(PATIENT %in% c(\"1513\", \"1514\")) |> head(8))\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGEPATIENT2
1513MDRUG7419245.000000new_pt_5
1513MDRUGNA519NA-2.297931new_pt_5
1513MDRUGNA619NA-6.307404new_pt_5
1513MDRUGNA719NA-3.354081new_pt_5
1514FPLACEBO7421232.000000new_pt_6
1514FPLACEBONA521NA4.878866new_pt_6
1514FPLACEBONA621NA2.342392new_pt_6
1514FPLACEBONA721NA7.052081new_pt_6
\n
\n```\n\n:::\n\n```{.r .cell-code}\n# delta adjusted dataset\nrbmi:::apply_delta(\n MI_10,\n delta = dat_delta_5_v1,\n group = c(\"PATIENT\", \"VISIT\", \"THERAPY\"),\n outcome = \"CHANGE\"\n) |>\n dplyr::filter(PATIENT %in% c(\"1513\", \"1514\")) |>\n head(8) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGEPATIENT2
1513MDRUG7419245.000000new_pt_5
1513MDRUGNA519NA2.702069new_pt_5
1513MDRUGNA619NA-1.307404new_pt_5
1513MDRUGNA719NA1.645919new_pt_5
1514FPLACEBO7421232.000000new_pt_6
1514FPLACEBONA521NA9.878866new_pt_6
1514FPLACEBONA621NA7.342392new_pt_6
1514FPLACEBONA721NA12.052081new_pt_6
\n
\n```\n\n:::\n:::\n\n\nImportantly, if you multiply the `is_missing` column only, you apply the delta adjustment to **all** imputed missing values, including intermittent missing values. This can be checked by looking at patient 3618, which has an intermittent missing value at visit 5.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# delta template\ngt(dat_delta_5_v1 |> dplyr::filter(PATIENT == \"3618\"))\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n
PATIENTVISITTHERAPYis_maris_missingis_post_icestrategydelta
36184DRUGTRUEFALSEFALSENA0
36185DRUGTRUETRUEFALSEMAR5
36186DRUGTRUEFALSEFALSENA0
36187DRUGTRUEFALSEFALSENA0
\n
\n```\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# imputed dataset\ngt(MI_10 |> dplyr::filter(PATIENT == \"3618\"))\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGEPATIENT2
3618MDRUG848157.000000new_pt_99
3618MDRUGNA58NA1.135327new_pt_99
3618MDRUG2868146.000000new_pt_99
3618MDRUG4278102.000000new_pt_99
\n
\n```\n\n:::\n\n```{.r .cell-code}\n# delta adjusted dataset\nrbmi:::apply_delta(\n MI_10,\n delta = dat_delta_5_v1,\n group = c(\"PATIENT\", \"VISIT\", \"THERAPY\"),\n outcome = \"CHANGE\"\n) |>\n dplyr::filter(PATIENT == \"3618\") |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGEPATIENT2
3618MDRUG848157.000000new_pt_99
3618MDRUGNA58NA6.135327new_pt_99
3618MDRUG2868146.000000new_pt_99
3618MDRUG4278102.000000new_pt_99
\n
\n```\n\n:::\n:::\n\n\nIf you consider the `is_post_ice` column too, you can restrict the delta adjustment to missing values that occur after study drug discontinuation due to an intercurrent event (ICE). By multiplying both the `is_missing` and `is_post_ice` columns by your chosen delta, the delta value will only be added when both columns are `TRUE`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# delta template\ndat_delta_5_v2 <- delta_template(imputations = imputeObj) |>\n mutate(delta = is_missing * is_post_ice * 5)\n\ndat_delta_5_v2 |>\n dplyr::filter(PATIENT == \"3618\") |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n
PATIENTVISITTHERAPYis_maris_missingis_post_icestrategydelta
36184DRUGTRUEFALSEFALSENA0
36185DRUGTRUETRUEFALSEMAR0
36186DRUGTRUEFALSEFALSENA0
36187DRUGTRUEFALSEFALSENA0
\n
\n```\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# imputed dataset\ngt(MI_10 |> dplyr::filter(PATIENT == \"3618\"))\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGEPATIENT2
3618MDRUG848157.000000new_pt_99
3618MDRUGNA58NA1.135327new_pt_99
3618MDRUG2868146.000000new_pt_99
3618MDRUG4278102.000000new_pt_99
\n
\n```\n\n:::\n\n```{.r .cell-code}\n# delta adjusted dataset\nrbmi:::apply_delta(\n MI_10,\n delta = dat_delta_5_v2,\n group = c(\"PATIENT\", \"VISIT\", \"THERAPY\"),\n outcome = \"CHANGE\"\n) |>\n dplyr::filter(PATIENT == \"3618\") |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGEPATIENT2
3618MDRUG848157.000000new_pt_99
3618MDRUGNA58NA1.135327new_pt_99
3618MDRUG2868146.000000new_pt_99
3618MDRUG4278102.000000new_pt_99
\n
\n```\n\n:::\n:::\n\n\nBesides choosing which missing data to apply the delta adjustment to, you may also want to apply different delta adjustments to imputed data from the different groups. As an example, let's set delta = 0 for the control group, and delta = 5 for the intervention group. Here, we consider the `is_missing` column only, so that we apply the delta's to **all** imputed missing data.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# delta template\ndelta_control = 0\ndelta_intervention = 5\n\ndat_delta_0_5 <- rbmi::delta_template(imputations = imputeObj) |>\n mutate(\n delta_ctl = (THERAPY == \"PLACEBO\") * is_missing * delta_control,\n delta_int = (THERAPY == \"DRUG\") * is_missing * delta_intervention,\n delta = delta_ctl + delta_int\n )\n\ndat_delta_0_5 |>\n dplyr::filter(PATIENT %in% c(\"1513\", \"1514\")) |>\n head(8) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n \n
PATIENTVISITTHERAPYis_maris_missingis_post_icestrategydeltadelta_ctldelta_int
15134DRUGTRUEFALSEFALSENA000
15135DRUGTRUETRUETRUEMAR505
15136DRUGTRUETRUETRUEMAR505
15137DRUGTRUETRUETRUEMAR505
15144PLACEBOTRUEFALSEFALSENA000
15145PLACEBOTRUETRUETRUEMAR000
15146PLACEBOTRUETRUETRUEMAR000
15147PLACEBOTRUETRUETRUEMAR000
\n
\n```\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# delta adjusted dataset\nrbmi:::apply_delta(\n MI_10,\n delta = dat_delta_0_5,\n group = c(\"PATIENT\", \"VISIT\", \"THERAPY\"),\n outcome = \"CHANGE\"\n) |>\n dplyr::filter(PATIENT %in% c(\"1513\", \"1514\")) |>\n head(8) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGEPATIENT2
1513MDRUG7419245.000000new_pt_5
1513MDRUGNA519NA2.702069new_pt_5
1513MDRUGNA619NA-1.307404new_pt_5
1513MDRUGNA719NA1.645919new_pt_5
1514FPLACEBO7421232.000000new_pt_6
1514FPLACEBONA521NA4.878866new_pt_6
1514FPLACEBONA621NA2.342392new_pt_6
1514FPLACEBONA721NA7.052081new_pt_6
\n
\n```\n\n:::\n:::\n\n\nThe `delta_template()` function has two additional arguments, `delta` and `dlag`, that can be used to define the delta adjustments. We explain these arguments in more detail in the *flexible delta adjustments* section below.\n\n### Run analysis model with delta adjustment\n\nAs mentioned, delta adjustments are implemented via the `delta` argument of the `analyse()` function. The adjustment happens right after data imputation under MAR or MNAR (using reference-based imputation approaches), but before implementing the analysis model. Sensitivity analyses can therefore be performed without having to refit the imputation model, which is computationally efficient. This approach is considered a *marginal* delta adjustment approach, because the delta is simply added to the mean of the conditional multivariate normal distribution (conditional on the observed values and the covariates) for the imputation model ([Roger 2017](https://www.lshtm.ac.uk/research/centres-projects-groups/missing-data#dia-missing-data)).\n\nHere, we apply the delta adjustment of 5 to **all** imputed values of the outcome variable (CHANGE) in the intervention group. The estimated treatment effect at visit 7 is presented below.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nanaObj <- rbmi::analyse(\n imputations = imputeObj,\n fun = ancova,\n delta = dat_delta_0_5,\n vars = vars_analyse\n)\n\npoolObj <- rbmi::pool(anaObj)\n\npoolObj |>\n data.frame() |>\n dplyr::filter(grepl(\"7\", parameter)) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n \n
parameterestselciucipval
trt_7-1.5765381.1557960-3.8608870.70781181.746687e-01
lsm_ref_7-4.8510110.8031056-6.438303-3.26371941.230713e-08
lsm_alt_7-6.4275490.8285086-8.065272-4.78982561.496383e-12
\n
\n```\n\n:::\n:::\n\n\n## Tipping point analysis: MAR approach\n\n### Generate delta's: sequential delta adjustment for intervention arm\n\nTo perform a tipping point analysis under the MAR assumption, we must create a range of delta values. In this section, we only specify a range of deltas for the intervention group.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndelta_df1 <- expand.grid(\n delta_control = 0,\n delta_intervention = seq(-3, 8, by = 1)\n) |>\n as_tibble()\n```\n:::\n\n\n### Perform tipping point analysis\n\nTo enable a tipping point analysis within a single function, we create `perform_tipp_analysis()`. This custom function takes the stratified deltas for `delta_control` and `delta_intervention` as arguments.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nperform_tipp_analysis <- function(delta_control, delta_intervention) {\n dat_delta <- rbmi::delta_template(imputeObj) |>\n mutate(\n delta_ctl = (THERAPY == \"PLACEBO\") * is_missing * delta_control,\n delta_int = (THERAPY == \"DRUG\") * is_missing * delta_intervention,\n delta = delta_ctl + delta_int\n )\n\n anaObj <- rbmi::analyse(\n imputations = imputeObj,\n fun = ancova,\n delta = dat_delta,\n vars = vars_analyse\n )\n\n poolObj <- as.data.frame(pool(anaObj)) |>\n dplyr::filter(grepl(\"trt_7\", parameter))\n\n list(\n trt_7 = poolObj[[\"est\"]],\n pval_7 = poolObj[[\"pval\"]],\n lci_7 = poolObj[[\"lci\"]],\n uci_7 = poolObj[[\"uci\"]]\n )\n}\n```\n:::\n\n\nNow, let's apply this function to the antidepressant data as follows:\n\n**Note:** here we are adding some parallelisation using {furrr} to speed things up.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nworkers <- parallelly::availableCores(omit = 1)\nfuture::plan(multisession, workers = workers)\n\nMAR_tipp_df1 <- delta_df1 |> \n furrr::future_pmap(perform_tipp_analysis) |>\n purrr::reduce(bind_rows) \n\nMAR_tipp_df1 <- dplyr::bind_cols(delta_df1, MAR_tipp_df1)\n```\n:::\n\n\nThe results of the tipping point analysis under MAR 
with p-value $\\geq$ 0.05 are:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nMAR_tipp_df1 |>\n filter(pval_7 >= 0.05) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n \n
delta_controldelta_interventiontrt_7pval_7lci_7uci_7
03-2.0625630.07099705-4.3038570.1787318
04-1.8195500.11389514-4.0806180.4415176
05-1.5765380.17466868-3.8608870.7078118
06-1.3335250.25598276-3.6445590.9775087
07-1.0905130.35876021-3.4315171.2504920
08-0.8475000.48163794-3.2216371.5266373
\n
\n```\n\n:::\n:::\n\n\nThe results of the tipping point analysis under MAR with p-value $<$ 0.05 are:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nMAR_tipp_df1 |>\n filter(pval_7 < 0.05) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n \n
delta_controldelta_interventiontrt_7pval_7lci_7uci_7
0-3-3.5206380.001908838-5.721089-1.32018653
0-2-3.2776250.003736614-5.475403-1.07984684
0-1-3.0346130.007171413-5.233537-0.83568785
00-2.7916000.013407865-4.995485-0.58771551
01-2.5485880.024279088-4.761220-0.33595542
02-2.3055750.042372837-4.530698-0.08045217
\n
\n```\n\n:::\n:::\n\n\nWe can derive an **exact** tipping point by linearly interpolating between the last \"significant\" delta and the first \"non-significant\" delta using the `approx()` function.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndelta_tp <- approx(\n x = MAR_tipp_df1$pval_7,\n y = MAR_tipp_df1$delta_intervention,\n xout = 0.05\n)$y\n\ntrt_tp <- approx(\n x = MAR_tipp_df1$delta_intervention,\n y = MAR_tipp_df1$trt_7,\n xout = delta_tp\n)$y\n\nlci_tp <- approx(\n x = MAR_tipp_df1$delta_intervention,\n y = MAR_tipp_df1$lci_7,\n xout = delta_tp\n)$y\n\nuci_tp <- approx(\n x = MAR_tipp_df1$delta_intervention,\n y = MAR_tipp_df1$uci_7,\n xout = delta_tp\n)$y\n\ndata.frame(\n delta_control = 0,\n delta_intervention = delta_tp,\n trt_7 = trt_tp,\n pval_7 = 0.05,\n lci_7 = lci_tp,\n uci_7 = uci_tp\n) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n \n
delta_controldelta_interventiontrt_7pval_7lci_7uci_7
02.266458-2.2408220.05-4.470254-0.01139042
\n
\n```\n\n:::\n:::\n\n\n### Visualize results\n\nA nice visualization of this tipping point analysis for the MAR approach is shown below. The dashed horizontal line indicates a p-value of 0.05 in the left plot and no treatment effect in the right plot.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nMAR_est <- ggplot(MAR_tipp_df1, aes(delta_intervention, trt_7)) +\n geom_line() +\n geom_point() +\n geom_ribbon(\n aes(delta_intervention, ymin = lci_7, ymax = uci_7),\n alpha = 0.25\n ) +\n geom_hline(yintercept = 0.0, linetype = 2) +\n geom_vline(xintercept = delta_tp, linetype = 2) +\n scale_x_continuous(breaks = seq(-6, 10, 2)) +\n labs(x = \"Delta (intervention)\", y = \"Treatment effect (95% CI)\")\n\nMAR_pval <- ggplot(MAR_tipp_df1, aes(delta_intervention, pval_7)) +\n geom_line() +\n geom_point() +\n geom_hline(yintercept = 0.05, linetype = 2) +\n geom_vline(xintercept = delta_tp, linetype = 2) +\n scale_x_continuous(breaks = seq(-6, 10, 2)) +\n labs(x = \"Delta (intervention)\", y = \"P-value\")\n\ngrid.arrange(MAR_pval, MAR_est, nrow = 1)\n```\n\n::: {.cell-output-display}\n![](tipping_point_files/figure-html/MAR_v1_tipping_point_visualization-1.png){width=672}\n:::\n:::\n\n\nWe clearly see that the p-value under MAR reaches a tipping point from 3 onward in the range of delta's considered.\n\n### Delta adjustment for control and intervention arms\n\nLet's now create a sequence of delta's for the control group too, and carry out a second tipping point analysis under the MAR assumption.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndelta_df2 <- expand.grid(\n delta_control = seq(-3, 8, by = 1),\n delta_intervention = seq(-3, 8, by = 1)\n) |>\n as_tibble()\n```\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nMAR_tipp_df2 <- delta_df2 |>\n furrr::future_pmap(perform_tipp_analysis) |>\n purrr::reduce(bind_rows) \n\n# Adding back the delta's used for reference \nMAR_tipp_df2 <- dplyr::bind_cols(delta_df2, MAR_tipp_df2) |>\n mutate(\n pval = cut(\n pval_7,\n c(0, 0.001, 0.01, 0.05, 
0.2, 1),\n right = FALSE,\n labels = c(\n \"<0.001\",\n \"0.001 - <0.01\",\n \"0.01 - <0.05\",\n \"0.05 - <0.20\",\n \">= 0.20\"\n )\n )\n ) \n```\n:::\n\n\nWe can visualize the result of this tipping point analysis using a heatmap. Here, the **(0,0)** point corresponds to the original result without any delta adjustment (p \~ 0.0134).\n\n\n::: {.cell}\n\n```{.r .cell-code}\nMAR_heat <- ggplot(\n MAR_tipp_df2,\n aes(delta_control, delta_intervention, fill = pval)\n) +\n geom_raster() +\n scale_fill_manual(\n values = c(\"darkgreen\", \"lightgreen\", \"lightyellow\", \"orange\", \"red\")\n ) +\n scale_x_continuous(breaks = seq(-5, 10, 1)) +\n scale_y_continuous(breaks = seq(-5, 10, 1)) +\n labs(x = \"Delta (control)\", y = \"Delta (intervention)\", fill = \"P-value\")\nMAR_heat\n```\n\n::: {.cell-output-display}\n![](tipping_point_files/figure-html/MAR_v2_tipping_point_visualization-1.png){width=672}\n:::\n:::\n\n\n## Comparison with rbmi MNAR approaches\n\n### Summary of results\n\nIn the table below we present the results of the different imputation strategies with varying number of multiple imputation draws, M = 500 and M = 5000. Note that the results can be slightly different from the results above due to a possibly different seed. The estimates show the contrast at visit 7 between DRUG and PLACEBO (DRUG - PLACEBO). 
Delta adjustments were applied to **all** imputed missing data in the intervention group only.\n\n| Method | Delta control | Delta intervention at TP | Estimate at TP | 95% CI | P-value | Original estimate | Original p-value |\n|---------|---------|---------|---------|---------|---------|---------|---------|\n| MI - MAR (M=500) | 0 | 3 | -2.074 | -4.324 to 0.176 | 0.0709 | -2.798 | 0.0135 |\n| MI - MAR (M=5000) | 0 | 3 | -2.100 | -4.354 to 0.154 | 0.0675 | -2.829 | 0.0128 |\n| MI - MNAR JR (M=500) | 0 | -1 | -2.380 | -4.595 to -0.165 | 0.0354 | -2.137 | 0.0602 |\n| MI - MNAR JR (M=5000) | 0 | -1 | -2.383 | -4.608 to -0.157 | 0.0361 | -2.140 | 0.0611 |\n| MI - MNAR CR (M=500) | 0 | 1 | -2.151 | -4.359 to 0.057 | 0.0561 | -2.394 | 0.0326 |\n| MI - MNAR CR (M=5000) | 0 | 1 | -2.162 | -4.377 to 0.054 | 0.0558 | -2.405 | 0.0324 |\n| MI - MNAR CIR (M=500) | 0 | 2 | -1.986 | -4.211 to 0.239 | 0.0798 | -2.472 | 0.0274 |\n| MI - MNAR CIR (M=5000) | 0 | 2 | -1.994 | -4.227 to 0.239 | 0.0796 | -2.480 | 0.0274 |\n\nOf all considered approaches, the MAR approach yields the largest delta adjustment at its tipping point, with a delta intervention of 3 at both M = 500 and M = 5000. This indicates that the MAR assumption is the most robust against slight deviations of its conditions. Notice that for the MNAR JR approach we included, for completeness, tipping point analyses to know when the results switch from non-significant to significant. Correspondingly, two negative delta's (-1) are found at the tipping point for M = 500 and M = 5000. 
This is expected, given that the original analyses are non-significant (p \~ 0.0602 and p \~ 0.0611) and a tipping point analysis here aims to find the point at which the analysis becomes significant, instead of non-significant.\n\n### Visual comparison\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/tipping_point/R_comparison_est.png){fig-align='center' width=80%}\n:::\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/tipping_point/R_comparison_pval.png){fig-align='center' width=80%}\n:::\n:::\n\n\n## Flexible delta adjustments\n\nSo far, we have only considered simple delta adjustments that add the same value to all imputed missing data. However, you may want to implement more flexible delta adjustments for post-ICE missing data, where the magnitude of the delta varies depending on the distance of the visit from the ICE visit.\n\nTo facilitate the creation of such flexible delta adjustments, the `delta_template()` function has two additional arguments `delta` and `dlag`:\n\n- `delta`: specifies the default amount of delta that should be applied to each post-ICE visit (default is NULL)\n- `dlag`: specifies the scaling coefficient to be applied based upon the visit's proximity to the first visit affected by the ICE (default is NULL)\n\nThe usage of the `delta` and `dlag` arguments is best illustrated with a few examples from the [rbmi advanced functionality vignette](https://cran.r-project.org/web/packages/rbmi/vignettes/advanced.html#sensitivity-analyses-delta-adjustments-and-tipping-point-analyses).\n\n### Scaling delta by visit\n\nAssume a setting with 4 visits and the user specified `delta = c(5, 6, 7, 8)` and `dlag = c(1, 2, 3, 4)`. 
For a subject for whom the first visit affected by the ICE is visit 2, these values of `delta` and `dlag` would imply the following delta offset:\n\n| | Visit 1 | Visit 2 | Visit 3 | Visit 4 |\n|----------------|---------|---------|---------|---------|\n| Delta | 5 | 6 | 7 | 8 |\n| Dlag | 0 | 1 | 2 | 3 |\n| Delta \\* dlag | 0 | 6 | 14 | 24 |\n| Cumulative sum | 0 | 6 | 20 | 44 |\n\nThat is, the subject would have a delta adjustment of 0 applied to visit 1, 6 for visit 2, 20 for visit 3 and 44 for visit 4.\n\nAssume instead, that the subject’s first visit affected by the ICE was visit 3. Then, the above values of `delta` and `dlag` would imply the following delta adjustment:\n\n| | Visit 1 | Visit 2 | Visit 3 | Visit 4 |\n|----------------|---------|---------|---------|---------|\n| Delta | 5 | 6 | 7 | 8 |\n| Dlag | 0 | 0 | 1 | 2 |\n| Delta \\* dlag | 0 | 0 | 7 | 16 |\n| Cumulative sum | 0 | 0 | 7 | 23 |\n\nAnd thus the subject would have a delta adjustment of 0 applied to visits 1 and 2, 7 for visit 3 and 23 for visit 4.\n\nAnother way of using these arguments is to set `delta` to the difference in time between visits and `dlag` to be the amount of delta per unit of time. For example, let’s say that visits occur on weeks 1, 5, 6 and 9 and that we want a delta of 3 to be applied for each week after an ICE. For simplicity, we assume that the ICE occurs immediately after the subject’s last visit which is not affected by the ICE. This could be achieved by setting `delta = c(1, 4, 1, 3)`, i.e. 
the difference in weeks between each visit, and `dlag = c(3, 3, 3, 3)`.\n\nAssume a subject’s first visit affected by the ICE was visit 2, then these values of `delta` and `dlag` would imply the following delta adjustment:\n\n| | Visit 1 | Visit 2 | Visit 3 | Visit 4 |\n|----------------|---------|---------|---------|---------|\n| Delta | 1 | 4 | 1 | 3 |\n| Dlag | 0 | 3 | 3 | 3 |\n| Delta \\* dlag | 0 | 12 | 3 | 9 |\n| Cumulative sum | 0 | 12 | 15 | 24 |\n\nLet's now consider the antidepressant data again. Suppose we apply a delta adjustment of 2 for each week following an ICE in the intervention group only. For example, if the ICE took place immediately after visit 4, then the cumulative delta applied to a missing value from visit 5 would be 2, from visit 6 would be 4, and from visit 7 would be 6.\n\nTo program this, we first use the `delta` and `dlag` arguments of `delta_template()`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat_delta <- rbmi::delta_template(\n imputeObj,\n delta = c(2, 2, 2, 2),\n dlag = c(1, 1, 1, 1)\n)\n\ndat_delta |>\n dplyr::filter(PATIENT %in% c(\"1513\", \"1514\")) |>\n head(8) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n
PATIENTVISITTHERAPYis_maris_missingis_post_icestrategydelta
15134DRUGTRUEFALSEFALSENA0
15135DRUGTRUETRUETRUEMAR2
15136DRUGTRUETRUETRUEMAR4
15137DRUGTRUETRUETRUEMAR6
15144PLACEBOTRUEFALSEFALSENA0
15145PLACEBOTRUETRUETRUEMAR2
15146PLACEBOTRUETRUETRUEMAR4
15147PLACEBOTRUETRUETRUEMAR6
\n
\n```\n\n:::\n:::\n\n\nThen, we use some metadata variables provided by `delta_template()` to manually reset the delta values for the control group back to 0.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat_delta <- dat_delta |>\n mutate(delta = if_else(THERAPY == \"PLACEBO\", 0, delta))\n\ndat_delta |>\n dplyr::filter(PATIENT %in% c(\"1513\", \"1514\")) |>\n head(8) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n
PATIENTVISITTHERAPYis_maris_missingis_post_icestrategydelta
15134DRUGTRUEFALSEFALSENA0
15135DRUGTRUETRUETRUEMAR2
15136DRUGTRUETRUETRUEMAR4
15137DRUGTRUETRUETRUEMAR6
15144PLACEBOTRUEFALSEFALSENA0
15145PLACEBOTRUETRUETRUEMAR0
15146PLACEBOTRUETRUETRUEMAR0
15147PLACEBOTRUETRUETRUEMAR0
\n
\n```\n\n:::\n:::\n\n\nAnd lastly we use `dat_delta` to apply the desired delta offset to our analysis model under the `delta` argument of the `analyse()` function.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nanaObj <- rbmi::analyse(\n imputations = imputeObj,\n fun = ancova,\n delta = dat_delta,\n vars = vars_analyse\n)\n\npoolObj <- rbmi::pool(anaObj)\n```\n:::\n\n\n### Fixed delta\n\nYou may also add a simple, fixed delta using the `delta` and `dlag` arguments. To do this, `delta` should be specified as a vector of length equal to the amount of visits, e.g. `c(5, 5, 5, 5)`, while `dlag` should be `c(1, 0, 0, 0)`. This ensures a delta of 5 is added to each imputed missing value following an ICE, which we here assume to occur at the visit 2:\n\n| | Visit 1 | Visit 2 | Visit 3 | Visit 4 |\n|----------------|---------|---------|---------|---------|\n| Delta | 5 | 5 | 5 | 5 |\n| Dlag | 0 | 1 | 0 | 0 |\n| Delta \\* dlag | 0 | 0 | 0 | 0 |\n| Cumulative sum | 0 | 5 | 5 | 5 |\n\nAdding a fixed delta in this way seems similar to what we explained in the *simple delta adjustments* section above, but there are some crucial differences. 
Remember the first case where we added delta = 5 to all imputed `is_missing` values:\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# 1) mutate delta = is_missing * 5\ndat_delta_5_v1 <- delta_template(imputations = imputeObj) |>\n mutate(delta = is_missing * 5)\n```\n:::\n\n\nAnd remember the second case where we added delta = 5 to all imputed `is_missing` and `is_post_ice` values:\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# 2) mutate delta = is_missing * is_post_ice * 5\ndat_delta_5_v2 <- delta_template(imputations = imputeObj) |>\n mutate(delta = is_missing * is_post_ice * 5)\n```\n:::\n\n\nSimilarly, we now set `delta = c(5, 5, 5, 5)` and `dlag = c(1, 0, 0, 0)`:\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# 3) delta = c(5, 5, 5, 5), dlag = c(1, 0, 0, 0)\ndat_delta_5_v3 <- delta_template(\n imputeObj,\n delta = c(5, 5, 5, 5),\n dlag = c(1, 0, 0, 0)\n)\n```\n:::\n\n\nThe difference between these three approaches lies in how they treat intermittent missing values that do not correspond to study drug discontinuation due to an ICE.\n\nIf we consider patient 3618 again, we see that its intermittent missing value at visit 5 has delta = 5 added in the first approach (using `is_missing * 5`), while this missing value is not considered at all to receive a delta adjustment in the second or third approach (using `is_missing * is_post_ice * 5`, or `delta = c(5, 5, 5, 5)` and `dlag = c(1, 0, 0, 0)`). Thus by default, the `delta` and `dlag` arguments of `delta_template()` (third approach) only add delta adjustments to post-ICE missing values.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# imputed dataset without delta adjustment\ngt(MI_10 |> dplyr::filter(PATIENT == \"3618\"))\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGEPATIENT2
3618MDRUG848157.000000new_pt_99
3618MDRUGNA58NA1.135327new_pt_99
3618MDRUG2868146.000000new_pt_99
3618MDRUG4278102.000000new_pt_99
\n
\n```\n\n:::\n\n```{.r .cell-code}\n# 1) mutate delta = is_missing * 5\nrbmi:::apply_delta(\n MI_10,\n delta = dat_delta_5_v1,\n group = c(\"PATIENT\", \"VISIT\", \"THERAPY\"),\n outcome = \"CHANGE\"\n) |>\n dplyr::filter(PATIENT == 3618) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGEPATIENT2
3618MDRUG848157.000000new_pt_99
3618MDRUGNA58NA6.135327new_pt_99
3618MDRUG2868146.000000new_pt_99
3618MDRUG4278102.000000new_pt_99
\n
\n```\n\n:::\n\n```{.r .cell-code}\n# 2) mutate delta = is_missing * is_post_ice * 5\nrbmi:::apply_delta(\n MI_10,\n delta = dat_delta_5_v2,\n group = c(\"PATIENT\", \"VISIT\", \"THERAPY\"),\n outcome = \"CHANGE\"\n) |>\n dplyr::filter(PATIENT == 3618) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGEPATIENT2
3618MDRUG848157.000000new_pt_99
3618MDRUGNA58NA1.135327new_pt_99
3618MDRUG2868146.000000new_pt_99
3618MDRUG4278102.000000new_pt_99
\n
\n```\n\n:::\n\n```{.r .cell-code}\n# 3) delta = c(5, 5, 5, 5), dlag = c(1, 0, 0, 0)\nrbmi:::apply_delta(\n MI_10,\n delta = dat_delta_5_v3,\n group = c(\"PATIENT\", \"VISIT\", \"THERAPY\"),\n outcome = \"CHANGE\"\n) |>\n dplyr::filter(PATIENT == 3618) |>\n gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n
PATIENTGENDERTHERAPYRELDAYSVISITBASVALHAMDTL17CHANGEPATIENT2
3618MDRUG848157.000000new_pt_99
3618MDRUGNA58NA1.135327new_pt_99
3618MDRUG2868146.000000new_pt_99
3618MDRUG4278102.000000new_pt_99
\n
\n```\n\n:::\n:::\n\n\nOne should be aware of this discrepancy when using the `rbmi` package for delta adjustments. For all tipping point analyses performed under MAR and MNAR in this tutorial, we adopted the first approach and applied delta adjustments to **all** imputed missing data. In contrast, we note that the `five macros` in SAS uses the second `delta` and `dlag` approach as discussed here, i.e. it does not apply delta adjustments to intermittent missing values. This could have important implications for datasets with high proportions of intermittent missing values, as it could alter the results of the tipping point analysis substantially.\n\n## References\n\n[Cro et al. 2020](https://pubmed.ncbi.nlm.nih.gov/32419182/). Sensitivity analysis for clinical trials with missing continuous outcome data using controlled multiple imputation: A practical guide. *Statistics in Medicine*. 2020;39(21):2815-2842.\n\n[Gower-Page et al. 2022](https://joss.theoj.org/papers/10.21105/joss.04251). rbmi: A R package for standard and reference-based multiple imputation methods. *Journal of Open Source Software* 7(74):4251.\n\n[rbmi: Advanced Functionality](https://cran.r-project.org/web/packages/rbmi/vignettes/advanced.html#sensitivity-analyses-delta-adjustments-and-tipping-point-analyses)\n\n[rbmi: Quickstart](https://cran.r-project.org/web/packages/rbmi/vignettes/quickstart.html)\n\n[rbmi: Reference-Based Multiple Imputation](https://cran.r-project.org/web/packages/rbmi/index.html)\n\n[Roger 2022](https://baselbiometrics.github.io/home/docs/talks/20221208/5_JamesRoger%2020121118.pdf). Other statistical software for continuous longitudinal endpoints: SAS macros for multiple imputation. Addressing intercurrent events: Treatment policy and hypothetical strategies. *Joint EFSPI and BBS virtual event.*\n\n[Roger 2017](https://www.lshtm.ac.uk/research/centres-projects-groups/missing-data#dia-missing-data). 
Fitting reference-based models for missing data to longitudinal repeated-measures Normal data. User guide five macros.\n\n::: {.callout-note collapse=\"true\" title=\"Session info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os Ubuntu 24.04.3 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-17\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P assertthat 0.2.1 2019-03-21 [?] RSPM (R 4.5.0)\n P backports 1.5.0 2024-05-23 [?] RSPM (R 4.5.0)\n boot 1.3-32 2025-08-29 [2] CRAN (R 4.5.2)\n P broom 1.0.12 2026-01-27 [?] RSPM (R 4.5.0)\n P callr 3.7.6 2024-03-25 [?] RSPM (R 4.5.0)\n P checkmate 2.3.4 2026-02-03 [?] RSPM (R 4.5.0)\n P cli 3.6.5 2025-04-23 [?] RSPM (R 4.5.0)\n codetools 0.2-20 2024-03-31 [2] CRAN (R 4.5.2)\n P curl 7.0.0 2025-08-19 [?] RSPM (R 4.5.0)\n P digest 0.6.39 2025-11-19 [?] RSPM (R 4.5.0)\n P dplyr * 1.2.0 2026-02-03 [?] RSPM (R 4.5.0)\n P emmeans * 2.0.1 2025-12-16 [?] RSPM (R 4.5.0)\n P estimability 1.5.1 2024-05-12 [?] RSPM (R 4.5.0)\n P evaluate 1.0.5 2025-08-27 [?] RSPM (R 4.5.0)\n P farver 2.1.2 2024-05-13 [?] RSPM (R 4.5.0)\n P fastmap 1.2.0 2024-05-15 [?] RSPM (R 4.5.0)\n P forcats 1.0.1 2025-09-25 [?] RSPM (R 4.5.0)\n P foreach 1.5.2 2022-02-02 [?] RSPM (R 4.5.0)\n P fs 1.6.6 2025-04-12 [?] RSPM (R 4.5.0)\n P furrr * 0.3.1 2022-08-15 [?] RSPM (R 4.5.0)\n P future * 1.69.0 2026-01-16 [?] RSPM (R 4.5.0)\n P generics 0.1.4 2025-05-09 [?] RSPM (R 4.5.0)\n P ggplot2 * 4.0.2 2026-02-03 [?] RSPM (R 4.5.0)\n P glmnet 4.1-10 2025-07-17 [?] RSPM (R 4.5.0)\n P globals 0.19.0 2026-02-02 [?] 
RSPM (R 4.5.0)\n P glue 1.8.0 2024-09-30 [?] RSPM (R 4.5.0)\n P gridExtra * 2.3 2017-09-09 [?] RSPM (R 4.5.0)\n P gt * 1.3.0 2026-01-22 [?] RSPM (R 4.5.0)\n P gtable 0.3.6 2024-10-25 [?] RSPM (R 4.5.0)\n P haven 2.5.5 2025-05-30 [?] RSPM (R 4.5.0)\n P hms 1.1.4 2025-10-17 [?] RSPM (R 4.5.0)\n P htmltools 0.5.9 2025-12-04 [?] RSPM (R 4.5.0)\n P htmlwidgets 1.6.4 2023-12-06 [?] RSPM (R 4.5.0)\n P inline 0.3.21 2025-01-09 [?] RSPM (R 4.5.0)\n P iterators 1.0.14 2022-02-05 [?] RSPM (R 4.5.0)\n P jinjar 0.3.2 2025-03-13 [?] RSPM (R 4.5.0)\n P jomo 2.7-6 2023-04-15 [?] RSPM (R 4.5.0)\n P jsonlite 2.0.0 2025-03-27 [?] RSPM (R 4.5.0)\n P knitr 1.51 2025-12-20 [?] RSPM (R 4.5.0)\n P labeling 0.4.3 2023-08-29 [?] RSPM (R 4.5.0)\n P labelled * 2.16.0 2025-10-22 [?] RSPM (R 4.5.0)\n lattice 0.22-7 2025-04-02 [2] CRAN (R 4.5.2)\n P lifecycle 1.0.5 2026-01-08 [?] RSPM (R 4.5.0)\n P listenv 0.10.0 2025-11-02 [?] RSPM (R 4.5.0)\n P lme4 1.1-38 2025-12-02 [?] RSPM (R 4.5.0)\n P loo 2.9.0 2025-12-23 [?] RSPM (R 4.5.0)\n P magrittr 2.0.4 2025-09-12 [?] RSPM (R 4.5.0)\n MASS 7.3-65 2025-02-28 [2] CRAN (R 4.5.2)\n Matrix 1.7-4 2025-08-28 [2] CRAN (R 4.5.2)\n P matrixStats 1.5.0 2025-01-07 [?] RSPM (R 4.5.0)\n P mice * 3.19.0 2025-12-10 [?] RSPM (R 4.5.0)\n P minqa 1.2.8 2024-08-17 [?] RSPM (R 4.5.0)\n P mitml 0.4-5 2023-03-08 [?] RSPM (R 4.5.0)\n P mmrm * 0.3.17 2026-01-08 [?] RSPM (R 4.5.0)\n P multcomp 1.4-29 2025-10-20 [?] RSPM (R 4.5.0)\n P mvtnorm 1.3-3 2025-01-10 [?] RSPM (R 4.5.0)\n nlme 3.1-168 2025-03-31 [2] CRAN (R 4.5.2)\n P nloptr 2.2.1 2025-03-17 [?] RSPM (R 4.5.0)\n nnet 7.3-20 2025-01-01 [2] CRAN (R 4.5.2)\n P otel 0.2.0 2025-08-29 [?] RSPM (R 4.5.0)\n P pan 1.9 2023-12-07 [?] RSPM (R 4.5.0)\n P parallelly * 1.46.1 2026-01-08 [?] RSPM (R 4.5.0)\n P pillar 1.11.1 2025-09-17 [?] RSPM (R 4.5.0)\n P pkgbuild 1.4.8 2025-05-26 [?] RSPM (R 4.5.0)\n P pkgconfig 2.0.3 2019-09-22 [?] RSPM (R 4.5.0)\n P processx 3.8.6 2025-02-21 [?] RSPM (R 4.5.0)\n P ps 1.9.1 2025-04-12 [?] 
RSPM (R 4.5.0)\n P purrr * 1.2.1 2026-01-09 [?] RSPM (R 4.5.0)\n P QuickJSR 1.9.0 2026-01-25 [?] RSPM (R 4.5.0)\n P R6 2.6.1 2025-02-15 [?] RSPM (R 4.5.0)\n P rbibutils 2.4.1 2026-01-21 [?] RSPM (R 4.5.0)\n P rbmi * 1.6.0 2026-01-23 [?] RSPM (R 4.5.0)\n P RColorBrewer 1.1-3 2022-04-03 [?] RSPM (R 4.5.0)\n P Rcpp 1.1.1 2026-01-10 [?] RSPM (R 4.5.0)\n P RcppParallel 5.1.11-1 2025-08-27 [?] RSPM (R 4.5.0)\n P Rdpack 2.6.6 2026-02-08 [?] RSPM (R 4.5.0)\n P reformulas 0.4.4 2026-02-02 [?] RSPM (R 4.5.0)\n renv 1.0.10 2024-10-05 [1] RSPM (R 4.5.2)\n P rlang 1.1.7 2026-01-09 [?] RSPM (R 4.5.0)\n P rmarkdown 2.30 2025-09-28 [?] RSPM (R 4.5.0)\n rpart 4.1.24 2025-01-07 [2] CRAN (R 4.5.2)\n P rstan * 2.32.7 2025-03-10 [?] RSPM (R 4.5.0)\n P S7 0.2.1 2025-11-14 [?] RSPM (R 4.5.0)\n P sandwich 3.1-1 2024-09-15 [?] RSPM (R 4.5.0)\n P sass 0.4.10 2025-04-11 [?] RSPM (R 4.5.0)\n P scales 1.4.0 2025-04-24 [?] RSPM (R 4.5.0)\n P sessioninfo 1.2.2 2021-12-06 [?] RSPM (R 4.5.0)\n P shape 1.4.6.1 2024-02-23 [?] RSPM (R 4.5.0)\n P StanHeaders * 2.32.10 2024-07-15 [?] RSPM (R 4.5.0)\n P stringi 1.8.7 2025-03-27 [?] RSPM (R 4.5.0)\n P stringr 1.6.0 2025-11-04 [?] RSPM (R 4.5.0)\n survival 3.8-3 2024-12-17 [2] CRAN (R 4.5.2)\n P TH.data 1.1-5 2025-11-17 [?] RSPM (R 4.5.0)\n P tibble 3.3.1 2026-01-11 [?] RSPM (R 4.5.0)\n P tidyr * 1.3.2 2025-12-19 [?] RSPM (R 4.5.0)\n P tidyselect 1.2.1 2024-03-11 [?] RSPM (R 4.5.0)\n P TMB 1.9.19 2025-12-15 [?] RSPM (R 4.5.0)\n P utf8 1.2.6 2025-06-08 [?] RSPM (R 4.5.0)\n P V8 8.0.1 2025-10-10 [?] RSPM (R 4.5.0)\n P vctrs 0.7.1 2026-01-23 [?] RSPM (R 4.5.0)\n P withr 3.0.2 2024-10-28 [?] RSPM (R 4.5.0)\n P xfun 0.56 2026-01-18 [?] RSPM (R 4.5.0)\n P xml2 1.5.2 2026-01-17 [?] RSPM (R 4.5.0)\n P xtable 1.8-4 2019-04-21 [?] RSPM (R 4.5.0)\n P yaml 2.3.12 2025-12-10 [?] RSPM (R 4.5.0)\n P zoo 1.8-15 2025-12-15 [?] 
RSPM (R 4.5.0)\n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.2/lib/R/library\n\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n:::\n", "supporting": [ "tipping_point_files" ], diff --git a/_freeze/R/tipping_point/figure-html/MAR_v1_tipping_point_visualization-1.png b/_freeze/R/tipping_point/figure-html/MAR_v1_tipping_point_visualization-1.png index 63a4b57b4..950aff8e2 100644 Binary files a/_freeze/R/tipping_point/figure-html/MAR_v1_tipping_point_visualization-1.png and b/_freeze/R/tipping_point/figure-html/MAR_v1_tipping_point_visualization-1.png differ diff --git a/_freeze/R/tipping_point/figure-html/MAR_v2_tipping_point_visualization-1.png b/_freeze/R/tipping_point/figure-html/MAR_v2_tipping_point_visualization-1.png index baa6017cd..204ad7103 100644 Binary files a/_freeze/R/tipping_point/figure-html/MAR_v2_tipping_point_visualization-1.png and b/_freeze/R/tipping_point/figure-html/MAR_v2_tipping_point_visualization-1.png differ diff --git a/_freeze/R/tipping_point/figure-html/data_exploration_4-1.png b/_freeze/R/tipping_point/figure-html/data_exploration_4-1.png index 43f1472a2..89dc44f52 100644 Binary files a/_freeze/R/tipping_point/figure-html/data_exploration_4-1.png and b/_freeze/R/tipping_point/figure-html/data_exploration_4-1.png differ diff --git a/_freeze/R/tobit regression/execute-results/html.json b/_freeze/R/tobit regression/execute-results/html.json index 3d70d4296..1e9defd15 100644 --- a/_freeze/R/tobit regression/execute-results/html.json +++ b/_freeze/R/tobit regression/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "47d4dde00d6f4f1728a169723ab25aa0", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Tobit Regression\"\n---\n\n# Tobit regression\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# General\nlibrary(dplyr)\nlibrary(gt)\nlibrary(broom)\n\n# Methodology 
specific\nlibrary(emmeans)\nlibrary(censReg)\nlibrary(survival)\nlibrary(VGAM)\n```\n:::\n\n\n## Tobit model\n\nCensoring occurs when data on the dependent variable is only partially known. For example, in virology, sample results could be below the lower limit of detection (eg, 100 copies/mL) and in such a case we only know that the sample result is <100 copies/mL, but we don't know the exact value.\n\nLet $y^{*}$ be the the true underlying latent variable, and $y$ the observed variable. We discuss here censoring on the left:\n\n$$\ny =\n\\begin{cases}\n y^{*}, & y^{*} > \\tau \\\\\n \\tau, & y^{*} \\leq \\tau\n \\end{cases} \n$$\nWe consider tobit regression with a censored normal distribution. The model equation is\n$$\ny_{i}^{*} = X_{i}\\beta + \\epsilon_{i} \n$$\nwith $\\epsilon_{i} \\sim N(0,\\sigma^2)$. But we only observe $y = max(\\tau, y^{*})$.\nThe tobit model uses maximum likelihood estimation (for details see for example Breen, 1996). It is important to note that $\\beta$ estimates the effect of $x$ on the latent variable $y^{*}$, and not on the observed value $y$.\n\n\n## Data used\n\nWe assume two equally sized groups (n=10 in each group). The data is censored on the left at a value of $\\tau=8.0$.\nIn group A 4/10 records are censored, and 1/10 in group B.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat_used = tribble(\n ~ID, ~ARM, ~Y, ~CENS,\n \"001\", \"A\", 8.0, 1, \n \"002\", \"A\", 8.0, 1,\n \"003\", \"A\", 8.0, 1,\n \"004\", \"A\", 8.0, 1,\n \"005\", \"A\", 8.9, 0,\n \"006\", \"A\", 9.5, 0,\n \"007\", \"A\", 9.9, 0,\n \"008\", \"A\", 10.3, 0,\n \"009\", \"A\", 11.0, 0,\n \"010\", \"A\", 11.2, 0,\n \"011\", \"B\", 8.0, 1, \n \"012\", \"B\", 9.2, 0,\n \"013\", \"B\", 9.9, 0,\n \"014\", \"B\", 10.0, 0,\n \"015\", \"B\", 10.6, 0,\n \"016\", \"B\", 10.6, 0,\n \"017\", \"B\", 11.3, 0,\n \"018\", \"B\", 11.8, 0,\n \"019\", \"B\", 12.9, 0,\n \"020\", \"B\", 13.0, 0,\n)\n\ngt(dat_used)\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n \n
IDARMYCENS
001A8.01
002A8.01
003A8.01
004A8.01
005A8.90
006A9.50
007A9.90
008A10.30
009A11.00
010A11.20
011B8.01
012B9.20
013B9.90
014B10.00
015B10.60
016B10.60
017B11.30
018B11.80
019B12.90
020B13.00
\n
\n```\n\n:::\n:::\n\n\n## Example Code using R\n\nThe analysis will be based on a Tobit analysis of variance with $Y$, rounded to 1 decimal places, as dependent variable and study group as a fixed covariate. A normally distributed error term will be used. Values will be left censored at the value 8.0.\n\nSeveral R functions and packages are presented.\n\n### censReg\n\nThe `censReg` function from the `censReg` package performs tobit models for left and right censored. The model is estimated by Maximum Likelihood (ML) assuming a Gaussian (normal) distribution of the error term. The maximization of the likelihood function is done by function `maxLik` of the `maxLik` package. The optimization method can be changed.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nres_censreg = censReg::censReg(Y ~ ARM, left = 8.0, data = dat_used)\nsummary(res_censreg)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\nCall:\ncensReg::censReg(formula = Y ~ ARM, left = 8, data = dat_used)\n\nObservations:\n Total Left-censored Uncensored Right-censored \n 20 5 15 0 \n\nCoefficients:\n Estimate Std. error t value Pr(> t) \n(Intercept) 8.8323 0.5918 14.925 < 2e-16 ***\nARMB 1.8225 0.8061 2.261 0.02376 * \nlogSigma 0.5491 0.1947 2.819 0.00481 ** \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\nNewton-Raphson maximisation, 4 iterations\nReturn code 1: gradient close to zero (gradtol)\nLog-likelihood: -34.3154 on 3 Df\n```\n\n\n:::\n\n```{.r .cell-code}\n# Difference between groups (Wald CIs)\nround(res_censreg$estimate[2], 3)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n ARMB \n1.823 \n```\n\n\n:::\n\n```{.r .cell-code}\nround(stats::confint(res_censreg, level = 0.95)[2, ], 3)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n 2.5 % 97.5 % \n 0.243 3.402 \n```\n\n\n:::\n:::\n\n\nThe output provides an estimate of difference between groups A and B (B-A), namely 1.8225 (se=0.8061). The presented p-value is a two-sided p-value based on the Z-test. 
The output also provides an estimate for $log(\\sigma) = 0.5491$. Wald based confidence intervals can be obtained by the `stats::confint` function.\n\n### survreg\nUsing the `survreg` function from the `survival` package a tobit model can be fit. For more information, refer to the [survival package](https://cran.r-project.org/web/packages/survival/index.html).\n\n\n::: {.cell}\n\n```{.r .cell-code}\nres_survreg = survival::survreg(\n survival::Surv(Y, 1 - CENS, type = \"left\") ~ ARM,\n dist = \"gaussian\",\n data = dat_used\n)\nsummary(res_survreg)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\nCall:\nsurvival::survreg(formula = survival::Surv(Y, 1 - CENS, type = \"left\") ~ \n ARM, data = dat_used, dist = \"gaussian\")\n Value Std. Error z p\n(Intercept) 8.832 0.592 14.92 <2e-16\nARMB 1.823 0.806 2.26 0.0238\nLog(scale) 0.549 0.195 2.82 0.0048\n\nScale= 1.73 \n\nGaussian distribution\nLoglik(model)= -34.3 Loglik(intercept only)= -36.7\n\tChisq= 4.72 on 1 degrees of freedom, p= 0.03 \nNumber of Newton-Raphson Iterations: 4 \nn= 20 \n```\n\n\n:::\n\n```{.r .cell-code}\n# Least square means by group\nlsm = emmeans::emmeans(res_survreg, specs = trt.vs.ctrl ~ ARM)\nlsm$emmeans\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n ARM emmean SE df lower.CL upper.CL\n A 8.83 0.592 17 7.58 10.1\n B 10.65 0.552 17 9.49 11.8\n\nResults are given on the ::.survival.Surv (not the response) scale. \nConfidence level used: 0.95 \n```\n\n\n:::\n\n```{.r .cell-code}\n# Difference between groups\nlsm_contrast = broom::tidy(lsm$contrasts, conf.int = TRUE, conf.level = 0.95)\nlsm_contrast |>\n gt() |>\n fmt_number(decimals = 3)\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n\n \n \n
termcontrastnull.valueestimatestd.errordfconf.lowconf.highstatisticp.value
ARMB - A0.0001.8230.80617.0000.1223.5232.2610.037
\n
\n```\n\n:::\n\n```{.r .cell-code}\n# Wald-based CIs\nround(stats::confint(res_survreg, level = 0.95)[2, ], 3)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n 2.5 % 97.5 % \n 0.243 3.402 \n```\n\n\n:::\n:::\n\n\nThe output provides an estimate of difference between groups A and B (B-A), namely 1.823 (se=0.806). The presented p-value is a two-sided p-value based on the Z-test. The output also provides an estimate for $log(\\sigma) = 0.549$. Using the `emmeans` package/function least square means and contrast can be easily obtained. The confidence intervals and p-value is based on the t-test using `emmeans`. Wald based confidence intervals can be obtained by the `stats::confint` function.\n\n### vglm\n\nThe `VGAM` package provides functions for fitting vector generalized linear and additive models (VGLMs and VGAMs). This package centers on the iteratively reweighted least squares (IRLS) algorithm. The `vglm` function offers the possibility to fit a tobit model.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nres_vglm = VGAM::vglm(Y ~ ARM, tobit(Lower = 8.0), data = dat_used)\nsummary(res_vglm)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\nCall:\nVGAM::vglm(formula = Y ~ ARM, family = tobit(Lower = 8), data = dat_used)\n\nCoefficients: \n Estimate Std. Error z value Pr(>|z|) \n(Intercept):1 8.8323 0.5727 15.422 < 2e-16 ***\n(Intercept):2 0.5491 0.1807 3.039 0.00238 ** \nARMB 1.8226 0.7942 2.295 0.02173 * \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n\nNames of linear predictors: mu, loglink(sd)\n\nLog-likelihood: -34.3154 on 37 degrees of freedom\n\nNumber of Fisher scoring iterations: 7 \n```\n\n\n:::\n\n```{.r .cell-code}\n# Difference between groups\nround(res_vglm@coefficients[3], 3)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n ARMB \n1.823 \n```\n\n\n:::\n\n```{.r .cell-code}\nround(VGAM::confintvglm(res_vglm, level = 0.95)[3, ], 3)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n 2.5 % 97.5 % \n 0.266 3.379 \n```\n\n\n:::\n:::\n\n\nThe output provides an estimate of difference between groups A and B (B-A), namely 1.823 (se=0.794). The presented p-value is a two-sided p-value based on the Z-test. Note that point estimate for the difference (and associated SE) are slightly different from the results obtained by `censReg` and `tobit` due to the difference in estimation procedure used. Wald based confidence intervals can be obtained by the `confintvglm` function. The $(Intercept):2$ in the model output is an estimate for $log(\\sigma)$.\n\n## Discussion\nThe results from the `censReg::censReg` and `survival::survreg` are similar. The `survival::survreg` allows for easy incorporation with the `emmeans` package (note: be aware that the standard approach with emmeans is based on the t-test and not the Z-test).\n\nThe `VGAM::vglm` approach provides slightly different results. This difference comes from the fact that a iteratively reweighted least squares (IRLS) algorithm is used for estimation.\n\n\n## Reference\n\nBreen, R. (1996). Regression models. SAGE Publications, Inc., https://doi.org/10.4135/9781412985611\n\nTobin, James (1958). \"Estimation of Relationships for Limited Dependent Variables\". Econometrica. 26 (1): 24-36. 
doi:10.2307/1907382\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-23\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P backports 1.5.0 2024-05-23 [?] RSPM\n P bdsmatrix 1.3-7 2024-03-02 [?] RSPM\n P broom * 1.0.12 2026-01-27 [?] RSPM\n P censReg * 0.5-38 2024-05-20 [?] RSPM\n P cli 3.6.5 2025-04-23 [?] RSPM\n codetools 0.2-20 2024-03-31 [2] CRAN (R 4.5.2)\n P collapse 2.1.6 2026-01-11 [?] RSPM\n P digest 0.6.39 2025-11-19 [?] RSPM\n P dplyr * 1.2.0 2026-02-03 [?] RSPM\n P emmeans * 2.0.1 2025-12-16 [?] RSPM\n P estimability 1.5.1 2024-05-12 [?] RSPM\n P evaluate 1.0.5 2025-08-27 [?] RSPM\n P fastmap 1.2.0 2024-05-15 [?] RSPM\n P Formula 1.2-5 2023-02-24 [?] RSPM\n P fs 1.6.6 2025-04-12 [?] RSPM\n P generics 0.1.4 2025-05-09 [?] RSPM\n P glmmML 1.1.7 2024-09-20 [?] RSPM\n P glue 1.8.0 2024-09-30 [?] RSPM\n P gt * 1.3.0 2026-01-22 [?] RSPM\n P htmltools 0.5.9 2025-12-04 [?] RSPM\n P htmlwidgets 1.6.4 2023-12-06 [?] RSPM\n P jsonlite 2.0.0 2025-03-27 [?] RSPM\n P knitr 1.51 2025-12-20 [?] RSPM\n lattice 0.22-7 2025-04-02 [2] CRAN (R 4.5.2)\n P lifecycle 1.0.5 2026-01-08 [?] RSPM\n P lmtest 0.9-40 2022-03-21 [?] RSPM\n P magrittr 2.0.4 2025-09-12 [?] RSPM\n MASS 7.3-65 2025-02-28 [2] CRAN (R 4.5.2)\n Matrix 1.7-4 2025-08-28 [2] CRAN (R 4.5.2)\n P maxLik * 1.5-2.2 2025-12-29 [?] RSPM\n P miscTools * 0.6-30 2026-01-20 [?] 
RSPM\n P multcomp 1.4-29 2025-10-20 [?] RSPM\n P mvtnorm 1.3-3 2025-01-10 [?] RSPM\n nlme 3.1-168 2025-03-31 [2] CRAN (R 4.5.2)\n P otel 0.2.0 2025-08-29 [?] RSPM\n P pillar 1.11.1 2025-09-17 [?] RSPM\n P pkgconfig 2.0.3 2019-09-22 [?] RSPM\n P plm 2.6-7 2025-11-13 [?] RSPM\n P purrr 1.2.1 2026-01-09 [?] RSPM\n P R6 2.6.1 2025-02-15 [?] RSPM\n P rbibutils 2.4.1 2026-01-21 [?] RSPM\n P Rcpp 1.1.1 2026-01-10 [?] RSPM\n P Rdpack 2.6.6 2026-02-08 [?] RSPM\n renv 1.0.10 2024-10-05 [1] RSPM (R 4.5.2)\n P rlang 1.1.7 2026-01-09 [?] RSPM\n P rmarkdown 2.30 2025-09-28 [?] RSPM\n P sandwich 3.1-1 2024-09-15 [?] RSPM\n P sass 0.4.10 2025-04-11 [?] RSPM\n P sessioninfo 1.2.3 2025-02-05 [?] RSPM\n survival * 3.8-3 2024-12-17 [2] CRAN (R 4.5.2)\n P TH.data 1.1-5 2025-11-17 [?] RSPM\n P tibble 3.3.1 2026-01-11 [?] RSPM\n P tidyr 1.3.2 2025-12-19 [?] RSPM\n P tidyselect 1.2.1 2024-03-11 [?] RSPM\n P vctrs 0.7.1 2026-01-23 [?] RSPM\n P VGAM * 1.1-14 2025-12-04 [?] RSPM\n P withr 3.0.2 2024-10-28 [?] RSPM\n P xfun 0.56 2026-01-18 [?] RSPM\n P xml2 1.5.2 2026-01-17 [?] RSPM\n P xtable 1.8-4 2019-04-21 [?] RSPM\n P yaml 2.3.12 2025-12-10 [?] RSPM\n P zoo 1.8-15 2025-12-15 [?] RSPM\n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n\n * ── Packages attached to the search path.\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n\n:::\n", + "markdown": "---\ntitle: \"Tobit Regression\"\n---\n\n# Tobit regression\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# General\nlibrary(dplyr)\nlibrary(gt)\nlibrary(broom)\n\n# Methodology specific\nlibrary(emmeans)\nlibrary(censReg)\nlibrary(survival)\nlibrary(VGAM)\n```\n:::\n\n\n## Tobit model\n\nCensoring occurs when data on the dependent variable is only partially known. 
For example, in virology, sample results could be below the lower limit of detection (eg, 100 copies/mL) and in such a case we only know that the sample result is <100 copies/mL, but we don't know the exact value.\n\nLet $y^{*}$ be the the true underlying latent variable, and $y$ the observed variable. We discuss here censoring on the left:\n\n$$\ny =\n\\begin{cases}\n y^{*}, & y^{*} > \\tau \\\\\n \\tau, & y^{*} \\leq \\tau\n \\end{cases} \n$$\nWe consider tobit regression with a censored normal distribution. The model equation is\n$$\ny_{i}^{*} = X_{i}\\beta + \\epsilon_{i} \n$$\nwith $\\epsilon_{i} \\sim N(0,\\sigma^2)$. But we only observe $y = max(\\tau, y^{*})$.\nThe tobit model uses maximum likelihood estimation (for details see for example Breen, 1996). It is important to note that $\\beta$ estimates the effect of $x$ on the latent variable $y^{*}$, and not on the observed value $y$.\n\n\n## Data used\n\nWe assume two equally sized groups (n=10 in each group). The data is censored on the left at a value of $\\tau=8.0$.\nIn group A 4/10 records are censored, and 1/10 in group B.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndat_used = tribble(\n ~ID, ~ARM, ~Y, ~CENS,\n \"001\", \"A\", 8.0, 1, \n \"002\", \"A\", 8.0, 1,\n \"003\", \"A\", 8.0, 1,\n \"004\", \"A\", 8.0, 1,\n \"005\", \"A\", 8.9, 0,\n \"006\", \"A\", 9.5, 0,\n \"007\", \"A\", 9.9, 0,\n \"008\", \"A\", 10.3, 0,\n \"009\", \"A\", 11.0, 0,\n \"010\", \"A\", 11.2, 0,\n \"011\", \"B\", 8.0, 1, \n \"012\", \"B\", 9.2, 0,\n \"013\", \"B\", 9.9, 0,\n \"014\", \"B\", 10.0, 0,\n \"015\", \"B\", 10.6, 0,\n \"016\", \"B\", 10.6, 0,\n \"017\", \"B\", 11.3, 0,\n \"018\", \"B\", 11.8, 0,\n \"019\", \"B\", 12.9, 0,\n \"020\", \"B\", 13.0, 0,\n)\n\ngt(dat_used)\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n\n\n\n \n \n
IDARMYCENS
001A8.01
002A8.01
003A8.01
004A8.01
005A8.90
006A9.50
007A9.90
008A10.30
009A11.00
010A11.20
011B8.01
012B9.20
013B9.90
014B10.00
015B10.60
016B10.60
017B11.30
018B11.80
019B12.90
020B13.00
\n
\n```\n\n:::\n:::\n\n\n## Example Code using R\n\nThe analysis will be based on a Tobit analysis of variance with $Y$, rounded to 1 decimal places, as dependent variable and study group as a fixed covariate. A normally distributed error term will be used. Values will be left censored at the value 8.0.\n\nSeveral R functions and packages are presented.\n\n### censReg\n\nThe `censReg` function from the `censReg` package performs tobit models for left and right censored. The model is estimated by Maximum Likelihood (ML) assuming a Gaussian (normal) distribution of the error term. The maximization of the likelihood function is done by function `maxLik` of the `maxLik` package. The optimization method can be changed.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nres_censreg = censReg::censReg(Y ~ ARM, left = 8.0, data = dat_used)\nsummary(res_censreg)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\nCall:\ncensReg::censReg(formula = Y ~ ARM, left = 8, data = dat_used)\n\nObservations:\n Total Left-censored Uncensored Right-censored \n 20 5 15 0 \n\nCoefficients:\n Estimate Std. error t value Pr(> t) \n(Intercept) 8.8323 0.5918 14.925 < 2e-16 ***\nARMB 1.8225 0.8061 2.261 0.02376 * \nlogSigma 0.5491 0.1947 2.819 0.00481 ** \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\nNewton-Raphson maximisation, 4 iterations\nReturn code 1: gradient close to zero (gradtol)\nLog-likelihood: -34.3154 on 3 Df\n```\n\n\n:::\n\n```{.r .cell-code}\n# Difference between groups (Wald CIs)\nround(res_censreg$estimate[2], 3)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n ARMB \n1.823 \n```\n\n\n:::\n\n```{.r .cell-code}\nround(stats::confint(res_censreg, level = 0.95)[2, ], 3)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n 2.5 % 97.5 % \n 0.243 3.402 \n```\n\n\n:::\n:::\n\n\nThe output provides an estimate of difference between groups A and B (B-A), namely 1.8225 (se=0.8061). The presented p-value is a two-sided p-value based on the Z-test. 
The output also provides an estimate for $log(\\sigma) = 0.5491$. Wald based confidence intervals can be obtained by the `stats::confint` function.\n\n### survreg\nUsing the `survreg` function from the `survival` package a tobit model can be fit. For more information, refer to the [survival package](https://cran.r-project.org/web/packages/survival/index.html).\n\n\n::: {.cell}\n\n```{.r .cell-code}\nres_survreg = survival::survreg(\n survival::Surv(Y, 1 - CENS, type = \"left\") ~ ARM,\n dist = \"gaussian\",\n data = dat_used\n)\nsummary(res_survreg)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\nCall:\nsurvival::survreg(formula = survival::Surv(Y, 1 - CENS, type = \"left\") ~ \n ARM, data = dat_used, dist = \"gaussian\")\n Value Std. Error z p\n(Intercept) 8.832 0.592 14.92 <2e-16\nARMB 1.823 0.806 2.26 0.0238\nLog(scale) 0.549 0.195 2.82 0.0048\n\nScale= 1.73 \n\nGaussian distribution\nLoglik(model)= -34.3 Loglik(intercept only)= -36.7\n\tChisq= 4.72 on 1 degrees of freedom, p= 0.03 \nNumber of Newton-Raphson Iterations: 4 \nn= 20 \n```\n\n\n:::\n\n```{.r .cell-code}\n# Least square means by group\nlsm = emmeans::emmeans(res_survreg, specs = trt.vs.ctrl ~ ARM)\nlsm$emmeans\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n ARM emmean SE df lower.CL upper.CL\n A 8.83 0.592 17 7.58 10.1\n B 10.65 0.552 17 9.49 11.8\n\nResults are given on the ::.survival.Surv (not the response) scale. \nConfidence level used: 0.95 \n```\n\n\n:::\n\n```{.r .cell-code}\n# Difference between groups\nlsm_contrast = broom::tidy(lsm$contrasts, conf.int = TRUE, conf.level = 0.95)\nlsm_contrast |>\n gt() |>\n fmt_number(decimals = 3)\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n\n \n \n
termcontrastnull.valueestimatestd.errordfconf.lowconf.highstatisticp.value
ARMB - A0.0001.8230.80617.0000.1223.5232.2610.037
\n
\n```\n\n:::\n\n```{.r .cell-code}\n# Wald-based CIs\nround(stats::confint(res_survreg, level = 0.95)[2, ], 3)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n 2.5 % 97.5 % \n 0.243 3.402 \n```\n\n\n:::\n:::\n\n\nThe output provides an estimate of difference between groups A and B (B-A), namely 1.823 (se=0.806). The presented p-value is a two-sided p-value based on the Z-test. The output also provides an estimate for $log(\\sigma) = 0.549$. Using the `emmeans` package/function least square means and contrast can be easily obtained. The confidence intervals and p-value is based on the t-test using `emmeans`. Wald based confidence intervals can be obtained by the `stats::confint` function.\n\n### vglm\n\nThe `VGAM` package provides functions for fitting vector generalized linear and additive models (VGLMs and VGAMs). This package centers on the iteratively reweighted least squares (IRLS) algorithm. The `vglm` function offers the possibility to fit a tobit model.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nres_vglm = VGAM::vglm(Y ~ ARM, tobit(Lower = 8.0), data = dat_used)\nsummary(res_vglm)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\nCall:\nVGAM::vglm(formula = Y ~ ARM, family = tobit(Lower = 8), data = dat_used)\n\nCoefficients: \n Estimate Std. Error z value Pr(>|z|) \n(Intercept):1 8.8323 0.5727 15.422 < 2e-16 ***\n(Intercept):2 0.5491 0.1807 3.039 0.00238 ** \nARMB 1.8226 0.7942 2.295 0.02173 * \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n\nNames of linear predictors: mu, loglink(sd)\n\nLog-likelihood: -34.3154 on 37 degrees of freedom\n\nNumber of Fisher scoring iterations: 7 \n```\n\n\n:::\n\n```{.r .cell-code}\n# Difference between groups\nround(res_vglm@coefficients[3], 3)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n ARMB \n1.823 \n```\n\n\n:::\n\n```{.r .cell-code}\nround(VGAM::confintvglm(res_vglm, level = 0.95)[3, ], 3)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n 2.5 % 97.5 % \n 0.266 3.379 \n```\n\n\n:::\n:::\n\n\nThe output provides an estimate of difference between groups A and B (B-A), namely 1.823 (se=0.794). The presented p-value is a two-sided p-value based on the Z-test. Note that point estimate for the difference (and associated SE) are slightly different from the results obtained by `censReg` and `tobit` due to the difference in estimation procedure used. Wald based confidence intervals can be obtained by the `confintvglm` function. The $(Intercept):2$ in the model output is an estimate for $log(\\sigma)$.\n\n## Discussion\nThe results from the `censReg::censReg` and `survival::survreg` are similar. The `survival::survreg` allows for easy incorporation with the `emmeans` package (note: be aware that the standard approach with emmeans is based on the t-test and not the Z-test).\n\nThe `VGAM::vglm` approach provides slightly different results. This difference comes from the fact that a iteratively reweighted least squares (IRLS) algorithm is used for estimation.\n\n\n## Reference\n\nBreen, R. (1996). Regression models. SAGE Publications, Inc., https://doi.org/10.4135/9781412985611\n\nTobin, James (1958). \"Estimation of Relationships for Limited Dependent Variables\". Econometrica. 26 (1): 24-36. 
doi:10.2307/1907382\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os Ubuntu 24.04.4 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-26\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P backports 1.5.0 2024-05-23 [?] RSPM (R 4.5.0)\n P bdsmatrix 1.3-7 2024-03-02 [?] RSPM (R 4.5.0)\n P broom * 1.0.12 2026-01-27 [?] RSPM (R 4.5.0)\n P censReg * 0.5-38 2024-05-20 [?] RSPM (R 4.5.0)\n P cli 3.6.5 2025-04-23 [?] RSPM (R 4.5.0)\n codetools 0.2-20 2024-03-31 [2] CRAN (R 4.5.2)\n P collapse 2.1.6 2026-01-11 [?] RSPM (R 4.5.0)\n P digest 0.6.39 2025-11-19 [?] RSPM (R 4.5.0)\n P dplyr * 1.2.0 2026-02-03 [?] RSPM (R 4.5.0)\n P emmeans * 2.0.1 2025-12-16 [?] RSPM (R 4.5.0)\n P estimability 1.5.1 2024-05-12 [?] RSPM (R 4.5.0)\n P evaluate 1.0.5 2025-08-27 [?] RSPM (R 4.5.0)\n P fastmap 1.2.0 2024-05-15 [?] RSPM (R 4.5.0)\n P Formula 1.2-5 2023-02-24 [?] RSPM (R 4.5.0)\n P fs 1.6.6 2025-04-12 [?] RSPM (R 4.5.0)\n P generics 0.1.4 2025-05-09 [?] RSPM (R 4.5.0)\n P glmmML 1.1.7 2024-09-20 [?] RSPM (R 4.5.0)\n P glue 1.8.0 2024-09-30 [?] RSPM (R 4.5.0)\n P gt * 1.3.0 2026-01-22 [?] RSPM (R 4.5.0)\n P htmltools 0.5.9 2025-12-04 [?] RSPM (R 4.5.0)\n P htmlwidgets 1.6.4 2023-12-06 [?] RSPM (R 4.5.0)\n P jsonlite 2.0.0 2025-03-27 [?] RSPM (R 4.5.0)\n P knitr 1.51 2025-12-20 [?] RSPM (R 4.5.0)\n P lattice 0.22-7 2025-04-02 [?] RSPM (R 4.5.0)\n P lifecycle 1.0.5 2026-01-08 [?] RSPM (R 4.5.0)\n P lmtest 0.9-40 2022-03-21 [?] RSPM (R 4.5.0)\n P magrittr 2.0.4 2025-09-12 [?] 
RSPM (R 4.5.0)\n MASS 7.3-65 2025-02-28 [2] CRAN (R 4.5.2)\n Matrix 1.7-4 2025-08-28 [2] CRAN (R 4.5.2)\n P maxLik * 1.5-2.2 2025-12-29 [?] RSPM (R 4.5.0)\n P miscTools * 0.6-30 2026-01-20 [?] RSPM (R 4.5.0)\n P multcomp 1.4-29 2025-10-20 [?] RSPM (R 4.5.0)\n P mvtnorm 1.3-3 2025-01-10 [?] RSPM (R 4.5.0)\n nlme 3.1-168 2025-03-31 [2] CRAN (R 4.5.2)\n P otel 0.2.0 2025-08-29 [?] RSPM (R 4.5.0)\n P pillar 1.11.1 2025-09-17 [?] RSPM (R 4.5.0)\n P pkgconfig 2.0.3 2019-09-22 [?] RSPM (R 4.5.0)\n P plm 2.6-7 2025-11-13 [?] RSPM (R 4.5.0)\n P purrr 1.2.1 2026-01-09 [?] RSPM (R 4.5.0)\n P R6 2.6.1 2025-02-15 [?] RSPM (R 4.5.0)\n P rbibutils 2.4.1 2026-01-21 [?] RSPM (R 4.5.0)\n P Rcpp 1.1.1 2026-01-10 [?] RSPM (R 4.5.0)\n P Rdpack 2.6.6 2026-02-08 [?] RSPM (R 4.5.0)\n renv 1.0.10 2024-10-05 [1] RSPM (R 4.5.3)\n P rlang 1.1.7 2026-01-09 [?] RSPM (R 4.5.0)\n P rmarkdown 2.30 2025-09-28 [?] RSPM (R 4.5.0)\n P sandwich 3.1-1 2024-09-15 [?] RSPM (R 4.5.0)\n P sass 0.4.10 2025-04-11 [?] RSPM (R 4.5.0)\n P sessioninfo 1.2.2 2021-12-06 [?] RSPM (R 4.5.0)\n P survival * 3.8-3 2024-12-17 [?] RSPM (R 4.5.0)\n P TH.data 1.1-5 2025-11-17 [?] RSPM (R 4.5.0)\n P tibble 3.3.1 2026-01-11 [?] RSPM (R 4.5.0)\n P tidyr 1.3.2 2025-12-19 [?] RSPM (R 4.5.0)\n P tidyselect 1.2.1 2024-03-11 [?] RSPM (R 4.5.0)\n P vctrs 0.7.1 2026-01-23 [?] RSPM (R 4.5.0)\n P VGAM * 1.1-14 2025-12-04 [?] RSPM (R 4.5.0)\n P withr 3.0.2 2024-10-28 [?] RSPM (R 4.5.0)\n P xfun 0.56 2026-01-18 [?] RSPM (R 4.5.0)\n P xml2 1.5.2 2026-01-17 [?] RSPM (R 4.5.0)\n P xtable 1.8-4 2019-04-21 [?] RSPM (R 4.5.0)\n P yaml 2.3.12 2025-12-10 [?] RSPM (R 4.5.0)\n P zoo 1.8-15 2025-12-15 [?] 
RSPM (R 4.5.0)\n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.2/lib/R/library\n\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n\n:::\n", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/R/ttest_1Sample/execute-results/html.json b/_freeze/R/ttest_1Sample/execute-results/html.json index d58a70643..e66e53bf7 100644 --- a/_freeze/R/ttest_1Sample/execute-results/html.json +++ b/_freeze/R/ttest_1Sample/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "7071de222d88aac85b3ac3d2969448d0", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"One Sample t-test\"\n---\n\n\n\nThe One Sample t-test is used to compare a single sample against an expected hypothesis value. In the One Sample t-test, the mean of the sample is compared against the hypothesis value. In R, a One Sample t-test can be performed using the Base R `t.test()` from the **stats** package or the `proc_ttest()` function from the **procs** package.\n\n### Data Used\n\nThe following data was used in this example.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Create sample data\nread <- tibble::tribble(\n ~score, ~count,\n 40, 2, 47, 2, 52, 2, 26, 1, 19, 2,\n 25, 2, 35, 4, 39, 1, 26, 1, 48, 1,\n 14, 2, 22, 1, 42, 1, 34, 2 , 33, 2,\n 18, 1, 15, 1, 29, 1, 41, 2, 44, 1,\n 51, 1, 43, 1, 27, 2, 46, 2, 28, 1,\n 49, 1, 31, 1, 28, 1, 54, 1, 45, 1\n)\n```\n:::\n\n\n## Normal Data {#normal}\n\nBy default, the R one sample t-test functions assume normality in the data and use a classic Student's t-test.\n\n### Base R\n\n#### Code\n\nThe following code was used to test the comparison in Base R. 
Note that the baseline null hypothesis goes in the \"mu\" parameter.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Perform t-test\nstats::t.test(read$score, mu = 30)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tOne Sample t-test\n\ndata: read$score\nt = 2.3643, df = 29, p-value = 0.02497\nalternative hypothesis: true mean is not equal to 30\n95 percent confidence interval:\n 30.67928 39.38739\nsample estimates:\nmean of x \n 35.03333 \n```\n\n\n:::\n:::\n\n\n### Procs Package\n\n#### Code\n\nThe following code from the **procs** package was used to perform a one sample t-test. Note that the null hypothesis value goes in the \"options\" parameter.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(procs)\n\n# Perform t-test\nprocs::proc_ttest(read, var = score, options = c(\"h0\" = 30))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n$Statistics\n VAR N MEAN STD STDERR MIN MAX\n1 score 30 35.03333 11.66038 2.128884 14 54\n\n$ConfLimits\n VAR MEAN LCLM UCLM STD LCLMSTD UCLMSTD\n1 score 35.03333 30.67928 39.38739 11.66038 9.286404 15.67522\n\n$TTests\n VAR DF T PROBT\n1 score 29 2.364306 0.0249741\n```\n\n\n:::\n:::\n\n\nViewer Output:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ttest/onesample_rtest1.png){fig-align='center' width=50%}\n:::\n:::\n\n\n## Lognormal Data {#lognormal}\n\nThe Base R `t.test()` function does not have an option for lognormal data. Likewise, the **procs** `proc_ttest()` function also does not have an option for lognormal data.\n\nOne possibility may be the `tTestLnormAltPower()` function from the **EnvStats** package. 
This package has not been evaluated yet.\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-23\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P knitr 1.51 2025-12-20 [?] RSPM\n P procs * 1.0.7 2025-07-27 [?] RSPM\n P tibble 3.3.1 2026-01-11 [?] RSPM\n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n\n * ── Packages attached to the search path.\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n:::", + "markdown": "---\ntitle: \"One Sample t-test\"\n---\n\n\n\nThe One Sample t-test is used to compare a single sample against an expected hypothesis value. In the One Sample t-test, the mean of the sample is compared against the hypothesis value. 
In R, a One Sample t-test can be performed using the Base R `t.test()` from the **stats** package or the `proc_ttest()` function from the **procs** package.\n\n### Data Used\n\nThe following data was used in this example.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Create sample data\nread <- tibble::tribble(\n ~score, ~count,\n 40, 2, 47, 2, 52, 2, 26, 1, 19, 2,\n 25, 2, 35, 4, 39, 1, 26, 1, 48, 1,\n 14, 2, 22, 1, 42, 1, 34, 2 , 33, 2,\n 18, 1, 15, 1, 29, 1, 41, 2, 44, 1,\n 51, 1, 43, 1, 27, 2, 46, 2, 28, 1,\n 49, 1, 31, 1, 28, 1, 54, 1, 45, 1\n)\n```\n:::\n\n\n## Normal Data {#normal}\n\nBy default, the R one sample t-test functions assume normality in the data and use a classic Student's t-test.\n\n### Base R\n\n#### Code\n\nThe following code was used to test the comparison in Base R. Note that the baseline null hypothesis goes in the \"mu\" parameter.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Perform t-test\nstats::t.test(read$score, mu = 30)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tOne Sample t-test\n\ndata: read$score\nt = 2.3643, df = 29, p-value = 0.02497\nalternative hypothesis: true mean is not equal to 30\n95 percent confidence interval:\n 30.67928 39.38739\nsample estimates:\nmean of x \n 35.03333 \n```\n\n\n:::\n:::\n\n\n### Procs Package\n\n#### Code\n\nThe following code from the **procs** package was used to perform a one sample t-test. 
Note that the null hypothesis value goes in the \"options\" parameter.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(procs)\n\n# Perform t-test\nprocs::proc_ttest(read, var = score, options = c(\"h0\" = 30))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n$Statistics\n VAR N MEAN STD STDERR MIN MAX\n1 score 30 35.03333 11.66038 2.128884 14 54\n\n$ConfLimits\n VAR MEAN LCLM UCLM STD LCLMSTD UCLMSTD\n1 score 35.03333 30.67928 39.38739 11.66038 9.286404 15.67522\n\n$TTests\n VAR DF T PROBT\n1 score 29 2.364306 0.0249741\n```\n\n\n:::\n:::\n\n\nViewer Output:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ttest/onesample_rtest1.png){fig-align='center' width=50%}\n:::\n:::\n\n\n## Lognormal Data {#lognormal}\n\nThe Base R `t.test()` function does not have an option for lognormal data. Likewise, the **procs** `proc_ttest()` function also does not have an option for lognormal data.\n\nOne possibility may be the `tTestLnormAltPower()` function from the **EnvStats** package. This package has not been evaluated yet.\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os Ubuntu 24.04.4 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-26\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P knitr 1.51 2025-12-20 [?] RSPM (R 4.5.0)\n P procs * 1.0.7 2025-07-27 [?] RSPM (R 4.5.0)\n P tibble 3.3.1 2026-01-11 [?] 
RSPM (R 4.5.0)\n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.2/lib/R/library\n\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n:::", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/R/ttest_2Sample/execute-results/html.json b/_freeze/R/ttest_2Sample/execute-results/html.json index 68038ca6b..b98aa4025 100644 --- a/_freeze/R/ttest_2Sample/execute-results/html.json +++ b/_freeze/R/ttest_2Sample/execute-results/html.json @@ -2,10 +2,8 @@ "hash": "7f6be7c4d57b8675807cbc154d96c507", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Two Sample t-test\"\noutput: html_document\n---\n\n\n\n# **Two Sample t-test in R**\n\nThe Two Sample t-test is used to compare two independent samples against each other. In the Two Sample t-test, the mean of the first sample is compared against the mean of the second sample. 
In R, a Two Sample t-test can be performed using the Base R `t.test()` function from the **stats** package or the `proc_ttest()` function from the **procs** package.\n\n### Data Used\n\nThe following data was used in this example.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Create sample data\nd1 <- tibble::tribble(\n ~trt_grp,\t~WtGain,\n \"placebo\",\t94, \"placebo\",\t12, \"placebo\",\t26, \"placebo\",\t89,\n \"placebo\",\t88, \"placebo\",\t96, \"placebo\",\t85, \"placebo\",\t130,\n \"placebo\",\t75, \"placebo\",\t54, \"placebo\",\t112, \"placebo\",\t69,\n \"placebo\",\t104, \"placebo\",\t95, \"placebo\",\t53, \"placebo\",\t21,\n \"treatment\",\t45, \"treatment\",\t62, \"treatment\",\t96, \"treatment\",\t128,\n \"treatment\",\t120, \"treatment\",\t99, \"treatment\",\t28, \"treatment\",\t50,\n \"treatment\",\t109, \"treatment\",\t115, \"treatment\",\t39, \"treatment\",\t96,\n \"treatment\",\t87, \"treatment\",\t100, \"treatment\",\t76, \"treatment\",\t80\n)\n```\n:::\n\n\n## Base R\n\nIf we have normalized data, we can use the classic Student's t-test. For a Two sample test where the variances are not equal, we should use the Welch's t-test. Both of those options are available with the Base R `t.test()` function.\n\n### Student's T-Test {#baseS}\n\n#### Code\n\nThe following code was used to test the comparison in Base R. By default, the R two sample t-test function assumes the variances in the data are unequal, and uses a Welch's t-test. Therefore, to use a classic Student's t-test with normalized data, we must specify `var.equal = TRUE`. 
Also note that we must separate the single variable into two variables to satisfy the `t.test()` syntax and set `paired = FALSE`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nd1p <- dplyr::filter(d1, trt_grp == 'placebo')\nd1t <- dplyr::filter(d1, trt_grp == 'treatment')\n\n# Perform t-test\nstats::t.test(d1p$WtGain, d1t$WtGain, var.equal = TRUE, paired = FALSE)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tTwo Sample t-test\n\ndata: d1p$WtGain and d1t$WtGain\nt = -0.6969, df = 30, p-value = 0.4912\nalternative hypothesis: true difference in means is not equal to 0\n95 percent confidence interval:\n -31.19842 15.32342\nsample estimates:\nmean of x mean of y \n 75.1875 83.1250 \n```\n\n\n:::\n:::\n\n\n### Welch's T-Test {#baseW}\n\n#### Code\n\nThe following code was used to test the comparison in Base R using Welch's t-test. Observe that in this case, the `var.equal` parameter is set to FALSE. The Satterthwaite approximation is used to calculate the effective degrees of freedom.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nd1p <- dplyr::filter(d1, trt_grp == 'placebo')\nd1t <- dplyr::filter(d1, trt_grp == 'treatment')\n\n# Perform t-test\nstats::t.test(d1p$WtGain, d1t$WtGain, var.equal = FALSE, paired = FALSE)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tWelch Two Sample t-test\n\ndata: d1p$WtGain and d1t$WtGain\nt = -0.6969, df = 29.694, p-value = 0.4913\nalternative hypothesis: true difference in means is not equal to 0\n95 percent confidence interval:\n -31.20849 15.33349\nsample estimates:\nmean of x mean of y \n 75.1875 83.1250 \n```\n\n\n:::\n:::\n\n\n\n## Procs Package\n\n### Student's T-Test and Welch's T-Test {#procs}\n\n#### Code\n\nThe following code from the **procs** package was used to perform a two sample t-test. Note that the `proc_ttest()` function performs both the Student's t-test and Welch's t-test (the Satterthwaite approximation is used to calculate the effective degrees of freedom) in the same call. 
The results are displayed on separate rows. This output is similar to SAS.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Perform t-test\nprocs::proc_ttest(d1, var = WtGain, class = trt_grp)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n$Statistics\n VAR CLASS METHOD N MEAN STD STDERR MIN MAX\n1 WtGain placebo 16 75.1875 33.81167 8.452918 12 130\n2 WtGain treatment 16 83.1250 30.53495 7.633738 28 128\n3 WtGain Diff (1-2) Pooled NA -7.9375 NA 11.389723 NA NA\n4 WtGain Diff (1-2) Satterthwaite NA -7.9375 NA 11.389723 NA NA\n\n$ConfLimits\n VAR CLASS METHOD MEAN LCLM UCLM STD LCLMSTD\n1 WtGain placebo 75.1875 57.17053 93.20447 33.81167 24.97685\n2 WtGain treatment 83.1250 66.85407 99.39593 30.53495 22.55632\n3 WtGain Diff (1-2) Pooled -7.9375 -31.19842 15.32342 NA NA\n4 WtGain Diff (1-2) Satterthwaite -7.9375 -31.20849 15.33349 NA NA\n UCLMSTD\n1 52.33003\n2 47.25868\n3 NA\n4 NA\n\n$TTests\n VAR METHOD VARIANCES DF T PROBT\n1 WtGain Pooled Equal 30.00000 -0.6969002 0.4912306\n2 WtGain Satterthwaite Unequal 29.69359 -0.6969002 0.4912856\n\n$Equality\n VAR METHOD NDF DDF FVAL PROBF\n1 WtGain Folded F 15 15 1.226136 0.6980614\n```\n\n\n:::\n:::\n\n\nViewer Output:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ttest/twosample_rtest1.png){fig-align='center' width=50%}\n:::\n:::\n\n\n\n## Example with unequal variances\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Create sample data\nd2 <- tibble::tribble(\n ~trt_grp,\t~WtGain,\n \"placebo\",\t14, \"placebo\",\t15, \"placebo\",\t15, \"placebo\",\t15,\n \"placebo\",\t16, \"placebo\",\t18, \"placebo\",\t22, \"placebo\",\t23,\n \"placebo\",\t24, \"placebo\",\t25, \"placebo\",\t25,\n \"treatment\",\t10, \"treatment\",\t12, \"treatment\",\t14, \"treatment\",\t15,\n \"treatment\",\t18, \"treatment\",\t22, \"treatment\",\t24, \"treatment\",\t27,\n \"treatment\",\t31, \"treatment\",\t33, \"treatment\",\t34, \"treatment\",\t34,\n \"treatment\",\t34,\n)\n\nd2p <- dplyr::filter(d2, trt_grp == 
'placebo')\nd2t <- dplyr::filter(d2, trt_grp == 'treatment')\n\n# Perform t-test\nstats::t.test(d2p$WtGain, d2t$WtGain, var.equal = FALSE, paired = FALSE)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tWelch Two Sample t-test\n\ndata: d2p$WtGain and d2t$WtGain\nt = -1.5379, df = 18.137, p-value = 0.1413\nalternative hypothesis: true difference in means is not equal to 0\n95 percent confidence interval:\n -10.453875 1.614714\nsample estimates:\nmean of x mean of y \n 19.27273 23.69231 \n```\n\n\n:::\n\n```{.r .cell-code}\nprocs::proc_ttest(d2, var = WtGain, class = trt_grp)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n$Statistics\n VAR CLASS METHOD N MEAN STD STDERR MIN MAX\n1 WtGain placebo 11 19.27273 4.518648 1.362424 14 25\n2 WtGain treatment 13 23.69231 9.123090 2.530290 10 34\n3 WtGain Diff (1-2) Pooled NA -4.41958 NA 3.029355 NA NA\n4 WtGain Diff (1-2) Satterthwaite NA -4.41958 NA 2.873772 NA NA\n\n$ConfLimits\n VAR CLASS METHOD MEAN LCLM UCLM STD\n1 WtGain placebo 19.27273 16.23706 22.308396 4.518648\n2 WtGain treatment 23.69231 18.17928 29.205336 9.123090\n3 WtGain Diff (1-2) Pooled -4.41958 -10.70208 1.862918 NA\n4 WtGain Diff (1-2) Satterthwaite -4.41958 -10.45387 1.614714 NA\n LCLMSTD UCLMSTD\n1 3.157257 7.929927\n2 6.542040 15.059805\n3 NA NA\n4 NA NA\n\n$TTests\n VAR METHOD VARIANCES DF T PROBT\n1 WtGain Pooled Equal 22.00000 -1.458918 0.1587188\n2 WtGain Satterthwaite Unequal 18.13738 -1.537902 0.1413355\n\n$Equality\n VAR METHOD NDF DDF FVAL PROBF\n1 WtGain Folded F 12 10 4.076307 0.03338774\n```\n\n\n:::\n:::\n\n\nViewer Output:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ttest/twosample_rtest2.png){fig-align='center' width=70%}\n:::\n:::\n\n\n\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 
(2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-26\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P cli 3.6.5 2025-04-23 [?] RSPM\n P common * 1.1.4 2025-12-08 [?] RSPM\n P crayon 1.5.3 2024-06-20 [?] RSPM\n P dplyr * 1.2.0 2026-02-03 [?] RSPM\n P fmtr 1.7.2 2026-01-25 [?] RSPM\n P generics 0.1.4 2025-05-09 [?] RSPM\n P glue 1.8.0 2024-09-30 [?] RSPM\n P jpeg 0.1-11 2025-03-21 [?] RSPM\n P lifecycle 1.0.5 2026-01-08 [?] RSPM\n P magrittr 2.0.4 2025-09-12 [?] RSPM\n P mvtnorm 1.3-3 2025-01-10 [?] RSPM\n P pillar 1.11.1 2025-09-17 [?] RSPM\n P pkgconfig 2.0.3 2019-09-22 [?] RSPM\n P procs * 1.0.7 2025-07-27 [?] RSPM\n P R6 2.6.1 2025-02-15 [?] RSPM\n P Rcpp 1.1.1 2026-01-10 [?] RSPM\n P reporter 1.4.6 2026-02-07 [?] RSPM\n P rlang 1.1.7 2026-01-09 [?] RSPM\n P sasLM 0.10.7 2025-09-28 [?] RSPM\n P stringi 1.8.7 2025-03-27 [?] RSPM\n P tibble * 3.3.1 2026-01-11 [?] RSPM\n P tidyselect 1.2.1 2024-03-11 [?] RSPM\n utf8 1.2.6 2025-06-08 [1] RSPM\n P vctrs 0.7.1 2026-01-23 [?] RSPM\n P withr 3.0.2 2024-10-28 [?] RSPM\n P zip 2.3.3 2025-05-13 [?] 
RSPM\n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n\n * ── Packages attached to the search path.\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n:::", - "supporting": [ - "ttest_2Sample_files" - ], + "markdown": "---\ntitle: \"Two Sample t-test\"\noutput: html_document\n---\n\n\n\n# **Two Sample t-test in R**\n\nThe Two Sample t-test is used to compare two independent samples against each other. In the Two Sample t-test, the mean of the first sample is compared against the mean of the second sample. In R, a Two Sample t-test can be performed using the Base R `t.test()` function from the **stats** package or the `proc_ttest()` function from the **procs** package.\n\n### Data Used\n\nThe following data was used in this example.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Create sample data\nd1 <- tibble::tribble(\n ~trt_grp,\t~WtGain,\n \"placebo\",\t94, \"placebo\",\t12, \"placebo\",\t26, \"placebo\",\t89,\n \"placebo\",\t88, \"placebo\",\t96, \"placebo\",\t85, \"placebo\",\t130,\n \"placebo\",\t75, \"placebo\",\t54, \"placebo\",\t112, \"placebo\",\t69,\n \"placebo\",\t104, \"placebo\",\t95, \"placebo\",\t53, \"placebo\",\t21,\n \"treatment\",\t45, \"treatment\",\t62, \"treatment\",\t96, \"treatment\",\t128,\n \"treatment\",\t120, \"treatment\",\t99, \"treatment\",\t28, \"treatment\",\t50,\n \"treatment\",\t109, \"treatment\",\t115, \"treatment\",\t39, \"treatment\",\t96,\n \"treatment\",\t87, \"treatment\",\t100, \"treatment\",\t76, \"treatment\",\t80\n)\n```\n:::\n\n\n## Base R\n\nIf we have normalized data, we can use the classic Student's t-test. For a Two sample test where the variances are not equal, we should use the Welch's t-test. 
Both of those options are available with the Base R `t.test()` function.\n\n### Student's T-Test {#baseS}\n\n#### Code\n\nThe following code was used to test the comparison in Base R. By default, the R two sample t-test function assumes the variances in the data are unequal, and uses a Welch's t-test. Therefore, to use a classic Student's t-test with normalized data, we must specify `var.equal = TRUE`. Also note that we must separate the single variable into two variables to satisfy the `t.test()` syntax and set `paired = FALSE`.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nd1p <- dplyr::filter(d1, trt_grp == 'placebo')\nd1t <- dplyr::filter(d1, trt_grp == 'treatment')\n\n# Perform t-test\nstats::t.test(d1p$WtGain, d1t$WtGain, var.equal = TRUE, paired = FALSE)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tTwo Sample t-test\n\ndata: d1p$WtGain and d1t$WtGain\nt = -0.6969, df = 30, p-value = 0.4912\nalternative hypothesis: true difference in means is not equal to 0\n95 percent confidence interval:\n -31.19842 15.32342\nsample estimates:\nmean of x mean of y \n 75.1875 83.1250 \n```\n\n\n:::\n:::\n\n\n### Welch's T-Test {#baseW}\n\n#### Code\n\nThe following code was used to test the comparison in Base R using Welch's t-test. Observe that in this case, the `var.equal` parameter is set to FALSE. 
The Satterthwaite approximation is used to calculate the effective degrees of freedom.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nd1p <- dplyr::filter(d1, trt_grp == 'placebo')\nd1t <- dplyr::filter(d1, trt_grp == 'treatment')\n\n# Perform t-test\nstats::t.test(d1p$WtGain, d1t$WtGain, var.equal = FALSE, paired = FALSE)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tWelch Two Sample t-test\n\ndata: d1p$WtGain and d1t$WtGain\nt = -0.6969, df = 29.694, p-value = 0.4913\nalternative hypothesis: true difference in means is not equal to 0\n95 percent confidence interval:\n -31.20849 15.33349\nsample estimates:\nmean of x mean of y \n 75.1875 83.1250 \n```\n\n\n:::\n:::\n\n\n\n## Procs Package\n\n### Student's T-Test and Welch's T-Test {#procs}\n\n#### Code\n\nThe following code from the **procs** package was used to perform a two sample t-test. Note that the `proc_ttest()` function performs both the Student's t-test and Welch's t-test (the Satterthwaite approximation is used to calculate the effective degrees of freedom) in the same call. The results are displayed on separate rows. 
This output is similar to SAS.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Perform t-test\nprocs::proc_ttest(d1, var = WtGain, class = trt_grp)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n$Statistics\n VAR CLASS METHOD N MEAN STD STDERR MIN MAX\n1 WtGain placebo 16 75.1875 33.81167 8.452918 12 130\n2 WtGain treatment 16 83.1250 30.53495 7.633738 28 128\n3 WtGain Diff (1-2) Pooled NA -7.9375 NA 11.389723 NA NA\n4 WtGain Diff (1-2) Satterthwaite NA -7.9375 NA 11.389723 NA NA\n\n$ConfLimits\n VAR CLASS METHOD MEAN LCLM UCLM STD LCLMSTD\n1 WtGain placebo 75.1875 57.17053 93.20447 33.81167 24.97685\n2 WtGain treatment 83.1250 66.85407 99.39593 30.53495 22.55632\n3 WtGain Diff (1-2) Pooled -7.9375 -31.19842 15.32342 NA NA\n4 WtGain Diff (1-2) Satterthwaite -7.9375 -31.20849 15.33349 NA NA\n UCLMSTD\n1 52.33003\n2 47.25868\n3 NA\n4 NA\n\n$TTests\n VAR METHOD VARIANCES DF T PROBT\n1 WtGain Pooled Equal 30.00000 -0.6969002 0.4912306\n2 WtGain Satterthwaite Unequal 29.69359 -0.6969002 0.4912856\n\n$Equality\n VAR METHOD NDF DDF FVAL PROBF\n1 WtGain Folded F 15 15 1.226136 0.6980614\n```\n\n\n:::\n:::\n\n\nViewer Output:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ttest/twosample_rtest1.png){fig-align='center' width=50%}\n:::\n:::\n\n\n\n## Example with unequal variances\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Create sample data\nd2 <- tibble::tribble(\n ~trt_grp,\t~WtGain,\n \"placebo\",\t14, \"placebo\",\t15, \"placebo\",\t15, \"placebo\",\t15,\n \"placebo\",\t16, \"placebo\",\t18, \"placebo\",\t22, \"placebo\",\t23,\n \"placebo\",\t24, \"placebo\",\t25, \"placebo\",\t25,\n \"treatment\",\t10, \"treatment\",\t12, \"treatment\",\t14, \"treatment\",\t15,\n \"treatment\",\t18, \"treatment\",\t22, \"treatment\",\t24, \"treatment\",\t27,\n \"treatment\",\t31, \"treatment\",\t33, \"treatment\",\t34, \"treatment\",\t34,\n \"treatment\",\t34,\n)\n\nd2p <- dplyr::filter(d2, trt_grp == 'placebo')\nd2t <- dplyr::filter(d2, trt_grp == 
'treatment')\n\n# Perform t-test\nstats::t.test(d2p$WtGain, d2t$WtGain, var.equal = FALSE, paired = FALSE)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tWelch Two Sample t-test\n\ndata: d2p$WtGain and d2t$WtGain\nt = -1.5379, df = 18.137, p-value = 0.1413\nalternative hypothesis: true difference in means is not equal to 0\n95 percent confidence interval:\n -10.453875 1.614714\nsample estimates:\nmean of x mean of y \n 19.27273 23.69231 \n```\n\n\n:::\n\n```{.r .cell-code}\nprocs::proc_ttest(d2, var = WtGain, class = trt_grp)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n$Statistics\n VAR CLASS METHOD N MEAN STD STDERR MIN MAX\n1 WtGain placebo 11 19.27273 4.518648 1.362424 14 25\n2 WtGain treatment 13 23.69231 9.123090 2.530290 10 34\n3 WtGain Diff (1-2) Pooled NA -4.41958 NA 3.029355 NA NA\n4 WtGain Diff (1-2) Satterthwaite NA -4.41958 NA 2.873772 NA NA\n\n$ConfLimits\n VAR CLASS METHOD MEAN LCLM UCLM STD\n1 WtGain placebo 19.27273 16.23706 22.308396 4.518648\n2 WtGain treatment 23.69231 18.17928 29.205336 9.123090\n3 WtGain Diff (1-2) Pooled -4.41958 -10.70208 1.862918 NA\n4 WtGain Diff (1-2) Satterthwaite -4.41958 -10.45387 1.614714 NA\n LCLMSTD UCLMSTD\n1 3.157257 7.929927\n2 6.542040 15.059805\n3 NA NA\n4 NA NA\n\n$TTests\n VAR METHOD VARIANCES DF T PROBT\n1 WtGain Pooled Equal 22.00000 -1.458918 0.1587188\n2 WtGain Satterthwaite Unequal 18.13738 -1.537902 0.1413355\n\n$Equality\n VAR METHOD NDF DDF FVAL PROBF\n1 WtGain Folded F 12 10 4.076307 0.03338774\n```\n\n\n:::\n:::\n\n\nViewer Output:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ttest/twosample_rtest2.png){fig-align='center' width=70%}\n:::\n:::\n\n\n\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os Ubuntu 24.04.4 LTS\n system 
x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-26\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P cli 3.6.5 2025-04-23 [?] RSPM (R 4.5.0)\n P common * 1.1.4 2025-12-08 [?] RSPM (R 4.5.0)\n P crayon 1.5.3 2024-06-20 [?] RSPM (R 4.5.0)\n P dplyr * 1.2.0 2026-02-03 [?] RSPM (R 4.5.0)\n P fmtr 1.7.2 2026-01-25 [?] RSPM (R 4.5.0)\n P generics 0.1.4 2025-05-09 [?] RSPM (R 4.5.0)\n P glue 1.8.0 2024-09-30 [?] RSPM (R 4.5.0)\n P jpeg 0.1-11 2025-03-21 [?] RSPM (R 4.5.0)\n P lifecycle 1.0.5 2026-01-08 [?] RSPM (R 4.5.0)\n P magrittr 2.0.4 2025-09-12 [?] RSPM (R 4.5.0)\n P mvtnorm 1.3-3 2025-01-10 [?] RSPM (R 4.5.0)\n P pillar 1.11.1 2025-09-17 [?] RSPM (R 4.5.0)\n P pkgconfig 2.0.3 2019-09-22 [?] RSPM (R 4.5.0)\n P procs * 1.0.7 2025-07-27 [?] RSPM (R 4.5.0)\n P R6 2.6.1 2025-02-15 [?] RSPM (R 4.5.0)\n P Rcpp 1.1.1 2026-01-10 [?] RSPM (R 4.5.0)\n P reporter 1.4.6 2026-02-07 [?] RSPM (R 4.5.0)\n P rlang 1.1.7 2026-01-09 [?] RSPM (R 4.5.0)\n P sasLM 0.10.7 2025-09-28 [?] RSPM (R 4.5.0)\n P stringi 1.8.7 2025-03-27 [?] RSPM (R 4.5.0)\n P tibble * 3.3.1 2026-01-11 [?] RSPM (R 4.5.0)\n P tidyselect 1.2.1 2024-03-11 [?] RSPM (R 4.5.0)\n utf8 1.2.6 2025-06-08 [1] RSPM (R 4.5.0)\n P vctrs 0.7.1 2026-01-23 [?] RSPM (R 4.5.0)\n P withr 3.0.2 2024-10-28 [?] RSPM (R 4.5.0)\n P zip 2.3.3 2025-05-13 [?] 
RSPM (R 4.5.0)\n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.2/lib/R/library\n\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n:::", + "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" ], diff --git a/_freeze/R/ttest_Paired/execute-results/html.json b/_freeze/R/ttest_Paired/execute-results/html.json index 3a78cd3dd..ccbce9ece 100644 --- a/_freeze/R/ttest_Paired/execute-results/html.json +++ b/_freeze/R/ttest_Paired/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "44cca5edb205237df3eea7113f6a9d26", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Paired t-test\"\n---\n\n\n\n# **Paired t-test in R**\n\nThe Paired t-test is used when two samples are naturally correlated. In the Paired t-test, the difference of the means between the two samples is compared to a given number that represents the null hypothesis. 
For a Paired t-test, the number of observations in each sample must be equal.\n\nIn R, a Paired t-test can be performed using the Base R `t.test()` from the **stats** package or the `proc_ttest()` function from the **procs** package.\n\n## Normal Data {#normal}\n\nBy default, the R paired t-test functions assume normality in the data and use a classic Student's t-test.\n\n### Data Used\n\nThe following data was used in this example.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Create sample data\npressure <- tibble::tribble(\n ~SBPbefore, ~SBPafter,\n 120, 128, \n 124, 131, \n 130, 131, \n 118, 127,\n 140, 132, \n 128, 125, \n 140, 141, \n 135, 137,\n 126, 118, \n 130, 132, \n 126, 129, \n 127, 135\n)\n```\n:::\n\n\n### Base R\n\n#### Code\n\nThe following code was used to test the comparison in Base R.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Perform t-test\nstats::t.test(pressure$SBPbefore, pressure$SBPafter, paired = TRUE)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tPaired t-test\n\ndata: pressure$SBPbefore and pressure$SBPafter\nt = -1.0896, df = 11, p-value = 0.2992\nalternative hypothesis: true mean difference is not equal to 0\n95 percent confidence interval:\n -5.536492 1.869825\nsample estimates:\nmean difference \n -1.833333 \n```\n\n\n:::\n:::\n\n\n### Procs Package\n\n#### Code\n\nThe following code from the **procs** package was used to perform a paired t-test.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(procs)\n\n# Perform t-test\nprocs::proc_ttest(pressure, paired = \"SBPbefore*SBPafter\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n$Statistics\n VAR1 VAR2 DIFF N MEAN STD STDERR MIN MAX\n1 SBPbefore SBPafter SBPbefore-SBPafter 12 -1.833333 5.828353 1.682501 -9 8\n\n$ConfLimits\n VAR1 VAR2 DIFF MEAN LCLM UCLM STD\n1 SBPbefore SBPafter SBPbefore-SBPafter -1.833333 -5.536492 1.869825 5.828353\n LCLMSTD UCLMSTD\n1 4.128777 9.895832\n\n$TTests\n VAR1 VAR2 DIFF DF T PROBT\n1 SBPbefore SBPafter SBPbefore-SBPafter 11 -1.089648 
0.2991635\n```\n\n\n:::\n:::\n\n\nViewer Output:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ttest/paired_rtest1.png){fig-align='center' width=50%}\n:::\n:::\n\n\n## Lognormal Data {#lognormal}\n\nThe Base R `t.test()` function does not have an option for lognormal data. Likewise, the **procs** `proc_ttest()` function also does not have an option for lognormal data.\n\nOne possibility may be the `tTestLnormAltPower()` function from the **EnvStats** package. This package has not been evaluated yet.\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-23\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P procs * 1.0.7 2025-07-27 [?] RSPM\n P tibble 3.3.1 2026-01-11 [?] RSPM\n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n\n * ── Packages attached to the search path.\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n:::\n", + "markdown": "---\ntitle: \"Paired t-test\"\n---\n\n\n\n# **Paired t-test in R**\n\nThe Paired t-test is used when two samples are naturally correlated. 
In the Paired t-test, the difference of the means between the two samples is compared to a given number that represents the null hypothesis. For a Paired t-test, the number of observations in each sample must be equal.\n\nIn R, a Paired t-test can be performed using the Base R `t.test()` from the **stats** package or the `proc_ttest()` function from the **procs** package.\n\n## Normal Data {#normal}\n\nBy default, the R paired t-test functions assume normality in the data and use a classic Student's t-test.\n\n### Data Used\n\nThe following data was used in this example.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Create sample data\npressure <- tibble::tribble(\n ~SBPbefore, ~SBPafter,\n 120, 128, \n 124, 131, \n 130, 131, \n 118, 127,\n 140, 132, \n 128, 125, \n 140, 141, \n 135, 137,\n 126, 118, \n 130, 132, \n 126, 129, \n 127, 135\n)\n```\n:::\n\n\n### Base R\n\n#### Code\n\nThe following code was used to test the comparison in Base R.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Perform t-test\nstats::t.test(pressure$SBPbefore, pressure$SBPafter, paired = TRUE)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\n\tPaired t-test\n\ndata: pressure$SBPbefore and pressure$SBPafter\nt = -1.0896, df = 11, p-value = 0.2992\nalternative hypothesis: true mean difference is not equal to 0\n95 percent confidence interval:\n -5.536492 1.869825\nsample estimates:\nmean difference \n -1.833333 \n```\n\n\n:::\n:::\n\n\n### Procs Package\n\n#### Code\n\nThe following code from the **procs** package was used to perform a paired t-test.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(procs)\n\n# Perform t-test\nprocs::proc_ttest(pressure, paired = \"SBPbefore*SBPafter\")\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n$Statistics\n VAR1 VAR2 DIFF N MEAN STD STDERR MIN MAX\n1 SBPbefore SBPafter SBPbefore-SBPafter 12 -1.833333 5.828353 1.682501 -9 8\n\n$ConfLimits\n VAR1 VAR2 DIFF MEAN LCLM UCLM STD\n1 SBPbefore SBPafter SBPbefore-SBPafter -1.833333 -5.536492 1.869825 
5.828353\n LCLMSTD UCLMSTD\n1 4.128777 9.895832\n\n$TTests\n VAR1 VAR2 DIFF DF T PROBT\n1 SBPbefore SBPafter SBPbefore-SBPafter 11 -1.089648 0.2991635\n```\n\n\n:::\n:::\n\n\nViewer Output:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ttest/paired_rtest1.png){fig-align='center' width=50%}\n:::\n:::\n\n\n## Lognormal Data {#lognormal}\n\nThe Base R `t.test()` function does not have an option for lognormal data. Likewise, the **procs** `proc_ttest()` function also does not have an option for lognormal data.\n\nOne possibility may be the `tTestLnormAltPower()` function from the **EnvStats** package. This package has not been evaluated yet.\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os Ubuntu 24.04.4 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-26\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P procs * 1.0.7 2025-07-27 [?] RSPM (R 4.5.0)\n P tibble 3.3.1 2026-01-11 [?] 
RSPM (R 4.5.0)\n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.2/lib/R/library\n\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n:::\n", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/R/xgboost/execute-results/html.json b/_freeze/R/xgboost/execute-results/html.json index b73653aaf..41156ea8e 100644 --- a/_freeze/R/xgboost/execute-results/html.json +++ b/_freeze/R/xgboost/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "07f5646761935976ebc0c6e3e42de1d5", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"XGBoost\"\n---\n\n# XGBoost\n\nXGBoost which stands for eXtreme Gradient Boosting is an efficent implementation of gradient boosting. Gradient boosting is an ensemble technique in machine learning. Unlike traditional models that learn from the data independently, boosting combines the predictions of multiple weak learners to create a single, more accurate strong learner.\n\nAn XGBoost model is based on trees, so we don’t need to do much preprocessing for our data; we don’t need to worry about the factors or centering or scaling our data.\n\n## Available R packages\n\nThere are multiple packages that can be used to to implement xgboost in R.\n\n- [{tidymodels}](https://www.tidymodels.org/)\n- [{xgboost}](https://cran.r-project.org/web/packages/xgboost/index.html)\n- [{caret}](https://cran.r-project.org/web/packages/caret/index.html)\n\n{tidymodels} and {caret} easy ways to access xgboost easily. This example will use {tidymodels} because of the functionality included in {tidymodels} and is being heavily supported by Posit. {caret} was the precursor to {tidymodels} and it is recommended that you use {tidymodels} over {caret} as no new features are being added. \n\n## Data used\n\nData used for this example is `birthwt` which is part of the {MASS} package. 
This data-set considers a number of risk factors associated with birth weight in infants.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(tidyverse)\nlibrary(MASS)\nlibrary(rsample)\nlibrary(parsnip)\nlibrary(xgboost)\n\nhead(birthwt)\n```\n:::\n\n\nOur modeling goal using the `birthwt` dataset is to predict whether the birth weight is low or not low based on factors such as mother's age, smoking status, and history of hypertension.\n\n## Example Code\n\nUse {tidymodels} metadata package to split the data into training and testing data. For classification, we need to change the Low variable into a factor, since currently coded as an integer (0,1).\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbirthwt <- birthwt |>\n mutate(\n low_f = lvls_revalue(factor(low), c(\"Not Low\", \"Low\")),\n smoke_f = lvls_revalue(factor(smoke), c(\"Non-smoker\", \"Smoker\"))\n )\n\nbrthwt_split <- rsample::initial_split(birthwt, strata = low)\nbrthwt_train <- rsample::training(brthwt_split)\nbrthwt_test <- rsample::testing(brthwt_split)\n```\n:::\n\n\n### Classification\n\nAfter creating the data split, we setup the params of the model.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nxgboost_spec <- parsnip::boost_tree(trees = 15) |>\n # This model can be used for classification or regression, so set mode\n parsnip::set_mode(\"classification\") |>\n parsnip::set_engine(\"xgboost\")\n\nxgboost_spec\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nBoosted Tree Model Specification (classification)\n\nMain Arguments:\n trees = 15\n\nComputational engine: xgboost \n```\n\n\n:::\n\n```{.r .cell-code}\nxgboost_cls_fit <- xgboost_spec |>\n fit(low_f ~ ., data = brthwt_train)\nxgboost_cls_fit\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nparsnip model object\n\n##### xgb.Booster\ncall:\n xgboost::xgb.train(params = list(eta = 0.3, max_depth = 6, gamma = 0, \n colsample_bytree = 1, colsample_bynode = 1, min_child_weight = 1, \n subsample = 1, nthread = 1, objective = \"binary:logistic\"), \n 
data = x$data, nrounds = 15, evals = x$watchlist, verbose = 0)\n# of features: 12 \n# of rounds: 15 \ncallbacks:\n evaluation_log \nevaluation_log:\n iter training_logloss\n \n 1 0.38431863\n 2 0.26508682\n --- ---\n 14 0.01449745\n 15 0.01449661\n```\n\n\n:::\n\n```{.r .cell-code}\nbind_cols(\n predict(xgboost_cls_fit, brthwt_test),\n predict(xgboost_cls_fit, brthwt_test, type = \"prob\")\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 48 × 3\n .pred_class `.pred_Not Low` .pred_Low\n \n 1 Not Low 0.989 0.0106\n 2 Not Low 0.989 0.0106\n 3 Not Low 0.989 0.0106\n 4 Not Low 0.989 0.0106\n 5 Not Low 0.989 0.0106\n 6 Not Low 0.989 0.0106\n 7 Not Low 0.989 0.0106\n 8 Not Low 0.989 0.0106\n 9 Not Low 0.989 0.0106\n10 Not Low 0.989 0.0106\n# ℹ 38 more rows\n```\n\n\n:::\n:::\n\n\n### Regression\n\nTo perform xgboost with regression, when setting up the parameter of the model, set the mode of xgboost to regression. After that switch and then changing the variable of interest back to an integer, the rest of the code is the same.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nxgboost_reg_spec <- parsnip::boost_tree(trees = 15) |>\n # This model can be used for classification or regression, so set mode\n parsnip::set_mode(\"regression\") |>\n parsnip::set_engine(\"xgboost\")\n\nxgboost_reg_spec\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nBoosted Tree Model Specification (regression)\n\nMain Arguments:\n trees = 15\n\nComputational engine: xgboost \n```\n\n\n:::\n\n```{.r .cell-code}\n# For a regression model, the outcome should be `numeric`, not a `factor`.\nxgboost_reg_fit <- xgboost_reg_spec |>\n fit(low ~ ., data = brthwt_train)\nxgboost_reg_fit\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nparsnip model object\n\n##### xgb.Booster\ncall:\n xgboost::xgb.train(params = list(eta = 0.3, max_depth = 6, gamma = 0, \n colsample_bytree = 1, colsample_bynode = 1, min_child_weight = 1, \n subsample = 1, nthread = 1, objective = 
\"reg:squarederror\"), \n data = x$data, nrounds = 15, evals = x$watchlist, verbose = 0)\n# of features: 13 \n# of rounds: 15 \ncallbacks:\n evaluation_log \nevaluation_log:\n iter training_rmse\n \n 1 0.326901497\n 2 0.230645832\n --- ---\n 14 0.003512880\n 15 0.002478882\n```\n\n\n:::\n\n```{.r .cell-code}\npredict(xgboost_reg_fit, brthwt_test)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 48 × 1\n .pred\n \n 1 0.996 \n 2 0.00158\n 3 0.00158\n 4 0.00158\n 5 0.00158\n 6 0.00158\n 7 0.00158\n 8 0.00158\n 9 0.00158\n10 0.00158\n# ℹ 38 more rows\n```\n\n\n:::\n:::\n\n\n## Reference\n\n- [XGBoost with tidymodels by Julia Silge](https://juliasilge.com/blog/xgboost-tune-volleyball/)\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-23\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P cli 3.6.5 2025-04-23 [?] RSPM\n codetools 0.2-20 2024-03-31 [2] CRAN (R 4.5.2)\n P data.table 1.18.2.1 2026-01-27 [?] RSPM\n P digest 0.6.39 2025-11-19 [?] RSPM\n P dplyr * 1.2.0 2026-02-03 [?] RSPM\n P evaluate 1.0.5 2025-08-27 [?] RSPM\n P farver 2.1.2 2024-05-13 [?] RSPM\n P fastmap 1.2.0 2024-05-15 [?] RSPM\n P forcats * 1.0.1 2025-09-25 [?] RSPM\n P furrr 0.3.1 2022-08-15 [?] RSPM\n P future 1.69.0 2026-01-16 [?] RSPM\n P generics 0.1.4 2025-05-09 [?] RSPM\n P ggplot2 * 4.0.2 2026-02-03 [?] RSPM\n P globals 0.19.0 2026-02-02 [?] 
RSPM\n P glue 1.8.0 2024-09-30 [?] RSPM\n P gtable 0.3.6 2024-10-25 [?] RSPM\n P hardhat 1.4.2 2025-08-20 [?] RSPM\n P hms 1.1.4 2025-10-17 [?] RSPM\n P htmltools 0.5.9 2025-12-04 [?] RSPM\n P htmlwidgets 1.6.4 2023-12-06 [?] RSPM\n P jsonlite 2.0.0 2025-03-27 [?] RSPM\n P knitr 1.51 2025-12-20 [?] RSPM\n lattice 0.22-7 2025-04-02 [2] CRAN (R 4.5.2)\n P lifecycle 1.0.5 2026-01-08 [?] RSPM\n P listenv 0.10.0 2025-11-02 [?] RSPM\n P lubridate * 1.9.5 2026-02-04 [?] RSPM\n P magrittr 2.0.4 2025-09-12 [?] RSPM\n MASS * 7.3-65 2025-02-28 [2] CRAN (R 4.5.2)\n Matrix 1.7-4 2025-08-28 [2] CRAN (R 4.5.2)\n P otel 0.2.0 2025-08-29 [?] RSPM\n P parallelly 1.46.1 2026-01-08 [?] RSPM\n P parsnip * 1.4.1 2026-01-11 [?] RSPM\n P pillar 1.11.1 2025-09-17 [?] RSPM\n P pkgconfig 2.0.3 2019-09-22 [?] RSPM\n P purrr * 1.2.1 2026-01-09 [?] RSPM\n P R6 2.6.1 2025-02-15 [?] RSPM\n P RColorBrewer 1.1-3 2022-04-03 [?] RSPM\n P readr * 2.1.6 2025-11-14 [?] RSPM\n renv 1.0.10 2024-10-05 [1] RSPM (R 4.5.2)\n P rlang 1.1.7 2026-01-09 [?] RSPM\n P rmarkdown 2.30 2025-09-28 [?] RSPM\n P rsample * 1.3.2 2026-01-30 [?] RSPM\n P S7 0.2.1 2025-11-14 [?] RSPM\n P scales 1.4.0 2025-04-24 [?] RSPM\n P sessioninfo 1.2.3 2025-02-05 [?] RSPM\n P sparsevctrs 0.3.6 2026-01-27 [?] RSPM\n P stringi 1.8.7 2025-03-27 [?] RSPM\n P stringr * 1.6.0 2025-11-04 [?] RSPM\n P tibble * 3.3.1 2026-01-11 [?] RSPM\n P tidyr * 1.3.2 2025-12-19 [?] RSPM\n P tidyselect 1.2.1 2024-03-11 [?] RSPM\n P tidyverse * 2.0.0 2023-02-22 [?] RSPM\n P timechange 0.4.0 2026-01-29 [?] RSPM\n P tzdb 0.5.0 2025-03-15 [?] RSPM\n P utf8 1.2.6 2025-06-08 [?] RSPM\n P vctrs 0.7.1 2026-01-23 [?] RSPM\n P withr 3.0.2 2024-10-28 [?] RSPM\n P xfun 0.56 2026-01-18 [?] RSPM\n P xgboost * 3.2.0.1 2026-02-10 [?] RSPM\n P yaml 2.3.12 2025-12-10 [?] 
RSPM\n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n\n * ── Packages attached to the search path.\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n\n:::\n", + "markdown": "---\ntitle: \"XGBoost\"\n---\n\n# XGBoost\n\nXGBoost which stands for eXtreme Gradient Boosting is an efficent implementation of gradient boosting. Gradient boosting is an ensemble technique in machine learning. Unlike traditional models that learn from the data independently, boosting combines the predictions of multiple weak learners to create a single, more accurate strong learner.\n\nAn XGBoost model is based on trees, so we don’t need to do much preprocessing for our data; we don’t need to worry about the factors or centering or scaling our data.\n\n## Available R packages\n\nThere are multiple packages that can be used to to implement xgboost in R.\n\n- [{tidymodels}](https://www.tidymodels.org/)\n- [{xgboost}](https://cran.r-project.org/web/packages/xgboost/index.html)\n- [{caret}](https://cran.r-project.org/web/packages/caret/index.html)\n\n{tidymodels} and {caret} easy ways to access xgboost easily. This example will use {tidymodels} because of the functionality included in {tidymodels} and is being heavily supported by Posit. {caret} was the precursor to {tidymodels} and it is recommended that you use {tidymodels} over {caret} as no new features are being added. \n\n## Data used\n\nData used for this example is `birthwt` which is part of the {MASS} package. 
This data-set considers a number of risk factors associated with birth weight in infants.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(tidyverse)\nlibrary(MASS)\nlibrary(rsample)\nlibrary(parsnip)\nlibrary(xgboost)\n\nhead(birthwt)\n```\n:::\n\n\nOur modeling goal using the `birthwt` dataset is to predict whether the birth weight is low or not low based on factors such as mother's age, smoking status, and history of hypertension.\n\n## Example Code\n\nUse {tidymodels} metadata package to split the data into training and testing data. For classification, we need to change the Low variable into a factor, since currently coded as an integer (0,1).\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbirthwt <- birthwt |>\n mutate(\n low_f = lvls_revalue(factor(low), c(\"Not Low\", \"Low\")),\n smoke_f = lvls_revalue(factor(smoke), c(\"Non-smoker\", \"Smoker\"))\n )\n\nbrthwt_split <- rsample::initial_split(birthwt, strata = low)\nbrthwt_train <- rsample::training(brthwt_split)\nbrthwt_test <- rsample::testing(brthwt_split)\n```\n:::\n\n\n### Classification\n\nAfter creating the data split, we setup the params of the model.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nxgboost_spec <- parsnip::boost_tree(trees = 15) |>\n # This model can be used for classification or regression, so set mode\n parsnip::set_mode(\"classification\") |>\n parsnip::set_engine(\"xgboost\")\n\nxgboost_spec\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nBoosted Tree Model Specification (classification)\n\nMain Arguments:\n trees = 15\n\nComputational engine: xgboost \n```\n\n\n:::\n\n```{.r .cell-code}\nxgboost_cls_fit <- xgboost_spec |>\n fit(low_f ~ ., data = brthwt_train)\nxgboost_cls_fit\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nparsnip model object\n\n##### xgb.Booster\ncall:\n xgboost::xgb.train(params = list(eta = 0.3, max_depth = 6, gamma = 0, \n colsample_bytree = 1, colsample_bynode = 1, min_child_weight = 1, \n subsample = 1, nthread = 1, objective = \"binary:logistic\"), \n 
data = x$data, nrounds = 15, evals = x$watchlist, verbose = 0)\n# of features: 12 \n# of rounds: 15 \ncallbacks:\n evaluation_log \nevaluation_log:\n iter training_logloss\n \n 1 0.38431863\n 2 0.26508682\n --- ---\n 14 0.01449745\n 15 0.01449661\n```\n\n\n:::\n\n```{.r .cell-code}\nbind_cols(\n predict(xgboost_cls_fit, brthwt_test),\n predict(xgboost_cls_fit, brthwt_test, type = \"prob\")\n)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 48 × 3\n .pred_class `.pred_Not Low` .pred_Low\n \n 1 Not Low 0.989 0.0106\n 2 Not Low 0.989 0.0106\n 3 Not Low 0.989 0.0106\n 4 Not Low 0.989 0.0106\n 5 Not Low 0.989 0.0106\n 6 Not Low 0.989 0.0106\n 7 Not Low 0.989 0.0106\n 8 Not Low 0.989 0.0106\n 9 Not Low 0.989 0.0106\n10 Not Low 0.989 0.0106\n# ℹ 38 more rows\n```\n\n\n:::\n:::\n\n\n### Regression\n\nTo perform xgboost with regression, when setting up the parameter of the model, set the mode of xgboost to regression. After that switch and then changing the variable of interest back to an integer, the rest of the code is the same.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nxgboost_reg_spec <- parsnip::boost_tree(trees = 15) |>\n # This model can be used for classification or regression, so set mode\n parsnip::set_mode(\"regression\") |>\n parsnip::set_engine(\"xgboost\")\n\nxgboost_reg_spec\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nBoosted Tree Model Specification (regression)\n\nMain Arguments:\n trees = 15\n\nComputational engine: xgboost \n```\n\n\n:::\n\n```{.r .cell-code}\n# For a regression model, the outcome should be `numeric`, not a `factor`.\nxgboost_reg_fit <- xgboost_reg_spec |>\n fit(low ~ ., data = brthwt_train)\nxgboost_reg_fit\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nparsnip model object\n\n##### xgb.Booster\ncall:\n xgboost::xgb.train(params = list(eta = 0.3, max_depth = 6, gamma = 0, \n colsample_bytree = 1, colsample_bynode = 1, min_child_weight = 1, \n subsample = 1, nthread = 1, objective = 
\"reg:squarederror\"), \n data = x$data, nrounds = 15, evals = x$watchlist, verbose = 0)\n# of features: 13 \n# of rounds: 15 \ncallbacks:\n evaluation_log \nevaluation_log:\n iter training_rmse\n \n 1 0.326901497\n 2 0.230645832\n --- ---\n 14 0.003512880\n 15 0.002478882\n```\n\n\n:::\n\n```{.r .cell-code}\npredict(xgboost_reg_fit, brthwt_test)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 48 × 1\n .pred\n \n 1 0.00158\n 2 0.00158\n 3 0.00158\n 4 0.00158\n 5 0.00158\n 6 0.00158\n 7 0.00158\n 8 0.00158\n 9 0.00158\n10 0.00158\n# ℹ 38 more rows\n```\n\n\n:::\n:::\n\n\n## Reference\n\n- [XGBoost with tidymodels by Julia Silge](https://juliasilge.com/blog/xgboost-tune-volleyball/)\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os Ubuntu 24.04.3 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-17\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P cli 3.6.5 2025-04-23 [?] RSPM (R 4.5.0)\n codetools 0.2-20 2024-03-31 [2] CRAN (R 4.5.2)\n P data.table 1.18.2.1 2026-01-27 [?] RSPM (R 4.5.0)\n P digest 0.6.39 2025-11-19 [?] RSPM (R 4.5.0)\n P dplyr * 1.2.0 2026-02-03 [?] RSPM (R 4.5.0)\n P evaluate 1.0.5 2025-08-27 [?] RSPM (R 4.5.0)\n P farver 2.1.2 2024-05-13 [?] RSPM (R 4.5.0)\n P fastmap 1.2.0 2024-05-15 [?] RSPM (R 4.5.0)\n P forcats * 1.0.1 2025-09-25 [?] RSPM (R 4.5.0)\n P furrr 0.3.1 2022-08-15 [?] RSPM (R 4.5.0)\n P future 1.69.0 2026-01-16 [?] RSPM (R 4.5.0)\n P generics 0.1.4 2025-05-09 [?] RSPM (R 4.5.0)\n P ggplot2 * 4.0.2 2026-02-03 [?] 
RSPM (R 4.5.0)\n P globals 0.19.0 2026-02-02 [?] RSPM (R 4.5.0)\n P glue 1.8.0 2024-09-30 [?] RSPM (R 4.5.0)\n P gtable 0.3.6 2024-10-25 [?] RSPM (R 4.5.0)\n P hardhat 1.4.2 2025-08-20 [?] RSPM (R 4.5.0)\n P hms 1.1.4 2025-10-17 [?] RSPM (R 4.5.0)\n P htmltools 0.5.9 2025-12-04 [?] RSPM (R 4.5.0)\n P htmlwidgets 1.6.4 2023-12-06 [?] RSPM (R 4.5.0)\n P jsonlite 2.0.0 2025-03-27 [?] RSPM (R 4.5.0)\n P knitr 1.51 2025-12-20 [?] RSPM (R 4.5.0)\n lattice 0.22-7 2025-04-02 [2] CRAN (R 4.5.2)\n P lifecycle 1.0.5 2026-01-08 [?] RSPM (R 4.5.0)\n P listenv 0.10.0 2025-11-02 [?] RSPM (R 4.5.0)\n P lubridate * 1.9.5 2026-02-04 [?] RSPM (R 4.5.0)\n P magrittr 2.0.4 2025-09-12 [?] RSPM (R 4.5.0)\n MASS * 7.3-65 2025-02-28 [2] CRAN (R 4.5.2)\n Matrix 1.7-4 2025-08-28 [2] CRAN (R 4.5.2)\n P otel 0.2.0 2025-08-29 [?] RSPM (R 4.5.0)\n P parallelly 1.46.1 2026-01-08 [?] RSPM (R 4.5.0)\n P parsnip * 1.4.1 2026-01-11 [?] RSPM (R 4.5.0)\n P pillar 1.11.1 2025-09-17 [?] RSPM (R 4.5.0)\n P pkgconfig 2.0.3 2019-09-22 [?] RSPM (R 4.5.0)\n P purrr * 1.2.1 2026-01-09 [?] RSPM (R 4.5.0)\n P R6 2.6.1 2025-02-15 [?] RSPM (R 4.5.0)\n P RColorBrewer 1.1-3 2022-04-03 [?] RSPM (R 4.5.0)\n P readr * 2.1.6 2025-11-14 [?] RSPM (R 4.5.0)\n renv 1.0.10 2024-10-05 [1] RSPM (R 4.5.2)\n P rlang 1.1.7 2026-01-09 [?] RSPM (R 4.5.0)\n P rmarkdown 2.30 2025-09-28 [?] RSPM (R 4.5.0)\n P rsample * 1.3.2 2026-01-30 [?] RSPM (R 4.5.0)\n P S7 0.2.1 2025-11-14 [?] RSPM (R 4.5.0)\n P scales 1.4.0 2025-04-24 [?] RSPM (R 4.5.0)\n P sessioninfo 1.2.2 2021-12-06 [?] RSPM (R 4.5.0)\n P sparsevctrs 0.3.6 2026-01-27 [?] RSPM (R 4.5.0)\n P stringi 1.8.7 2025-03-27 [?] RSPM (R 4.5.0)\n P stringr * 1.6.0 2025-11-04 [?] RSPM (R 4.5.0)\n P tibble * 3.3.1 2026-01-11 [?] RSPM (R 4.5.0)\n P tidyr * 1.3.2 2025-12-19 [?] RSPM (R 4.5.0)\n P tidyselect 1.2.1 2024-03-11 [?] RSPM (R 4.5.0)\n P tidyverse * 2.0.0 2023-02-22 [?] RSPM (R 4.5.0)\n P timechange 0.4.0 2026-01-29 [?] RSPM (R 4.5.0)\n P tzdb 0.5.0 2025-03-15 [?] 
RSPM (R 4.5.0)\n P utf8 1.2.6 2025-06-08 [?] RSPM (R 4.5.0)\n P vctrs 0.7.1 2026-01-23 [?] RSPM (R 4.5.0)\n P withr 3.0.2 2024-10-28 [?] RSPM (R 4.5.0)\n P xfun 0.56 2026-01-18 [?] RSPM (R 4.5.0)\n P xgboost * 3.2.0.1 2026-02-10 [?] RSPM (R 4.5.0)\n P yaml 2.3.12 2025-12-10 [?] RSPM (R 4.5.0)\n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.2/lib/R/library\n\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n\n:::\n", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/SAS/SAS_Friedmantest/execute-results/html.json b/_freeze/SAS/SAS_Friedmantest/execute-results/html.json deleted file mode 100644 index 9a6f07ba8..000000000 --- a/_freeze/SAS/SAS_Friedmantest/execute-results/html.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "hash": "49c561103bb5d8d61be395d4c797ba86", - "result": { - "engine": "jupyter", - "markdown": "---\ntitle: \"Friedman Chi-Square test using SAS\"\nexecute: \n eval: false\n---\n\n# Introduction\n\nThe Friedman test is a non-parametric statistical test developed by Milton Friedman similar to the parametric repeated measures ANOVA. It is used to detect differences in groups across multiple blocks. The procedure involves ranking each row (or block) together, then considering the values of ranks by columns. Applicable to complete block designs, it is thus a special case of the Durbin test.\n\nThe Friedman test is used for one-way repeated measures analysis of variance by ranks. In its use of ranks it is similar to the Kruskal--Wallis one-way analysis of variance by ranks.\n\n## SAS version\n\nSAS 9.4\n\n## Data used\n\nSimulated dataset of 10 subjects(blocks) with continuous endpoints are generated for single-drug repeated measurements to check whether any significance exists between the responses(y) at different time points(4 time points simulated)(groups). 
The p-value will indicate whether differences in response for different time points are significant.\n\n## Data source\n\n```{sas}\ndata one_way_repeat;\n do subject = 1 to 10;\n do timepoint = 1 to 4;\n response = round(rand('Uniform',10,50));\n output;\n end;\n end;\nrun;\n\nproc print;\nrun;\n```\n\n## Overview\n\nThe `FREQ` procedure computes CMH statistic, Friedman's test is identical to the ANOVA (row means scores) CMH statistic when the analysis uses rank scores (SCORES=RANK). The TABLES statement creates a three-way table i.e., timepoint and response stratified by subject. The output produces following statistics along with its degrees of freedom and p-value(Prob):\n\n- Nonzero Correlation\n\n- Row Mean Scores Differ\n\nThe row corresponding to 'Row Mean Scores Differ' gives the required statistic and p-value for Friedman's test.\n\n## Handling missing Values\n\nWhen the data contains missing response, the procedure discards the corresponding row and calculates the required statistic with a message about number of missing responses below the test statisitc output.\n\n## Example Code for Friedman Chi-square test\n\n```{sas}\nproc freq data=one_way_repeat;\n tables subject*timepoint*response / \n cmh2 scores=rank noprint;\nrun;\n```\n\n## Results\n\n``` default\n The FREQ Procedure\n\n Summary Statistics for timepoint by response\n Controlling for subject\n\n Cochran-Mantel-Haenszel Statistics (Based on Rank Scores)\n\n Statistic Alternative Hypothesis DF Value Prob\n ---------------------------------------------------------------\n 1 Nonzero Correlation 1 0.0276 0.8682\n 2 Row Mean Scores Differ 3 3.6429 0.3027\n\n\n Total Sample Size = 40\n```\n\n## References\n\n[Examples: FREQ Procedure (sas.com)](https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_freq_sect033.htm)\n\n", - "supporting": [ - "SAS_Friedmantest_files" - ], - "filters": [], - "includes": {} - } -} \ No newline at end of file diff --git 
a/_freeze/SAS/ancova/execute-results/html.json b/_freeze/SAS/ancova/execute-results/html.json index d6ab73545..29799bf2e 100644 --- a/_freeze/SAS/ancova/execute-results/html.json +++ b/_freeze/SAS/ancova/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "f9f65f4cf4b284827310b6aac440fad4", + "hash": "9b086e54d8bdd4bda5285cb111f9e4e3", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Ancova\"\ndate: \"2024-02-20\"\nexecute: \n eval: false\n---\n\n\n\n# **ANCOVA in SAS**\n\nIn SAS, there are several ways to perform ANCOVA analysis. One common way is to use PROC GLM with the LSMEANS option. The below example will use this method.\n\n### Data Used\n\nThe following data was used in this example.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\ndata DrugTest;\n input Drug $ PreTreatment PostTreatment @@;\n datalines;\n A 11 6 A 8 0 A 5 2 A 14 8 A 19 11\n A 6 4 A 10 13 A 6 1 A 11 8 A 3 0\n D 6 0 D 6 2 D 7 3 D 8 1 D 18 18\n D 8 4 D 19 14 D 8 9 D 5 1 D 15 9\n F 16 13 F 13 10 F 11 18 F 9 5 F 21 23\n F 16 12 F 12 5 F 12 16 F 7 1 F 12 20\n ;\n```\n:::\n\n\n### Code\n\nThe following code was used to test the effects of a drug pre and post treatment:\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc glm data=DrugTest;\n class Drug;\n model PostTreatment = Drug PreTreatment / solution;\n lsmeans Drug / stderr pdiff cov out=adjmeans;\nrun;\nproc print data=adjmeans;\nrun;\n```\n:::\n\n\nOutput:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ancova/sas_ancova0.png){fig-align='center' width=50%}\n:::\n\n::: {.cell-output-display}\n![](../images/ancova/sas_ancova1.png){fig-align='center' width=50%}\n:::\n\n::: {.cell-output-display}\n![](../images/ancova/sas_ancova2.png){fig-align='center' width=50%}\n:::\n\n::: {.cell-output-display}\n![](../images/ancova/sas_ancova3.png){fig-align='center' width=50%}\n:::\n:::\n\n\nAs can be seen in the images above, the GLM procedure provides multiple types of analysis to determine the relationship between 
the dependent and independent variables. The last step produces a table of LSMEANS and coefficient of variation values for each of the three different drugs in the dataset.", + "markdown": "---\ntitle: \"Ancova\"\ndate: \"2024-02-20\"\n---\n\n\n\n# **ANCOVA in SAS**\n\nIn SAS, there are several ways to perform ANCOVA analysis. One common way is to use PROC GLM with the LSMEANS option. The below example will use this method.\n\n### Data Used\n\nThe following data was used in this example.\n\n```sas\ndata DrugTest;\n input Drug $ PreTreatment PostTreatment @@;\n datalines;\n A 11 6 A 8 0 A 5 2 A 14 8 A 19 11\n A 6 4 A 10 13 A 6 1 A 11 8 A 3 0\n D 6 0 D 6 2 D 7 3 D 8 1 D 18 18\n D 8 4 D 19 14 D 8 9 D 5 1 D 15 9\n F 16 13 F 13 10 F 11 18 F 9 5 F 21 23\n F 16 12 F 12 5 F 12 16 F 7 1 F 12 20\n ;\n```\n\n### Code\n\nThe following code was used to test the effects of a drug pre and post treatment:\n\n```sas\nproc glm data=DrugTest;\n class Drug;\n model PostTreatment = Drug PreTreatment / solution;\n lsmeans Drug / stderr pdiff cov out=adjmeans;\nrun;\nproc print data=adjmeans;\nrun;\n```\n\nOutput:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ancova/sas_ancova0.png){fig-align='center' width=50%}\n:::\n\n::: {.cell-output-display}\n![](../images/ancova/sas_ancova1.png){fig-align='center' width=50%}\n:::\n\n::: {.cell-output-display}\n![](../images/ancova/sas_ancova2.png){fig-align='center' width=50%}\n:::\n\n::: {.cell-output-display}\n![](../images/ancova/sas_ancova3.png){fig-align='center' width=50%}\n:::\n:::\n\n\nAs can be seen in the images above, the GLM procedure provides multiple types of analysis to determine the relationship between the dependent and independent variables. 
The last step produces a table of LSMEANS and coefficient of variation values for each of the three different drugs in the dataset.", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/SAS/anova/execute-results/html.json b/_freeze/SAS/anova/execute-results/html.json index eea009057..487fa93ea 100644 --- a/_freeze/SAS/anova/execute-results/html.json +++ b/_freeze/SAS/anova/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "460c059bda5f7212321294ceb71536af", + "hash": "e31e97e348407bc06c430b26f15cf3d1", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"ANOVA\"\nexecute: \n eval: false\n---\n\n### **Getting Started**\n\nTo demonstrate the various types of sums of squares, we'll create a data frame called `df_disease` taken from the SAS documentation.\n\n### The Model {.unnumbered}\n\nFor this example, we're testing for a significant difference in `stem_length` using ANOVA.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc glm data = disease;\n class drug disease;\n model y=drug disease drug*disease;\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n\n:::\n\n\n### Sums of Squares Tables {.unnumbered}\n\nSAS has four types of sums of squares calculations. 
To get these calculations, the sum of squares option needs to be added (`/ ss1 ss2 ss3 ss4`) to the model statement.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc glm;\n class drug disease;\n model y=drug disease drug*disease / ss1 ss2 ss3 ss4;\nrun;\n```\n:::\n\n\n#### Type I\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/linear/sas-ss-type-1.png){fig-align='center' width=75%}\n:::\n:::\n\n\n#### Type II {.unnumbered}\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/linear/sas-ss-type-2.png){fig-align='center' width=75%}\n:::\n:::\n\n\n#### Type III {.unnumbered}\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/linear/sas-ss-type-3.png){fig-align='center' width=75%}\n:::\n:::\n\n\n#### Type IV {.unnumbered}\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/linear/sas-ss-type-4.png){fig-align='center' width=75%}\n:::\n:::\n\n\n### Contrasts {.unnumbered}\n\nTo get contrasts in SAS, we use the `estimate` statement. For looking at contrast we are going to fit a different model on new data, that doesn't include an interaction term as it is easier to calculate contrasts without an interaction term. 
For this dataset we have three different drugs A, C, and E.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc glm data=testdata;\n class drug;\n model post = drug pre / solution;\n estimate 'C vs A' drug -1 1 0;\n estimate 'E vs CA' drug -1 -1 2;\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/linear/clipboard-1394032862.png){fig-align='center' width=75%}\n:::\n:::\n\n\nReference: [Sum of squares type I, II, and III](http://dwoll.de/rexrepos/posts/anovaSStypes.html)", + "markdown": "---\ntitle: \"ANOVA\"\n---\n\n### **Getting Started**\n\nTo demonstrate the various types of sums of squares, we'll create a data frame called `df_disease` taken from the SAS documentation.\n\n### The Model {.unnumbered}\n\nFor this example, we're testing for a significant difference in `stem_length` using ANOVA.\n\n```sas\nproc glm data = disease;\n class drug disease;\n model y=drug disease drug*disease;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/linear/sas-f-table.png){fig-align='center' width=90%}\n:::\n:::\n\n\n### Sums of Squares Tables {.unnumbered}\n\nSAS has four types of sums of squares calculations. 
To get these calculations, the sum of squares option needs to be added (`/ ss1 ss2 ss3 ss4`) to the model statement.\n\n```sas\nproc glm;\n class drug disease;\n model y=drug disease drug*disease / ss1 ss2 ss3 ss4;\nrun;\n```\n\n#### Type I\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/linear/sas-ss-type-1.png){fig-align='center' width=75%}\n:::\n:::\n\n\n#### Type II {.unnumbered}\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/linear/sas-ss-type-2.png){fig-align='center' width=75%}\n:::\n:::\n\n\n#### Type III {.unnumbered}\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/linear/sas-ss-type-3.png){fig-align='center' width=75%}\n:::\n:::\n\n\n#### Type IV {.unnumbered}\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/linear/sas-ss-type-4.png){fig-align='center' width=75%}\n:::\n:::\n\n\n### Contrasts {.unnumbered}\n\nTo get contrasts in SAS, we use the `estimate` statement. For looking at contrast we are going to fit a different model on new data, that doesn't include an interaction term as it is easier to calculate contrasts without an interaction term. 
For this dataset we have three different drugs A, C, and E.\n\n```sas\nproc glm data=testdata;\n class drug;\n model post = drug pre / solution;\n estimate 'C vs A' drug -1 1 0;\n estimate 'E vs CA' drug -1 -1 2;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/linear/clipboard-1394032862.png){fig-align='center' width=75%}\n:::\n:::\n\n\nReference: [Sum of squares type I, II, and III](http://dwoll.de/rexrepos/posts/anovaSStypes.html)", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/SAS/association/execute-results/html.json b/_freeze/SAS/association/execute-results/html.json index 1e07aa038..79baa1ea0 100644 --- a/_freeze/SAS/association/execute-results/html.json +++ b/_freeze/SAS/association/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "b460d39336befd3418693a7e56e0fd4e", + "hash": "0b68f1a66c07711ddc0403db47d7793f", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Association Analysis for Count Data Using SAS\"\nexecute: \n eval: false\n---\n\nIn SAS, association analysis methods for count data/contingency tables is typically performed using the `PROC FREQ` procedure. This procedure has options for Chi-Square and Fisher's Exact tests.\n\n# Example: Lung Cancer Data\n\nThe following tabulation was used for the SAS Chi-Square and Fisher's testing. This tabulation was derived from the same `lung` dataset used for the R function testing. The dataset is defined as follows:\n\n\n::: {.cell}\n\n```{.sas .cell-code}\n\ndata test_case; \n input treatment $ Count Weight $; \n datalines; \n Trt_A 22 0\n Trt_B 39 0\n Trt_A 39 1\n Trt_B 113 1\n; \n```\n:::\n\n\n## Tests of Association\n\nThe following SAS code produces both the Chi-Square and Fisher's Exact tests of association. Note that the results contain many statistics not produced by the corresponding R function. 
The relevant sections of the output have been outlined in red.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc freq data = test_case;\n weight Count;\n tables treatment * Weight / chisq fisher;\n exact or;\nrun;\n```\n:::\n\n\nOutput:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/association/sas_chisq_fisher.png){fig-align='center' width=50%}\n:::\n:::\n\n", + "markdown": "---\ntitle: \"Association Analysis for Count Data Using SAS\"\n---\n\nIn SAS, association analysis methods for count data/contingency tables is typically performed using the `PROC FREQ` procedure. This procedure has options for Chi-Square and Fisher's Exact tests.\n\n# Example: Lung Cancer Data\n\nThe following tabulation was used for the SAS Chi-Square and Fisher's testing. This tabulation was derived from the same `lung` dataset used for the R function testing. The dataset is defined as follows:\n\n```sas\n\ndata test_case; \n input treatment $ Count Weight $; \n datalines; \n Trt_A 22 0\n Trt_B 39 0\n Trt_A 39 1\n Trt_B 113 1\n; \n```\n\n## Tests of Association\n\nThe following SAS code produces both the Chi-Square and Fisher's Exact tests of association. Note that the results contain many statistics not produced by the corresponding R function. 
The relevant sections of the output have been outlined in red.\n\n```sas\nproc freq data = test_case;\n weight Count;\n tables treatment * Weight / chisq fisher;\n exact or;\nrun;\n```\n\nOutput:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/association/sas_chisq_fisher.png){fig-align='center' width=50%}\n:::\n:::\n", "supporting": [ "association_files" ], diff --git a/_freeze/SAS/binomial_test/execute-results/html.json b/_freeze/SAS/binomial_test/execute-results/html.json deleted file mode 100644 index 6980fed30..000000000 --- a/_freeze/SAS/binomial_test/execute-results/html.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "hash": "7de0bd2e773e79cbf0a0a31c21b7fef5", - "result": { - "engine": "jupyter", - "markdown": "---\ntitle: \"Binomial Test on Coin Flips and Clinical Data\"\nexecute: \n eval: false\n---\n\n## Simulating Coin Flips\n\nSet the seed for reproducibility and simulate 1000 coin flips using a Bernoulli distribution.\n\n```{sas}\n/* Set the seed for reproducibility */\n%let seed = 19;\n\ndata coin_flips;\n call streaminit(&seed);\n do i = 1 to 1000;\n /* Simulate coin flips: 1 for Heads (H), 0 for Tails (T) */\n flip = rand(\"Bernoulli\", 0.5);\n/* flip = rand(\"BINOMIAL\", 0.5,1); */\n if flip = 1 then result = \"H\";\n else result = \"T\";\n output;\n end;\nrun;\n```\n\n## Counting Heads and Tails\n\nUse SQL to count how many heads and tails were observed in the simulation.\n\n```{sas}\nproc sql;\n select \n sum(result = \"H\") as heads_count,\n sum(result = \"T\") as tails_count,\n count(*) as total_flips\n into :heads_count, :tails_count, :total_flips\n from coin_flips;\nquit;\n```\n\n## Display the Results\n\nPrint the counts using `%put` statements.\n\n```{sas}\n%put Heads Count: &heads_count;\n%put Tails Count: &tails_count;\n%put Total Flips: &total_flips;\n```\n\n## Perform Binomial Test on Coin Flip Results\n\nUse `proc freq` to check if the observed results differ significantly from the expected probability of 
0.5.\n\n```{sas}\nproc freq data=coin_flips;\n tables result / binomial(p=0.5);\nrun;\n```\n\n## Example: Binomial Test in Clinical Trial Data\n\nWe load a clinical dataset and test if the observed death proportion is significantly different from a hypothesized value (e.g., 19%).\n\n### Import Dataset\n\n```{sas}\nproc import datafile='/home/u63532805/CAMIS/lung_cancer.csv'\n out=lung_cancer\n dbms=csv\n replace;\n getnames=yes;\nrun;\n```\n\n### Create Binary Flag for Deaths\n\n```{sas}\ndata lung_cancer;\n set lung_cancer;\n death_flag = (status = 1);\nrun;\n```\n\n### Perform Exact Binomial Test\n\n```{sas}\nproc freq data=lung_cancer;\n tables death_flag / binomial(p=0.19 level='1');\n title \"Exact Binomial Test for Death Proportion\";\nrun;\n```\n\n## SAS Output\n\n### Coin Flip Summary\n\n| heads_count | tails_count | total_flips |\n|-------------|-------------|-------------|\n| 520 | 480 | 1000 |\n\n### Binomial Test on Coin Flips\n\n**The FREQ Procedure**\n\n| result | Frequency | Percent | Cumulative Frequency | Cumulative Percent |\n|--------|-----------|---------|----------------------|--------------------|\n| H | 520 | 52.00 | 520 | 52.00 |\n| T | 480 | 48.00 | 1000 | 100.00 |\n\n**Binomial Proportion for result = H**\n\n- Proportion: 0.5200\\\n- ASE: 0.0158\\\n- 95% Lower Conf Limit: 0.4890\\\n- 95% Upper Conf Limit: 0.5510\n\n**Exact Confidence Limits**\n\n- 95% Lower Conf Limit: 0.4885\\\n- 95% Upper Conf Limit: 0.5514\n\n**Test of H0: Proportion = 0.5**\n\n- ASE under H0: 0.0158\\\n- Z: 1.2649\\\n- One-sided Pr \\> Z: 0.1030\\\n- Two-sided Pr \\> \\|Z\\|: 0.2059\\\n- Sample Size: 1000\n\n### Exact Binomial Test for Death Proportion\n\n**The FREQ Procedure**\n\n| death_flag | Frequency | Percent | Cumulative Frequency | Cumulative Percent |\n|------------|-----------|---------|----------------------|--------------------|\n| 0 | 165 | 72.37 | 165 | 72.37 |\n| 1 | 63 | 27.63 | 228 | 100.00 |\n\n**Binomial Proportion for death_flag = 1**\n\n- 
Proportion: 0.2763\\\n- ASE: 0.0296\\\n- 95% Lower Conf Limit: 0.2183\\\n- 95% Upper Conf Limit: 0.3344\n\n**Exact Confidence Limits**\n\n- 95% Lower Conf Limit: 0.2193\\\n- 95% Upper Conf Limit: 0.3392\n\n**Test of H0: Proportion = 0.19**\n\n- ASE under H0: 0.0260\\\n- Z: 3.3223\\\n- One-sided Pr \\> Z: 0.0004\\\n- Two-sided Pr \\> \\|Z\\|: 0.0009\\\n- Sample Size: 228\n\n", - "supporting": [ - "binomial_test_files" - ], - "filters": [], - "includes": {} - } -} \ No newline at end of file diff --git a/_freeze/SAS/ci_for_2indep_prop/execute-results/html.json b/_freeze/SAS/ci_for_2indep_prop/execute-results/html.json index 508b5b83a..661704290 100644 --- a/_freeze/SAS/ci_for_2indep_prop/execute-results/html.json +++ b/_freeze/SAS/ci_for_2indep_prop/execute-results/html.json @@ -1,9 +1,11 @@ { - "hash": "d25ffa376f8f6c701d158c341a7ca206", + "hash": "1e337d47a590119b667ba7e53b9a3c7f", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Confidence Intervals for Independent Proportions in SAS\"\nbibliography: references.bib\ncsl: nature.csl \n---\n\n# Introduction\n\nThis page covers confidence intervals for comparisons of two independent proportions in SAS, including the contrast parameters for risk difference (RD) $\\theta_{RD} = p_1 - p_2$, relative risk (RR) $\\theta_{RR} = p_1 / p_2$, and odds ratio (OR) $\\theta_{OR} = p_1(1-p_2) / (p_2(1-p_1))$.\n\nSee the [summary page](../method_summary/ci_for_prop_intro.html) for general introductory information on confidence intervals for proportions, including the principles underlying the most common methods.\n\nCaution is required if there are no responders (or all responders) in both groups, which might happen in a subgroup analysis for example. PROC FREQ (as of v9.4) does not output any confidence intervals in this case, when valid CIs can (and should) be reported for the RD contrast, since the dataset provides an estimate of zero for RD (and the confidence in the estimate is proportional to the sample size). 
Similarly, if $\\hat p_1 = \\hat p_2 = 1$ then an estimate and confidence interval can be obtained for RR, but not from PROC FREQ.\n\n# Data used\n\nThe adcibc data stored [here](../data/adcibc.csv) was used in this example, creating a binary treatment variable `trt` taking the values of `Act` or `PBO` and a binary response variable `resp` taking the values of `Yes` or `No`. For this example, a response is defined as a score greater than 4.\n\n``` sas\nproc import datafile = 'data/adcibc.csv'\n out = adcibc\n dbms = csv\n replace;\n getnames = yes;\n guessingrows = max;\nrun;\n\ndata adcibc2 (keep=trt resp) ;\n set adcibc; \n if aval gt 4 then resp=\"Yes\";\n else resp=\"No\"; \n if trtp=\"Placebo\" then trt=\"PBO\";\n else trt=\"Act\"; \nrun;\n \n* Sort to ensure that the outcome of interest (\"Yes\" in this example) is first;\n* when using default COLUMN=1 option in the TABLES statement;\nproc sort data=adcibc2; \nby trt descending resp; \nrun; \n```\n\nThe below shows that for the Active Treatment, there are 36 responders out of 154 subjects, p1 = 0.2338 (23.38% responders), while for the placebo treatment p2 = 12/77 = 0.1558, giving a risk difference of 0.0779, relative risk 1.50, and odds ratio 1.6525.\n\n``` sas\nproc freq data=adcibc2;\n table trt*resp/ nopct nocol;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/2by2crosstab.png){fig-align='center' width=50%}\n:::\n:::\n\n\n# Methods for Calculating Confidence Intervals for Proportion Difference from 2 independent samples\n\nThis [paper](https://www.lexjansen.com/wuss/2016/127_Final_Paper_PDF.pdf) describes many methods for the calculation of confidence intervals for 2 independent proportions. The 2-sided and 1-sided performance of many of the same methods have been compared graphically[@laud2014]. 
According to a recent paper[@bai2021], the most commonly reported method in non-inferiority clinical trials for antibiotics is the Wald asymptotic normal approximation (despite its well-documented poor performance), followed by the Miettinen-Nurminen (asymptotic score) method. More recently, an improved variant of the Miettinen-Nurminen method (SCAS) was developed, by including a skewness correction designed to optimise the performance in terms of one-sided coverage for NI testing. SCAS corrects the slightly asymmetrical coverage of the Miettinen-Nurminen interval (note the skewness is more pronounced when analysing the RR contrast, and/or when group sizes are imbalanced).\n\nSAS PROC FREQ is able to calculate CIs for RD using the following methods: Agresti/Caffo (AC), Miettinen and Nurminen (MN or SCORE), Mee (MN(Mee)), Newcombe Hybrid Score (MOVER), and Wald. For conservative coverage, there is the 'Exact' method, or continuity-adjusted versions of the Wald and Newcombe methods, and also the Hauck-Anderson (HA) continuity-adjustment.\n\nThe SCAS method is not available in PROC FREQ, but can be produced using a SAS macro (`%SCORECI`) which can be downloaded from .\n\n## Normal Approximation Method (Also known as the Wald Method)\n\nThe difference between two independent sample proportions is calculated as: $\\hat \\theta_{RD} = \\hat p_1 - \\hat p_2 = x_1 / n_1 - x_2 / n_2$\n\nThe Wald CI for $\\theta_{RD}$ is calculated using:\n\n$\\hat \\theta_{RD} \\pm z_{\\alpha/2} \\times SE(\\hat \\theta_{RD})$,\n\nwhere $SE (\\hat \\theta_{RD}) = \\sqrt{( \\frac{\\hat p_1 (1-\\hat p_1)}{n_1} + \\frac{\\hat p_2 (1-\\hat p_2)}{n_2})}$\n\nWith continuity correction, the equation becomes\n\n$\\hat \\theta_{RD} \\pm (CC + z_{\\alpha/2} \\times SE(\\hat \\theta_{RD}))$,\n\nwhere $CC = \\frac{1}{2} (\\frac{1}{n_1} + \\frac{1}{n_2})$\n\n## Newcombe Method (Also known as the Hybrid Score method, Square-and-Add, or the Method of Variance Estimates Recovery (MOVER) )\n\nDerive the 
confidence intervals for the separate proportions in each group, $p_1$ and $p_2$, using the Wilson Score Method equations as described [here](ci_for_prop.html).\n\nLet $l_1$ = Lower CI for sample 1, and $u_1$ be the upper CI for sample 1.\n\nLet $l_2$ = Lower CI for sample 2, and $u_2$ be the upper CI for sample 2.\n\nLet D = $\\hat p_1 - \\hat p_2$ (the difference between the observed proportions)\n\nThe CI for $\\theta_{RD}$ the difference between two proportions is: $$ D - sqrt((\\hat p_1 - l_1)^2+(u_2 - \\hat p_2)^2) \\quad, \\quad D + sqrt((\\hat p_2 - l_2)^2 + (u_1 - \\hat p_1)^2 ) $$\n\nNote that earlier versions of SAS PROC FREQ (before implementation of the MN method) allowed the option CL=WILSON or CL=SCORE to produce this method, but it is not really a score method. As of SAS/STAT 15.4, the CL=WILSON option (undocumented) still gives the Newcombe interval, but CL=SCORE gives the Miettinen-Nurminen score method.\n\n## Miettinen-Nurminen, Mee and other Asymptotic Score Methods\n\nThese truly are score methods, as they are based on an extension of the score methodology applied to two independent proportions, using the contrast function $S(\\theta) = \\hat p_1 - \\hat p_2 - \\theta$, which for any given value of $\\theta$ has expectation zero, and variance $\\{\\tilde p_1 (1 - \\tilde p_1)/n_1 + \\tilde p_2 (1 - \\tilde p_2)/n_2\\} \\times N/(N-1)$ where $N = n_1 + n_2$.\n\nThe Mee variant of the method omits the $N/(N-1)$ variance bias correction factor (sometimes referred to as an $'N-1'$ correction).\n\nGart and Nam derived a similar method, arrived at from a different underlying 'efficient score' methodology, so the formulae look different but are essentially equivalent to the Mee interval. They added a correction for skewness to improve one-sided coverage. The skewness correction was applied to the Miettinen-Nurminen formula for all contrast parameters by Laud, to give the SCAS method[@laud2017]. 
The SCAS method is available for SAS via the `%SCORECI` macro.\n\n## Agresti-Caffo Method\n\nSimilar to the Agresti-Coull method for a single proportion, the Agresti-Caffo interval is designed to be an easily taught method to enable the CI to be calculated by hand. The formula involves simply adding one success and one failure to each sample, and then using the Wald formula.\n\n## 'Exact' Methods\n\nTo obtain 'exact' confidence intervals, you need to add an `EXACT RISKDIFF` statement to the PROC FREQ.\n\nThe current default method produced by PROC FREQ (`METHOD=SCORE`) is the Chan-Zhang variant. This is undoubtedly an improvement on the extremely over-conservative Santner-Snell method (obtained with the `RISKDIFF(METHOD=NOSCORE)` option in the EXACT statement), but is more computationally intensive, and can result in a Warning in the SAS log about long computation times.\n\nThe third alternative 'exact' method by Agresti & Min (`RISKDIFF(METHOD=SCORE2)`) is less conservative, but is only guaranteed to achieve strictly conservative two-sided coverage, so is not appropriate for use in one-sided hypothesis testing.\n\n# Methods for Calculating Confidence Intervals for Relative Risk or Odds Ratio from 2 independent samples\n\nSelected methods have been compared graphically for their 1-sided performance[@laud2017, see supplementary material], with the observation that optimum 2-sided coverage follows directly from optimum 1-sided coverage (while the reverse is not true). It has been noted previously that the ratio contrasts suffer a greater imbalance in 1-sided coverage than RD[@gart1990]. Therefore, skewness correction is particularly valuable here, but it is not available in SAS PROC FREQ.\n\nSAS PROC FREQ is able to calculate CIs for RR or OR using the following methods: Miettinen-Nurminen (SCORE), Likelihood Ratio (LR), Wald, Haldane Modified Wald. 
There is also a second version of the Asymptotic Score method (which for RR is the Koopman method) omitting the 'N-1' variance bias correction, using the `CL=(SCORE(CORRECT=NO))` option - note this is NOT referring to a 'continuity correction' (and also note that no indication is given in the output that this option has been applied). For OR, an additional `CL=MIDP` option is also available.\n\nFor RR, there are two versions of the 'Wald' method (also known as the Katz log and adjusted log methods), which are based on an assumption that log(RR) is distributed normally with variance $1/x_1 + 1/x_2 - 1/n_1 - 1/n_2$. The adjusted version adds 0.5 to each denominator to permit calculation of an interval when no events are observed in one of the groups.\n\nSimilarly, there are two versions of the 'Wald' method for OR (also known as the Woolf logit and Gart adjusted logit intervals). SAS documentation attributes the adjustments for both contrasts to Haldane, but they are the same adjustments.\n\nFor conservative coverage, the 'Exact' methods are provided. For RR this includes the 3 variants described above for RD, while only one version is given for OR. Run times for the `METHOD=SCORE` version for RR may be lengthy (but the faster alternative Santner-Snell version produces a fairly uninterpretable interval of \\[0.03, 23698\\] for the example dataset used in this article). Continuity adjustments are not implemented for any method for RR or OR.\n\nThe SCAS method (which addresses asymmetric one-sided coverage of the MN Score method which is particularly pronounced for RR, and also adds a further bias correction for OR[@laud2018]), is not available in PROC FREQ, but is given by the SAS macro `%SCORECI`.\n\n# Continuity Adjusted Methods\n\nSAS provides an option (`CORRECT`) to apply continuity adjustment to the Wald or Newcombe methods for more conservative coverage, but this only applies for the RISKDIFF contrast, not RELRISK or ODDSRATIO. 
The Hauck-Anderson (HA) method for RD is a slightly less conservative variation of a continuity adjustment. Note however that all of these methods fail to achieve strictly conservative coverage, although the adjusted Newcombe method comes close[@laud2014].\n\nIt is important to note that the `CORRECT` sub-option for the MN/Score method serves an entirely different purpose. The Miettinen-Nurminen method is **not** a 'continuity-corrected' version of the Mee interval. Rather, the `CORRECT=NO` option removes the variance bias correction factor N/(N-1) from the Miettinen-Nurminen formula in order to produce the Mee version of the score method for RD (and equivalent un-corrected score methods for RR (Koopman) and OR).\n\nNo continuity adjustment is currently available for the score methods in SAS. A 'sliding scale' adjustment has been described[@laud2014] and implemented for MN and SCAS in the ratesci package for R, but not yet added to the %SCORECI macro.\n\n# Consistency with Hypothesis Tests\n\nWithin SAS PROC FREQ for the asymptotic methods for RD, consistency with a traditional Chi-squared test for association (**Karl** Pearson version), and the Farrington-Manning test for non-inferiority, is provided by the Mee CI (`CL=SCORE(CORRECT=MEE)`). That method is similar to MN but without the 'N-1' correction factor (the omission of which produces coverage that is slightly below the nominal confidence level on average, instead of slightly above). Note that the MN method (including the correction factor) is consistent with the **Egon** Pearson 'N-1' version of the chi-squared test[@campbell2007]. SAS PROC FREQ does not produce that test, nor does it offer the option to include the 'N-1' adjustment when requesting a non-inferiority test with the `NONINF` option. 
Consequently, there is a risk of contradictory results if using PROC FREQ to obtain a MN CI with a corresponding non-inferiority test.\n\nFor the SCAS or MN methods, the `%SCORECI` macro provides the p-value for a specified NI margin, with guaranteed consistency with the CI.\n\nIf an EXACT statement is used to produce CIs, SAS does not offer any matching hypothesis tests for NI or equivalence testing.\n\n# Example Code using PROC FREQ\n\nIt is important to check the output to ensure that you are modelling Active - Placebo, and response = Yes (not Response=No). By default SAS sorts alphabetically and calculates CI's for the first column. You can change this by using the `COLUMN=` Option in riskdiff or by sorting the dataset (here by trt, then descending resp), and then using `order=data` in the proc freq. This tells SAS to use the order you have sorted the data by. SAS confirms this by saying \"Difference is (Row 1 - Row 2)\" and \"Column 1 (resp=Yes)\".\n\nSimilarly for relrisk, although the output does not state that the RR is calculated as (Row 1) / (Row 2). 
If treatment groups are labelled differently, you might need to sort by descending trt to obtain the correct contrast (note that unlike for RD, the same result cannot be obtained by setting `COLUMN=2`).\n\nSAS output often rounds to 3 or 4 decimal places in the output window, however the full values can be obtained using SAS ODS statements.\n\n``` sas\n\n****************************;\n*** Risk Difference examples;\n****************************;\n\n*** Wald, Newcombe, Agresti/Caffo, and Miettinen-Nurminen methods; \nproc freq data=adcibc2 order=data; \n table trt*resp /riskdiff(CL=(wald newcombe ac mn); \nrun; \n\n*** Mee score method;\nproc freq data=adcibc2 order=data; \n table trt*resp /riskdiff(CL=(mn(mee) norisks); \nrun; \n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_2sampleRDCI_noCC.png){fig-align='center' width=75%}\n:::\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_2sampleRDCI_Mee.png){fig-align='center' width=50%}\n:::\n:::\n\n\n``` sas\n*** exact (Chan-Zhang) and continuity-adjusted methods for conservative coverage; \nproc freq data=adcibc2 order=data;\n exact riskdiff (method=noscore); \n table trt*resp/riskdiff(CL=(exact ha wald(correct) newcombe(correct)) norisks); \nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_2sampleRDCI_CC.png){fig-align='center' width=50%}\n:::\n:::\n\n\n``` sas\n*** exact (Santner-Snell) method; \nproc freq data=adcibc2 order=data;\n exact riskdiff (method=noscore); \n table trt*resp/riskdiff(CL=exact norisks); \nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_2sampleRDCI_ExactSS.png){fig-align='center' width=50%}\n:::\n:::\n\n\n``` sas\n*** 2-sided exact (Agresti-Min) method; \nproc freq data=adcibc2 order=data;\n exact riskdiff (method=score2); \n table 
trt*resp/riskdiff(CL=exact norisks); \nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_2sampleRDCI_ExactAM.png){fig-align='center' width=50%}\n:::\n:::\n\n\n``` sas\n*** MN and SCAS methods from %SCORECI macro;\n*** First manipulate the data to a form for input to the macro;\nproc tabulate data=adcibc2 out=tab2;\n class trt resp;\n table (resp all),trt;\nrun;\ndata ds(keep = n1 n0 e1 e0);\n set tab2;\n by _page_;\n retain n1 n0 e1 e0;\n * Initialise counts in case there are none in the observed data;\n if first._page_ then do;\n e1 = 0;\n e0 = 0;\n end;\n if upcase(trt) = \"ACT\" then do;\n if _type_ = \"11\" and resp = \"Yes\" then e1 = n;\n if _type_ = \"10\" then n1 = n;\n end;\n else if upcase(trt) = \"PBO\" then do;\n if _type_ = \"11\" and resp = \"Yes\" then e0 = n;\n if _type_ = \"10\" then n0 = n;\n end;\n if last._page_ then output;\nrun;\n\n*** Miettinen-Nurminen CI - macro from https://github.com/petelaud/ratesci-sas;\n%scoreci(ds, stratify=FALSE, skew=FALSE);\n\n*** Mee Asymptotic Score CI - macro from https://github.com/petelaud/ratesci-sas;\n%scoreci(ds, stratify=FALSE, skew=FALSE, bcf=FALSE);\n\n*** SCAS CI - macro from https://github.com/petelaud/ratesci-sas;\n%scoreci(ds, stratify=FALSE);\n\n*** Farrington-Manning NI test (p=0.0248) contradicts MN interval (LCL < -0.036);\n*** (Arbitrary NI margin of -0.036 used for illustration)\nproc freq data=adcibc2 order=data;\n table trt*resp / riskdiff(CL=mn noninf margin=0.036 method=score); \nrun;\n\n*** Miettinen-Nurminen CI with consistent NI test: PVAL_R > 0.025; \n%scoreci(ds, stratify=FALSE, skew=FALSE, delta=-0.036);\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_2sampleRDNonInf.png){fig-align='center' width=50%}\n:::\n:::\n\n\n``` sas\n\n****************************;\n*** Relative risk examples;\n****************************;\n\n*** Wald, LR, and Miettinen and Nurminen 
methods; \nproc freq data=adcibc2 order=data; \n table trt*resp /relrisk(CL=(wald waldmodified lr score)); \nrun; \n*** Koopman Asymptotic Score method without the 'N-1' correction;\nproc freq data=adcibc2 order=data; \n table trt*resp /relrisk(CL=(score(correct=no))); \nrun; \n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_2sampleRRCI.png){fig-align='center' width=50%}\n:::\n\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_2sampleRRCI_Koopman.png){fig-align='center' width=50%}\n:::\n:::\n\n\n``` sas\n\n*** Exact method (Chan-Zhang); \nproc freq data=adcibc2 order=data;\n exact relrisk; \n table trt*resp/relrisk(CL=(exact)); \nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_2sampleRRCI_ExactCZ.png){fig-align='center' width=50%}\n:::\n:::\n\n\n``` sas\n*** Exact (Santner-Snell);\nproc freq data=adcibc2 order=data;\n exact relrisk (method=noscore); \n table trt*resp / relrisk(CL=exact); \nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_2sampleRRCI_ExactSS.png){fig-align='center' width=50%}\n:::\n:::\n\n\n``` sas\n\n*** 2-sided exact (Agresti-Min) method; \nproc freq data=adcibc2 order=data;\n exact relrisk (method=score2); \n table trt*resp / relrisk(CL=exact); \nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_2sampleRRCI_ExactAM.png){fig-align='center' width=50%}\n:::\n:::\n\n\n``` sas\n\n*** Miettinen-Nurminen Asymptotic Score CI - macro from https://github.com/petelaud/ratesci-sas;\n%scoreci(ds, stratify=FALSE, skew=FALSE, contrast=RR);\n*** Koopman Asymptotic Score CI - macro from https://github.com/petelaud/ratesci-sas;\n%scoreci(ds, stratify=FALSE, skew=FALSE, bcf=FALSE, contrast=RR);\n*** SCAS CI - macro from https://github.com/petelaud/ratesci-sas;\n%scoreci(ds, stratify=FALSE, 
contrast=RR);\n```\n\n``` sas\n****************************;\n*** Odds Ratio examples;\n****************************;\n\n* Wald, LR, mid-P and Miettinen and Nurminen methods; \nproc freq data=adcibc2 order=data; \n table trt*resp /or(CL=(wald waldmodified lr score midp)); \nrun; \n* Asymptotic Score method without 'N-1' correction;\nproc freq data=adcibc2 order=data; \n table trt*resp /or(CL=(score(correct=no))); \nrun; \n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_2sampleORCI.png){fig-align='center' width=50%}\n:::\n\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_2sampleORCI_ScoreN.png){fig-align='center' width=50%}\n:::\n:::\n\n\n``` sas\n\n*** 'exact' method; \nproc freq data=adcibc2 order=data;\n exact or; \n table trt*resp/or(CL=(exact)); \nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_2sampleORCI_Exact.png){fig-align='center' width=50%}\n:::\n:::\n\n\n``` sas\n\n*** Miettinen-Nurminen CI;\n%scoreci(ds, stratify=FALSE, skew=FALSE, orbias=FALSE, contrast=OR);\n*** SCAS CI - macro from https://github.com/petelaud/ratesci-sas;\n%scoreci(ds, stratify=FALSE, contrast=OR);\n```\n\n# References\n", - "supporting": [], + "markdown": "---\ntitle: \"Confidence Intervals for Independent Proportions in SAS\"\nbibliography: references.bib\ncsl: nature.csl \n---\n\n# Introduction\n\nThis page covers confidence intervals for comparisons of two independent proportions in SAS, including the contrast parameters for risk difference (RD) $\\theta_{RD} = p_1 - p_2$, relative risk (RR) $\\theta_{RR} = p_1 / p_2$, and odds ratio (OR) $\\theta_{OR} = p_1(1-p_2) / (p_2(1-p_1))$.\n\nSee the [summary page](../method_summary/ci_for_prop_intro.html) for general introductory information on confidence intervals for proportions, including the principles underlying the most common methods.\n\nCaution is required if there are no responders (or 
all responders) in both groups, which might happen in a subgroup analysis for example. PROC FREQ (as of v9.4) does not output any confidence intervals in this case, when valid CIs can (and should) be reported for the RD contrast, since the dataset provides an estimate of zero for RD (and the confidence in the estimate is proportional to the sample size). Similarly, if $\\hat p_1 = \\hat p_2 = 1$ then an estimate and confidence interval can be obtained for RR, but not from PROC FREQ.\n\n# Data used\n\nThe adcibc data stored [here](../data/adcibc.csv) was used in this example, creating a binary treatment variable `trt` taking the values of `Act` or `PBO` and a binary response variable `resp` taking the values of `Yes` or `No`. For this example, a response is defined as a score greater than 4.\n\n```sas\nproc import datafile = 'data/adcibc.csv'\n out = adcibc\n dbms = csv\n replace;\n getnames = yes;\n guessingrows = max;\nrun;\n\ndata adcibc2 (keep=trt resp) ;\n set adcibc; \n if aval gt 4 then resp=\"Yes\";\n else resp=\"No\"; \n if trtp=\"Placebo\" then trt=\"PBO\";\n else trt=\"Act\"; \nrun;\n \n* Sort to ensure that the outcome of interest (\"Yes\" in this example) is first;\n* when using default COLUMN=1 option in the TABLES statement;\nproc sort data=adcibc2; \nby trt descending resp; \nrun; \n```\n\nThe below shows that for the Active Treatment, there are 36 responders out of 154 subjects, p1 = 0.2338 (23.38% responders), while for the placebo treatment p2 = 12/77 = 0.1558, giving a risk difference of 0.0779, relative risk 1.50, and odds ratio 1.6525.\n\n```sas\nproc freq data=adcibc2;\n table trt*resp/ nopct nocol;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/2by2crosstab.png){fig-align='center' width=50%}\n:::\n:::\n\n\n# Methods for Calculating Confidence Intervals for Proportion Difference from 2 independent samples\n\nThis [paper](https://www.lexjansen.com/wuss/2016/127_Final_Paper_PDF.pdf) 
describes many methods for the calculation of confidence intervals for 2 independent proportions. The 2-sided and 1-sided performance of many of the same methods have been compared graphically[@laud2014]. According to a recent paper[@bai2021], the most commonly reported method in non-inferiority clinical trials for antibiotics is the Wald asymptotic normal approximation (despite its well-documented poor performance), followed by the Miettinen-Nurminen (asymptotic score) method. More recently, an improved variant of the Miettinen-Nurminen method (SCAS) was developed, by including a skewness correction designed to optimise the performance in terms of one-sided coverage for NI testing. SCAS corrects the slightly asymmetrical coverage of the Miettinen-Nurminen interval (note the skewness is more pronounced when analysing the RR contrast, and/or when group sizes are imbalanced).\n\nSAS PROC FREQ is able to calculate CIs for RD using the following methods: Agresti/Caffo (AC), Miettinen and Nurminen (MN or SCORE), Mee (MN(Mee)), Newcombe Hybrid Score (MOVER), and Wald. 
For conservative coverage, there is the 'Exact' method, or continuity-adjusted versions of the Wald and Newcombe methods, and also the Hauck-Anderson (HA) continuity-adjustment.\n\nThe SCAS method is not available in PROC FREQ, but can be produced using a SAS macro (`%SCORECI`) which can be downloaded from .\n\n## Normal Approximation Method (Also known as the Wald Method)\n\nThe difference between two independent sample proportions is calculated as: $\\hat \\theta_{RD} = \\hat p_1 - \\hat p_2 = x_1 / n_1 - x_2 / n_2$\n\nThe Wald CI for $\\theta_{RD}$ is calculated using:\n\n$\\hat \\theta_{RD} \\pm z_{\\alpha/2} \\times SE(\\hat \\theta_{RD})$,\n\nwhere $SE (\\hat \\theta_{RD}) = \\sqrt{( \\frac{\\hat p_1 (1-\\hat p_1)}{n_1} + \\frac{\\hat p_2 (1-\\hat p_2)}{n_2})}$\n\nWith continuity correction, the equation becomes\n\n$\\hat \\theta_{RD} \\pm (CC + z_{\\alpha/2} \\times SE(\\hat \\theta_{RD}))$,\n\nwhere $CC = \\frac{1}{2} (\\frac{1}{n_1} + \\frac{1}{n_2})$\n\n## Newcombe Method (Also known as the Hybrid Score method, Square-and-Add, or the Method of Variance Estimates Recovery (MOVER) )\n\nDerive the confidence intervals for the separate proportions in each group, $p_1$ and $p_2$, using the Wilson Score Method equations as described [here](ci_for_prop.html).\n\nLet $l_1$ = Lower CI for sample 1, and $u_1$ be the upper CI for sample 1.\n\nLet $l_2$ = Lower CI for sample 2, and $u_2$ be the upper CI for sample 2.\n\nLet D = $\\hat p_1 - \\hat p_2$ (the difference between the observed proportions)\n\nThe CI for $\\theta_{RD}$ the difference between two proportions is: $$ D - sqrt((\\hat p_1 - l_1)^2+(u_2 - \\hat p_2)^2) \\quad, \\quad D + sqrt((\\hat p_2 - l_2)^2 + (u_1 - \\hat p_1)^2 ) $$\n\nNote that earlier versions of SAS PROC FREQ (before implementation of the MN method) allowed the option CL=WILSON or CL=SCORE to produce this method, but it is not really a score method. 
As of SAS/STAT 15.4, the CL=WILSON option (undocumented) still gives the Newcombe interval, but CL=SCORE gives the Miettinen-Nurminen score method.\n\n## Miettinen-Nurminen, Mee and other Asymptotic Score Methods\n\nThese truly are score methods, as they are based on an extension of the score methodology applied to two independent proportions, using the contrast function $S(\\theta) = \\hat p_1 - \\hat p_2 - \\theta$, which for any given value of $\\theta$ has expectation zero, and variance $\\{\\tilde p_1 (1 - \\tilde p_1)/n_1 + \\tilde p_2 (1 - \\tilde p_2)/n_2\\} \\times N/(N-1)$ where $N = n_1 + n_2$.\n\nThe Mee variant of the method omits the $N/(N-1)$ variance bias correction factor (sometimes referred to as an $'N-1'$ correction).\n\nGart and Nam derived a similar method, arrived at from a different underlying 'efficient score' methodology, so the formulae look different but are essentially equivalent to the Mee interval. They added a correction for skewness to improve one-sided coverage. The skewness correction was applied to the Miettinen-Nurminen formula for all contrast parameters by Laud, to give the SCAS method[@laud2017]. The SCAS method is available for SAS via the `%SCORECI` macro.\n\n## Agresti-Caffo Method\n\nSimilar to the Agresti-Coull method for a single proportion, the Agresti-Caffo interval is designed to be an easily taught method to enable the CI to be calculated by hand. The formula involves simply adding one success and one failure to each sample, and then using the Wald formula.\n\n## 'Exact' Methods\n\nTo obtain 'exact' confidence intervals, you need to add an `EXACT RISKDIFF` statement to the PROC FREQ.\n\nThe current default method produced by PROC FREQ (`METHOD=SCORE`) is the Chan-Zhang variant. 
This is undoubtedly an improvement on the extremely over-conservative Santner-Snell method (obtained with the `RISKDIFF(METHOD=NOSCORE)` option in the EXACT statement), but is more computationally intensive, and can result in a Warning in the SAS log about long computation times.\n\nThe third alternative 'exact' method by Agresti & Min (`RISKDIFF(METHOD=SCORE2)`) is less conservative, but is only guaranteed to achieve strictly conservative two-sided coverage, so is not appropriate for use in one-sided hypothesis testing.\n\n# Methods for Calculating Confidence Intervals for Relative Risk or Odds Ratio from 2 independent samples\n\nSelected methods have been compared graphically for their 1-sided performance[@laud2017, see supplementary material], with the observation that optimum 2-sided coverage follows directly from optimum 1-sided coverage (while the reverse is not true). It has been noted previously that the ratio contrasts suffer a greater imbalance in 1-sided coverage than RD[@gart1990]. Therefore, skewness correction is particularly valuable here, but it is not available in SAS PROC FREQ.\n\nSAS PROC FREQ is able to calculate CIs for RR or OR using the following methods: Miettinen-Nurminen (SCORE), Likelihood Ratio (LR), Wald, Haldane Modified Wald. There is also a second version of the Asymptotic Score method (which for RR is the Koopman method) omitting the 'N-1' variance bias correction, using the `CL=(SCORE(CORRECT=NO))` option - note this is NOT referring to a 'continuity correction' (and also note that no indication is given in the output that this option has been applied). For OR, an additional `CL=MIDP` option is also available.\n\nFor RR, there are two versions of the 'Wald' method (also known as the Katz log and adjusted log methods), which are based on an assumption that log(RR) is distributed normally with variance $1/x_1 + 1/x_2 - 1/n_1 - 1/n_2$. 
The adjusted version adds 0.5 to each denominator to permit calculation of an interval when no events are observed in one of the groups.\n\nSimilarly, there are two versions of the 'Wald' method for OR (also known as the Woolf logit and Gart adjusted logit intervals). SAS documentation attributes the adjustments for both contrasts to Haldane, but they are the same adjustments.\n\nFor conservative coverage, the 'Exact' methods are provided. For RR this includes the 3 variants described above for RD, while only one version is given for OR. Run times for the `METHOD=SCORE` version for RR may be lengthy (but the faster alternative Santner-Snell version produces a fairly uninterpretable interval of \\[0.03, 23698\\] for the example dataset used in this article). Continuity adjustments are not implemented for any method for RR or OR.\n\nThe SCAS method (which addresses asymmetric one-sided coverage of the MN Score method which is particularly pronounced for RR, and also adds a further bias correction for OR[@laud2018]), is not available in PROC FREQ, but is given by the SAS macro `%SCORECI`.\n\n# Continuity Adjusted Methods\n\nSAS provides an option (`CORRECT`) to apply continuity adjustment to the Wald or Newcombe methods for more conservative coverage, but this only applies for the RISKDIFF contrast, not RELRISK or ODDSRATIO. The Hauck-Anderson (HA) method for RD is a slightly less conservative variation of a continuity adjustment. Note however that all of these methods fail to achieve strictly conservative coverage, although the adjusted Newcombe method comes close[@laud2014].\n\nIt is important to note that the `CORRECT` sub-option for the MN/Score method serves an entirely different purpose. The Miettinen-Nurminen method is **not** a 'continuity-corrected' version of the Mee interval. 
Rather, the `CORRECT=NO` option removes the variance bias correction factor N/(N-1) from the Miettinen-Nurminen formula in order to produce the Mee version of the score method for RD (and equivalent un-corrected score methods for RR (Koopman) and OR).\n\nNo continuity adjustment is currently available for the score methods in SAS. A 'sliding scale' adjustment has been described[@laud2014] and implemented for MN and SCAS in the ratesci package for R, but not yet added to the %SCORECI macro.\n\n# Consistency with Hypothesis Tests\n\nWithin SAS PROC FREQ for the asymptotic methods for RD, consistency with a traditional Chi-squared test for association (**Karl** Pearson version), and the Farrington-Manning test for non-inferiority, is provided by the Mee CI (`CL=SCORE(CORRECT=MEE)`). That method is similar to MN but without the 'N-1' correction factor (the omission of which produces coverage that is slightly below the nominal confidence level on average, instead of slightly above). Note that the MN method (including the correction factor) is consistent with the **Egon** Pearson 'N-1' version of the chi-squared test[@campbell2007]. SAS PROC FREQ does not produce that test, nor does it offer the option to include the 'N-1' adjustment when requesting a non-inferiority test with the `NONINF` option. Consequently, there is a risk of contradictory results if using PROC FREQ to obtain a MN CI with a corresponding non-inferiority test.\n\nFor the SCAS or MN methods, the `%SCORECI` macro provides the p-value for a specified NI margin, with guaranteed consistency with the CI.\n\nIf an EXACT statement is used to produce CIs, SAS does not offer any matching hypothesis tests for NI or equivalence testing.\n\n# Example Code using PROC FREQ\n\nIt is important to check the output to ensure that you are modelling Active - Placebo, and response = Yes (not Response=No). By default SAS sorts alphabetically and calculates CI's for the first column. 
You can change this by using the `COLUMN=` Option in riskdiff or by sorting the dataset (here by trt, then descending resp), and then using `order=data` in the proc freq. This tells SAS to use the order you have sorted the data by. SAS confirms this by saying \"Difference is (Row 1 - Row 2)\" and \"Column 1 (resp=Yes)\".\n\nSimilarly for relrisk, although the output does not state that the RR is calculated as (Row 1) / (Row 2). If treatment groups are labelled differently, you might need to sort by descending trt to obtain the correct contrast (note that unlike for RD, the same result cannot be obtained by setting `COLUMN=2`).\n\nSAS output often rounds to 3 or 4 decimal places in the output window, however the full values can be obtained using SAS ODS statements.\n\n```sas\n\n****************************;\n*** Risk Difference examples;\n****************************;\n\n*** Wald, Newcombe, Agresti/Caffo, and Miettinen-Nurminen methods; \nproc freq data=adcibc2 order=data; \n table trt*resp /riskdiff(CL=(wald newcombe ac mn); \nrun; \n\n*** Mee score method;\nproc freq data=adcibc2 order=data; \n table trt*resp /riskdiff(CL=(mn(mee) norisks); \nrun; \n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_2sampleRDCI_noCC.png){fig-align='center' width=75%}\n:::\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_2sampleRDCI_Mee.png){fig-align='center' width=50%}\n:::\n:::\n\n\n``` sas\n*** exact (Chan-Zhang) and continuity-adjusted methods for conservative coverage; \nproc freq data=adcibc2 order=data;\n exact riskdiff (method=noscore); \n table trt*resp/riskdiff(CL=(exact ha wald(correct) newcombe(correct)) norisks); \nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_2sampleRDCI_CC.png){fig-align='center' width=50%}\n:::\n:::\n\n\n``` sas\n*** exact (Santner-Snell) method; \nproc freq 
data=adcibc2 order=data;\n exact riskdiff (method=noscore); \n table trt*resp/riskdiff(CL=exact norisks); \nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_2sampleRDCI_ExactSS.png){fig-align='center' width=50%}\n:::\n:::\n\n\n``` sas\n*** 2-sided exact (Agresti-Min) method; \nproc freq data=adcibc2 order=data;\n exact riskdiff (method=score2); \n table trt*resp/riskdiff(CL=exact norisks); \nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_2sampleRDCI_ExactAM.png){fig-align='center' width=50%}\n:::\n:::\n\n\n``` sas\n*** MN and SCAS methods from %SCORECI macro;\n*** First manipulate the data to a form for input to the macro;\nproc tabulate data=adcibc2 out=tab2;\n class trt resp;\n table (resp all),trt;\nrun;\ndata ds(keep = n1 n0 e1 e0);\n set tab2;\n by _page_;\n retain n1 n0 e1 e0;\n * Initialise counts in case there are none in the observed data;\n if first._page_ then do;\n e1 = 0;\n e0 = 0;\n end;\n if upcase(trt) = \"ACT\" then do;\n if _type_ = \"11\" and resp = \"Yes\" then e1 = n;\n if _type_ = \"10\" then n1 = n;\n end;\n else if upcase(trt) = \"PBO\" then do;\n if _type_ = \"11\" and resp = \"Yes\" then e0 = n;\n if _type_ = \"10\" then n0 = n;\n end;\n if last._page_ then output;\nrun;\n\n*** Miettinen-Nurminen CI - macro from https://github.com/petelaud/ratesci-sas;\n%scoreci(ds, stratify=FALSE, skew=FALSE);\n\n*** Mee Asymptotic Score CI - macro from https://github.com/petelaud/ratesci-sas;\n%scoreci(ds, stratify=FALSE, skew=FALSE, bcf=FALSE);\n\n*** SCAS CI - macro from https://github.com/petelaud/ratesci-sas;\n%scoreci(ds, stratify=FALSE);\n\n*** Farrington-Manning NI test (p=0.0248) contradicts MN interval (LCL < -0.036);\n*** (Arbitrary NI margin of -0.036 used for illustration)\nproc freq data=adcibc2 order=data;\n table trt*resp / riskdiff(CL=mn noninf margin=0.036 method=score); \nrun;\n\n*** Miettinen-Nurminen CI 
with consistent NI test: PVAL_R > 0.025; \n%scoreci(ds, stratify=FALSE, skew=FALSE, delta=-0.036);\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_2sampleRDNonInf.png){fig-align='center' width=75%}\n:::\n:::\n\n\n``` sas\n\n****************************;\n*** Relative risk examples;\n****************************;\n\n*** Wald, LR, and Miettinen and Nurminen methods; \nproc freq data=adcibc2 order=data; \n table trt*resp /relrisk(CL=(wald waldmodified lr score)); \nrun; \n*** Koopman Asymptotic Score method without the 'N-1' correction;\nproc freq data=adcibc2 order=data; \n table trt*resp /relrisk(CL=(score(correct=no))); \nrun; \n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_2sampleRRCI.png){fig-align='center' width=50%}\n:::\n\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_2sampleRRCI_Koopman.png){fig-align='center' width=50%}\n:::\n:::\n\n\n``` sas\n\n*** Exact method (Chan-Zhang); \nproc freq data=adcibc2 order=data;\n exact relrisk; \n table trt*resp/relrisk(CL=(exact)); \nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_2sampleRRCI_ExactCZ.png){fig-align='center' width=50%}\n:::\n:::\n\n\n``` sas\n*** Exact (Santner-Snell);\nproc freq data=adcibc2 order=data;\n exact relrisk (method=noscore); \n table trt*resp / relrisk(CL=exact); \nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_2sampleRRCI_ExactSS.png){fig-align='center' width=50%}\n:::\n:::\n\n\n``` sas\n\n*** 2-sided exact (Agresti-Min) method; \nproc freq data=adcibc2 order=data;\n exact relrisk (method=score2); \n table trt*resp / relrisk(CL=exact); \nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_2sampleRRCI_ExactAM.png){fig-align='center' 
width=50%}\n:::\n:::\n\n\n``` sas\n\n*** Miettinen-Nurminen Asymptotic Score CI - macro from https://github.com/petelaud/ratesci-sas;\n%scoreci(ds, stratify=FALSE, skew=FALSE, contrast=RR);\n*** Koopman Asymptotic Score CI - macro from https://github.com/petelaud/ratesci-sas;\n%scoreci(ds, stratify=FALSE, skew=FALSE, bcf=FALSE, contrast=RR);\n*** SCAS CI - macro from https://github.com/petelaud/ratesci-sas;\n%scoreci(ds, stratify=FALSE, contrast=RR);\n```\n\n``` sas\n****************************;\n*** Odds Ratio examples;\n****************************;\n\n* Wald, LR, mid-P and Miettinen and Nurminen methods; \nproc freq data=adcibc2 order=data; \n table trt*resp /or(CL=(wald waldmodified lr score midp)); \nrun; \n* Asymptotic Score method without 'N-1' correction;\nproc freq data=adcibc2 order=data; \n table trt*resp /or(CL=(score(correct=no))); \nrun; \n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_2sampleORCI.png){fig-align='center' width=50%}\n:::\n\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_2sampleORCI_ScoreN.png){fig-align='center' width=50%}\n:::\n:::\n\n\n``` sas\n\n*** 'exact' method; \nproc freq data=adcibc2 order=data;\n exact or; \n table trt*resp/or(CL=(exact)); \nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_2sampleORCI_Exact.png){fig-align='center' width=50%}\n:::\n:::\n\n\n``` sas\n\n*** Miettinen-Nurminen CI;\n%scoreci(ds, stratify=FALSE, skew=FALSE, orbias=FALSE, contrast=OR);\n*** SCAS CI - macro from https://github.com/petelaud/ratesci-sas;\n%scoreci(ds, stratify=FALSE, contrast=OR);\n```\n\n# References\n", + "supporting": [ + "ci_for_2indep_prop_files" + ], "filters": [ "rmarkdown/pagebreak.lua" ], diff --git a/_freeze/SAS/ci_for_paired_prop/execute-results/html.json b/_freeze/SAS/ci_for_paired_prop/execute-results/html.json index 40746ddef..81a40c06a 100644 --- 
a/_freeze/SAS/ci_for_paired_prop/execute-results/html.json +++ b/_freeze/SAS/ci_for_paired_prop/execute-results/html.json @@ -1,9 +1,11 @@ { - "hash": "bc1c376a45ed56e9ad4ab8d2bedf92ef", + "hash": "fe83846483323502ea5e794a1b2fbc2b", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Confidence intervals for Paired Proportions in SAS\"\nbibliography: references.bib\ncsl: nature.csl \n---\n\n## Introduction\n\nThis page covers confidence intervals for comparisons of two paired proportions in SAS. Note that PROC FREQ will give a McNemar test for such an analysis (via the `AGREE` option), but with no corresponding confidence interval. Some methods are described below to facilitate computing them programmatically. A SAS macro (`%PAIRBINCI`) can be downloaded from to obtain the Asymptotic Score methods, which have superior coverage properties.\n\nAnalysis may be based on the risk difference (RD) contrast $\\theta_{RD} = p_1 - p_2$ or relative risk (RR) $\\theta_{RR} = p_1 / p_2$. In the context of paired data, and of particular interest for case-control studies, the odds ratio (OR) is estimated conditional on the number of discordant pairs, calculated as $\\theta_{OR} = p_{12} / p_{21}$, resulting in confidence intervals that are simple transformations of a CI for a single proportion.\n\nSee the [summary page](../method_summary/ci_for_prop_intro.html) for general introductory information on confidence intervals for proportions, including the principles underlying the most common methods.\n\nYou may experience paired data in any of the following types of situation:\n\n- Tumour assesssments classified as Progressive Disease or Not Progressive Disease performed by an Investigator and separately by an independent panel.\n\n- A paired case-control study (each subject taking active treatment is matched to a patient taking control)\n\n- A cross-over trial where the same subjects take both medications\n\nIn all these cases, the calculated proportions for the 2 groups 
are not independent.\n\nUsing a cross-over study as our example, a 2 x 2 table can be formed as follows:\n\n+-----------------------+---------------+---------------+-------------+\n| | Placebo\\ | Placebo\\ | Total |\n| | Response= Yes | Response = No | |\n+=======================+===============+===============+=============+\n| Active Response = Yes | r | s | r+s |\n+-----------------------+---------------+---------------+-------------+\n| Active Response = No | t | u | t+u |\n+-----------------------+---------------+---------------+-------------+\n| Total | r+t | s+u | N = r+s+t+u |\n+-----------------------+---------------+---------------+-------------+\n\nThe table below indicates the proportions that are estimated from the data (note the difference in structure compared to the usual 2x2 table for independent proportions).\n\n+-----------------------+---------------+---------------+-----------+\n| | Placebo\\ | Placebo\\ | Total |\n| | Response= Yes | Response = No | |\n+=======================+===============+===============+===========+\n| Active Response = Yes | $p_{11}$ | $p_{12}$ | $p_1$ |\n+-----------------------+---------------+---------------+-----------+\n| Active Response = No | $p_{21}$ | $p_{22}$ | $(1-p_1)$ |\n+-----------------------+---------------+---------------+-----------+\n| Total | $p_2$ | $(1-p_2)$ | |\n+-----------------------+---------------+---------------+-----------+\n\nThe proportions of subjects responding on each treatment are:\n\nActive: $\\hat p_1 = (r+s)/N$ and Placebo: $\\hat p_2= (r+t)/N$\n\nThe estimated difference between the proportions for each treatment is: $D=\\hat p_1 - \\hat p_2 = (s-t)/N$\n\nThe estimated relative risk is $(r+s)/(r+t)$.\n\nThe estimated conditional odds ratio is $s/t$.\n\n## Data used\n\nWorked examples below use the following artificial dataset:\n\n+-----------------------+---------------+---------------+----------+\n| | Placebo\\ | Placebo\\ | Total |\n| | Response= Yes | Response = No | 
|\n+=======================+===============+===============+==========+\n| Active Response = Yes | r = 20 | s = 15 | r+s = 35 |\n+-----------------------+---------------+---------------+----------+\n| Active Response = No | t = 6 | u = 5 | t+u = 11 |\n+-----------------------+---------------+---------------+----------+\n| Total | r+t = 26 | s+u = 20 | N = 46 |\n+-----------------------+---------------+---------------+----------+\n\n## Methods for Calculating Confidence Intervals for Proportion Difference from matched pairs\n\n### Normal Approximation Method (Also known as the Wald Method)\n\nIn large random samples from paired data, the sampling distribution of the difference between two proportions is assumed to follow the normal distribution. The estimated SE for the difference and 95% confidence interval can be calculated using the following equations.\n\n$SE(D)=\\frac{1}{N} \\times \\sqrt{(s+t-\\frac{(s-t)^2}{N})} = \\sqrt{\\frac{1}{N}(\\hat p_{12} + \\hat p_{21} - (\\hat p_{12} - \\hat p_{21})^2)}$\n\n$D-z_{\\alpha/2} \\times SE(D)$ to $D+z_{\\alpha/2} \\times SE(D)$\n\nSo, for the example dataset,\n\nD = (15-6) /46 = 0.196\n\nSE(D) = 1/ 46 \\* sqrt (15+6- (((15-6)\\^2)/46) ) = 0.09535\n\nLower CI= 0.196 - 1.96 \\*0.0954 = 0.008764\n\nUpper CI = 0.196 + 1.96 \\* 0.0954 = 0.382541\n\nLike other Wald methods, this approach has been found to perform poorly, failing to achieve the nominal confidence level[@newcombe1998][@fagerland2014]. 
An adjustment was suggested by Bonett & Price, similar to Agresti-Caffo for independent proportions, using $N'=N+2$ instead of $N$, and $\\tilde p_{12} = (s+1)/N'$ and $\\tilde p_{21} = (r+1)/N'$, instead of $\\hat p_{12}$ and $\\hat p_{21}$.\n\n### Newcombe Method (Also known as the Hybrid Score method, Square-and-Add, or the Method of Variance Estimates Recovery (MOVER) )\n\nNote that Newcombe described this as a 'Score' method in his 1998 paper, but his later work labelled it as 'Square-and-add' - it is not really a score method, but a hybrid method obtained by combining Wilson Score intervals calculated separately for the two proportions.\n\nDerive the confidence intervals for each of the individual single samples 1 and 2, using the Wilson Method equations as described [here](ci_for_prop.html).\n\nLet $l_1$ = Lower CI for sample 1, and $u_1$ be the upper CI for sample 1.\n\nLet $l_2$ = Lower CI for sample 2, and $u_2$ be the upper CI for sample 2.\n\nWe then define $\\phi$ which is an estimate of the correlation coefficient, used to correct for $\\hat p_1$ and $\\hat p_2$ not being independent. As the samples are related, $\\phi$ is usually positive and thus makes the confidence interval smaller (narrower).\n\nIf any of r+s, t+u, r+t, s+u are zero, then set $\\phi$ to be 0.\n\nOtherwise we calculate A, B and C, and $\\phi=C / \\sqrt A$\n\nIn the above: $A=(r+s)(t+u)(r+t)(s+u)$ and $B=(ru-st)$\n\nTo calculate C follow the table below. 
N = sample size.\n\n| Condition of B | Set C equal to |\n|---------------------------|----------------|\n| If B is greater than N/2 | B - N/2 |\n| If B is between 0 and N/2 | 0 |\n| If B is less than 0 | B |\n\nLet $D = \\hat p_1 - \\hat p_2$ (the difference between the observed proportions of responders)\n\nThe CI for the paired difference between the proportions is: $D - \\sqrt{((\\hat p_1 - l_1)^2 - 2\\phi(\\hat p_1 - l_1)(u_2 - \\hat p_2) + (u_2 - \\hat p_2)^2 )}$ to\n\n$D + \\sqrt{((\\hat p_2 - l_2)^2 - 2\\phi(\\hat p_2 - l_2)(u_1 - \\hat p_1) + (u_1 - \\hat p_1)^2 )}$\n\nFirst using the Wilson Method equations for each of the individual single samples 1 and 2.\n\n| | Active | Placebo |\n|----------|---------------|---------------|\n| a | 73.842 | 55.842 |\n| b | 11.974 | 13.728 |\n| c | 99.683 | 99.683 |\n| Lower CI | 0.621 = $l_1$ | 0.422 = $l_2$ |\n| Upper CI | 0.861 = $u_1$ | 0.698 = $u_2$ |\n\n$A=(r+s)(t+u)(r+t)(s+u)$ = 9450000\n\nB = 10\n\nC = 0 (as B is between 0 and N/2)\n\n$\\phi$ = 0.\n\nHence the middle part of the equation simplifies to 0, and therefore in this case the interval is the same as the MOVER interval for independent proportions:\n\nLower CI = $D - \\sqrt{((\\hat p_1 - l_1)^2 + (u_2 - \\hat p_2)^2 )}$ = 0.196 - sqrt \\[ (0.761-0.621)\\^2 + (0.698-0.565) \\^2 \\]\n\nUpper CI = $D + \\sqrt{((\\hat p_2 - l_2)^2 + (u_1 - \\hat p_1)^2 )}$ = 0.196 + sqrt \\[ (0.565-0.422)\\^2 + (0.861-0.761) \\^2 \\]\n\nCI= 0.002602 to 0.369943\n\nOther variants of the MOVER method are possible, and achieve improved coverage properties, by replacing the Wilson method with a more centrally-located interval for the single proportions. 
For example, using the Jeffreys method gives a CI of (0.003161, 0.373479).\n\n### Asymptotic Score Methods\n\nA genuine score method for paired $\\theta_{RD}$, analogous to the Miettinen-Nurminen method for the unpaired difference, was developed in 1998[@tango1998], using the contrast function $S(\\theta) = \\hat p_1 - \\hat p_2 - \\theta$, together with its variance (accounting for the correlation) estimated at the constrained maximum likelihood estimates of the cell probabilities for each given value of $\\theta$. Generally this is not a method that would be hand-calculated - originally the approach involved an iterative process of evaluating the score function over the range of $\\theta$, but closed-form expressions are now available.\n\nA proposed skewness-corrected version (SCASu) and another with an added 'N-1' variance bias correction (SCAS) is currently under review for publication.\n\nThe Tango and SCAS methods are available for SAS via the `%PAIRBINCI` macro.\n\nFor the example dataset, the Tango CI is (0.000419, 0.377185), the SCASu version is (0.000423, 0.380312), and the SCAS CI is (-0.001869, 0.382194).\n\n## Methods for Calculating Confidence Intervals for Relative Risk from matched pairs\n\n### Normal Approximation Method (Also known as the Wald Method)\n\nThe normal approximation for $\\theta_{RR}$ involves the logged estimate $ln(\\hat \\theta_{RR})$ and its SE which is estimated as: $\\sqrt{\\frac{(s + t)}{(r+s)(r+t)}}$, with the resulting confidence limits being back-transformed to the ratio scale.\n\n$ln(\\hat \\theta_{RR})$ = ln(35/26) = 0.297\n\nSE = sqrt( 21 / (35\\*26) ) = 0.152\n\nLower CI= exp(0.297 - 1.96 \\* 0.152) = 0.99908\n\nUpper CI = exp(0.297 + 1.96 \\* 0.152) = 1.81289\n\n### MOVER Method (Also known as the Hybrid Score method or Square-and-Add)\n\nExtending the methodology for the Newcombe method for RD, this hybrid method combines CIs for the two single proportions (using either the Wilson or Jeffreys method), together with an 
estimate of their correlation coefficient.\n\n\\[More details & example to be added - not available in SAS\\]\n\nMOVER-Wilson CI: (1.003963, 1.84443). MOVER-Jeffreys CI: (1.004783, 1.851871)\n\n### Asymptotic Score Methods\n\nTwo score methods have also been developed for paired relative risk, which have been shown to be algebraically equivalent[@nam2002][@tang2003]. A proposed skewness-corrected version (SCASu) and another with an added 'N-1' variance bias correction (SCAS) is currently under review for publication.\n\nThese methods are available for SAS via the `%PAIRBINCI` macro. The Tang interval is (1.000639, 1.85982), the SCASu version is (1.00064, 1.867063) and the SCAS CI is (0.997176, 1.874279).\n\n## Methods for Calculating Confidence Intervals for Odds Ratio from matched pairs\n\nMost methods for the paired conditional odds ratio are based on calculating an interval $(L^*, U^*)$ for the single proportion $s/(s+t)$, using any of the methods described [here](ci_for_prop.html), and then transforming it as $(\\frac{L^*}{1-L^*}, \\frac{U^*}{1-U^*})$. Generally, the CIs for $\\theta_{OR}$ inherit the properties of the interval method selected for the single proportion.\n\n## Continuity Adjusted Methods\n\nContinuity adjustments can be incorporated into the formulae for most (if not all) of the above methods. These have been implemented for Tango/Tang, SCAS and MOVER methods in the ratesci package for R, but not yet added to the `%PAIRBINCI` macro.\n\n## Consistency with Hypothesis Tests\n\nThe Asymptotic Score methods without 'N-1' correction (Tango and SCASu methods for RD, and Tang/Nam-Blackwelder and SCASu methods for RR) are guaranteed to be consistent with the result of the McNemar test. 
The SCAS method comes with a corresponding test (as yet unpublished), which is a modified version of the McNemar test.\n\n## Example Code using PROC FREQ\n\nAs mentioned in the introduction, PROC FREQ does not currently have a procedure which outputs the above confidence intervals for matched proportions\\*. A macro (`%PAIRBINCI`) is available [here](https://github.com/petelaud/ratesci-sas){.uri} which provides the asymptotic score method by Tango, and a skewness corrected version (paper under review).\n\n\\*Actually that's not strictly true. A SAS Support knowledge base article [here](https://support.sas.com/kb/46/997.html) describes an approach using stratified analysis to obtain a CI for the 'common risk difference', which appears to give the Wald CI. However, this requires data to be structured in a long format, with one row per condition per pair, whereas the corresponding McNemar test of marginal homogeneity requires data structured as one row per pair.\n\nAt least three options are given for the stratified common risk difference CI: Mantel-Haenszel (`CL=MH`), 'stratified' Newcombe (`CL=newcombe`) or 'summary score' (`CL=score`) intervals (See [here](https://support.sas.com/documentation/cdl/en/procstat/67528/HTML/default/viewer.htm#procstat_freq_details63.htm) for equations), but some options give unexpected results, or no result at all. For the example data above, the default output gives the MH interval as (0.008764, 0.382541), which matches the Wald method, but the 'summary score' CI is (-0.0785, 0.1690) around a point estimate of 0.0452. 
These methods would not guarantee agreement with the McNemar test of marginal homogeneity.\n\nThere is no equivalent option in PROC FREQ for common relative risks for paired data.\n\n\\[Example code to be added for `%PAIRBINCI`\\]\n\n``` sas\ndata dat_used;\n do id = 1 to 20;\n trt = 'ACT'; resp = 'Yes'; output;\n trt = 'PBO'; resp = 'Yes'; output;\n end;\n do id = 21 to 35;\n trt = 'ACT'; resp = 'Yes'; output;\n trt = 'PBO'; resp = 'No'; output;\n end;\n do id = 36 to 41;\n trt = 'ACT'; resp = 'No'; output;\n trt = 'PBO'; resp = 'Yes'; output;\n end;\n do id = 42 to 46;\n trt = 'ACT'; resp = 'No'; output;\n trt = 'PBO'; resp = 'No'; output;\n end;\nrun;\n\nproc sort data=dat_used;\n by trt descending resp;\nrun;\n\nproc freq data=dat_used order=data; \n table id*trt*resp / commonriskdiff noprint; \nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/riskdiff_stratified.png){fig-align='center' width=50%}\n:::\n:::\n\n\n## References\n", - "supporting": [], + "markdown": "---\ntitle: \"Confidence intervals for Paired Proportions in SAS\"\nbibliography: references.bib\ncsl: nature.csl \n---\n\n# Introduction\n\nThis page covers confidence intervals for comparisons of two paired proportions in SAS. Note that PROC FREQ will give a McNemar test for such an analysis (via the `AGREE` option), but with no corresponding confidence interval. Some methods are described below to facilitate computing them programmatically. A SAS macro (`%PAIRBINCI`) can be downloaded from to obtain the Asymptotic Score methods, which have superior coverage properties.\n\nAnalysis may be based on the risk difference (RD) contrast $\\theta_{RD} = p_1 - p_2$ or relative risk (RR) $\\theta_{RR} = p_1 / p_2$. 
In the context of paired data, and of particular interest for case-control studies, the odds ratio (OR) is estimated conditional on the number of discordant pairs, calculated as $\\theta_{OR} = p_{12} / p_{21}$, resulting in confidence intervals that are simple transformations of a CI for a single proportion.\n\nSee the [summary page](../method_summary/ci_for_prop_intro.html) for general introductory information on confidence intervals for proportions, including the principles underlying the most common methods.\n\nYou may experience paired data in any of the following types of situation:\n\n- Tumour assesssments classified as Progressive Disease or Not Progressive Disease performed by an Investigator and separately by an independent panel.\n\n- A paired case-control study (each subject taking active treatment is matched to a patient taking control)\n\n- A cross-over trial where the same subjects take both medications\n\nIn all these cases, the calculated proportions for the 2 groups are not independent.\n\nUsing a cross-over study as our example, a 2 x 2 table can be formed as follows:\n\n+-----------------------+---------------+---------------+--------------+\n| | Placebo\\ | Placebo\\ | Total |\n| | Response= Yes | Response = No | |\n+=======================+===============+===============+==============+\n| Active Response = Yes | r | s | r+s |\n+-----------------------+---------------+---------------+--------------+\n| Active Response = No | t | u | t+u |\n+-----------------------+---------------+---------------+--------------+\n| Total | r+t | s+u | N = r+s+t+u |\n+-----------------------+---------------+---------------+--------------+\n\nThe table below indicates the proportions that are estimated from the data (note the difference in structure compared to the usual 2x2 table for independent proportions).\n\n+-----------------------+---------------+---------------+--------------+\n| | Placebo\\ | Placebo\\ | Total |\n| | Response= Yes | Response = No | 
|\n+=======================+===============+===============+==============+\n| Active Response = Yes | $p_{11}$ | $p_{12}$ | $p_1$ |\n+-----------------------+---------------+---------------+--------------+\n| Active Response = No | $p_{21}$ | $p_{22}$ | $(1-p_1)$ |\n+-----------------------+---------------+---------------+--------------+\n| Total | $p_2$ | $(1-p_2)$ | |\n+-----------------------+---------------+---------------+--------------+\n\nThe proportions of subjects responding on each treatment are:\n\nActive: $\\hat p_1 = (r+s)/N$ and Placebo: $\\hat p_2= (r+t)/N$\n\nThe estimated difference between the proportions for each treatment is: $D=\\hat p_1 - \\hat p_2 = (s-t)/N$\n\nThe estimated relative risk is $(r+s)/(r+t)$.\n\nThe estimated conditional odds ratio is $s/t$.\n\n# Data used\n\nWorked examples below use the following artificial dataset:\n\n+-----------------------+---------------+---------------+--------------+\n| | Placebo\\ | Placebo\\ | Total |\n| | Response= Yes | Response = No | |\n+=======================+===============+===============+==============+\n| Active Response = Yes | r = 20 | s = 15 | r+s = 35 |\n+-----------------------+---------------+---------------+--------------+\n| Active Response = No | t = 6 | u = 5 | t+u = 11 |\n+-----------------------+---------------+---------------+--------------+\n| Total | r+t = 26 | s+u = 20 | N = 46 |\n+-----------------------+---------------+---------------+--------------+\n\n# Methods for Calculating Confidence Intervals for Proportion Difference from matched pairs\n\n## Normal Approximation Method (Also known as the Wald Method)\n\nIn large random samples from paired data, the sampling distribution of the difference between two proportions is assumed to follow the normal distribution. 
The estimated SE for the difference and 95% confidence interval can be calculated using the following equations.\n\n$SE(D)=\\frac{1}{N} \\times \\sqrt{(s+t-\\frac{(s-t)^2}{N})} = \\sqrt{\\frac{1}{N}(\\hat p_{12} + \\hat p_{21} - (\\hat p_{12} - \\hat p_{21})^2)}$\n\n$D-z_{\\alpha/2} \\times SE(D)$ to $D+z_{\\alpha/2} \\times SE(D)$\n\nSo, for the example dataset,\n\nD = (15-6) /46 = 0.196\n\nSE(D) = 1/ 46 \\* sqrt (15+6- (((15-6)\\^2)/46) ) = 0.09535\n\nLower CI= 0.196 - 1.96 \\*0.0954 = 0.008764\n\nUpper CI = 0.196 + 1.96 \\* 0.0954 = 0.382541\n\nLike other Wald methods, this approach has been found to perform poorly, failing to achieve the nominal confidence level[@newcombe1998][@fagerland2014]. An adjustment was suggested by Bonett & Price, similar to Agresti-Caffo for independent proportions, using $N'=N+2$ instead of $N$, and $\\tilde p_{12} = (s+1)/N'$ and $\\tilde p_{21} = (r+1)/N'$, instead of $\\hat p_{12}$ and $\\hat p_{21}$.\n\n## Newcombe Method (Also known as the Hybrid Score method, Square-and-Add, or the Method of Variance Estimates Recovery (MOVER) )\n\nNote that Newcombe described this as a 'Score' method in his 1998 paper, but his later work labelled it as 'Square-and-add' - it is not really a score method, but a hybrid method obtained by combining Wilson Score intervals calculated separately for the two proportions.\n\nDerive the confidence intervals for each of the individual single samples 1 and 2, using the Wilson Method equations as described [here](ci_for_prop.html).\n\nLet $l_1$ = Lower CI for sample 1, and $u_1$ be the upper CI for sample 1.\n\nLet $l_2$ = Lower CI for sample 2, and $u_2$ be the upper CI for sample 2.\n\nWe then define $\\phi$ which is an estimate of the correlation coefficient, used to correct for $\\hat p_1$ and $\\hat p_2$ not being independent. 
As the samples are related, $\\phi$ is usually positive and thus makes the confidence interval smaller (narrower).\n\nIf any of r+s, t+u, r+t, s+u are zero, then set $\\phi$ to be 0.\n\nOtherwise we calculate A, B and C, and $\\phi=C / \\sqrt A$\n\nIn the above: $A=(r+s)(t+u)(r+t)(s+u)$ and $B=(ru-st)$\n\nTo calculate C follow the table below. N = sample size.\n\n| Condition of B | Set C equal to |\n|---------------------------|----------------|\n| If B is greater than N/2 | B - N/2 |\n| If B is between 0 and N/2 | 0 |\n| If B is less than 0 | B |\n\nLet $D = \\hat p_1 - \\hat p_2$ (the difference between the observed proportions of responders)\n\nThe CI for the paired difference between the proportions is: $D - \\sqrt{((\\hat p_1 - l_1)^2 - 2\\phi(\\hat p_1 - l_1)(u_2 - \\hat p_2) + (u_2 - \\hat p_2)^2 )}$ to\n\n$D + \\sqrt{((\\hat p_2 - l_2)^2 - 2\\phi(\\hat p_2 - l_2)(u_1 - \\hat p_1) + (u_1 - \\hat p_1)^2 )}$\n\nFirst using the Wilson Method equations for each of the individual single samples 1 and 2.\n\n| | Active | Placebo |\n|----------|---------------|---------------|\n| a | 73.842 | 55.842 |\n| b | 11.974 | 13.728 |\n| c | 99.683 | 99.683 |\n| Lower CI | 0.621 = $l_1$ | 0.422 = $l_2$ |\n| Upper CI | 0.861 = $u_1$ | 0.698 = $u_2$ |\n\n$A=(r+s)(t+u)(r+t)(s+u)$ = 9450000\n\nB = 10\n\nC = 0 (as B is between 0 and N/2)\n\n$\\phi$ = 0.\n\nHence the middle part of the equation simplifies to 0, and therefore in this case the interval is the same as the MOVER interval for independent proportions:\n\nLower CI = $D - \\sqrt{((\\hat p_1 - l_1)^2 + (u_2 - \\hat p_2)^2 )}$ = 0.196 - sqrt \\[ (0.761-0.621)\\^2 + (0.698-0.565) \\^2 \\]\n\nUpper CI = $D + \\sqrt{((\\hat p_2 - l_2)^2 + (u_1 - \\hat p_1)^2 )}$ = 0.196 + sqrt \\[ (0.565-0.422)\\^2 + (0.861-0.761) \\^2 \\]\n\nCI= 0.002602 to 0.369943\n\nOther variants of the MOVER method are possible, and achieve improved coverage properties, by replacing the Wilson method with a more centrally-located interval for the single 
proportions. For example, using the Jeffreys method gives a CI of (0.003161, 0.373479).\n\n## Asymptotic Score Methods\n\nA genuine score method for paired $\\theta_{RD}$, analogous to the Miettinen-Nurminen method for the unpaired difference, was developed in 1998[@tango1998], using the contrast function $S(\\theta) = \\hat p_1 - \\hat p_2 - \\theta$, together with its variance (accounting for the correlation) estimated at the constrained maximum likelihood estimates of the cell probabilities for each given value of $\\theta$. Generally this is not a method that would be hand-calculated - originally the approach involved an iterative process of evaluating the score function over the range of $\\theta$, but closed-form expressions are now available.\n\nA proposed skewness-corrected version (SCASu) and another with an added 'N-1' variance bias correction (SCAS) is currently under review for publication.\n\nThe Tango and SCAS methods are available for SAS via the `%PAIRBINCI` macro.\n\nFor the example dataset, the Tango CI is (0.000419, 0.377185), the SCASu version is (0.000423, 0.380312), and the SCAS CI is (-0.001869, 0.382194).\n\n# Methods for Calculating Confidence Intervals for Relative Risk from matched pairs\n\n## Normal Approximation Method (Also known as the Wald Method)\n\nThe normal approximation for $\\theta_{RR}$ involves the logged estimate $ln(\\hat \\theta_{RR})$ and its SE which is estimated as: $\\sqrt{\\frac{(s + t)}{(r+s)(r+t)}}$, with the resulting confidence limits being back-transformed to the ratio scale.\n\n$ln(\\hat \\theta_{RR})$ = ln(35/26) = 0.297\n\nSE = sqrt( 21 / (35\\*26) ) = 0.152\n\nLower CI= exp(0.297 - 1.96 \\* 0.152) = 0.99908\n\nUpper CI = exp(0.297 + 1.96 \\* 0.152) = 1.81289\n\n## MOVER-R Method (Also known as the Hybrid Score method or Square-and-Add)\n\nExtending the methodology for the Newcombe method for RD, this hybrid method combines CIs for the two single proportions (using either the Wilson or Jeffreys method), 
together with an estimate of their correlation coefficient.\n\n\\[More details & example to be added - not available in SAS\\]\n\nMOVER-Wilson CI: (1.003963, 1.84443). MOVER-Jeffreys CI: (1.004783, 1.851871)\n\n## Asymptotic Score Methods\n\nTwo score methods have also been developed for paired relative risk, which have been shown to be algebraically equivalent[@nam2002][@tang2003]. A proposed skewness-corrected version (SCASu) and another with an added 'N-1' variance bias correction (SCAS) is currently under review for publication.\n\nThese methods are available for SAS via the `%PAIRBINCI` macro. The Tang interval is (1.000639, 1.85982), the SCASu version is (1.00064, 1.867063) and the SCAS CI is (0.997176, 1.874279).\n\n# Methods for Calculating Confidence Intervals for Odds Ratio from matched pairs\n\nMost methods for the paired conditional odds ratio are based on calculating an interval $(L^*, U^*)$ for the single proportion $s/(s+t)$, using any of the methods described [here](ci_for_prop.html), and then transforming it as $(\\frac{L^*}{1-L^*}, \\frac{U^*}{1-U^*})$. Generally, the CIs for $\\theta_{OR}$ inherit the properties of the interval method selected for the single proportion.\n\n# Continuity Adjusted Methods\n\nContinuity adjustments can be incorporated into the formulae for most (if not all) of the above methods. These have been implemented for Tango/Tang, SCAS and MOVER methods in the ratesci package for R, but not yet added to the `%PAIRBINCI` macro.\n\n# Consistency with Hypothesis Tests\n\nThe Asymptotic Score methods without 'N-1' correction (Tango and SCASu methods for RD, and Tang/Nam-Blackwelder and SCASu methods for RR) are guaranteed to be consistent with the result of the McNemar test. 
The SCAS method comes with a corresponding test (as yet unpublished), which is a modified version of the McNemar test.\n\n# Example Code using PROC FREQ\n\nAs mentioned in the introduction, PROC FREQ does not currently have a procedure which outputs the above confidence intervals for matched proportions\\*. A macro (`%PAIRBINCI`) is available [here](https://github.com/petelaud/ratesci-sas){.uri} which provides the asymptotic score method by Tango, and a skewness corrected version (paper under review).\n\n\\*Actually that's not strictly true. A SAS Support knowledge base article [here](https://support.sas.com/kb/46/997.html) describes an approach using stratified analysis to obtain a CI for the 'common risk difference', which appears to give the Wald CI. However, this requires data to be structured in a long format, with one row per condition per pair, whereas the corresponding McNemar test of marginal homogeneity requires data structured as one row per pair.\n\nAt least three options are given for the stratified common risk difference CI: Mantel-Haenszel (`CL=MH`), 'stratified' Newcombe (`CL=newcombe`) or 'summary score' (`CL=score`) intervals (See [here](https://support.sas.com/documentation/cdl/en/procstat/67528/HTML/default/viewer.htm#procstat_freq_details63.htm) for equations), but some options give unexpected results, or no result at all. For the example data above, the default output gives the MH interval as (0.008764, 0.382541), which matches the Wald method, but the 'summary score' CI is (-0.0785, 0.1690) around a point estimate of 0.0452. 
These methods would not guarantee agreement with the McNemar test of marginal homogeneity.\n\nThere is no equivalent option in PROC FREQ for common relative risks for paired data.\n\n\\[Example code to be added for `%PAIRBINCI`\\]\n\n```sas\ndata dat_used;\n do id = 1 to 20;\n trt = 'ACT'; resp = 'Yes'; output;\n trt = 'PBO'; resp = 'Yes'; output;\n end;\n do id = 21 to 35;\n trt = 'ACT'; resp = 'Yes'; output;\n trt = 'PBO'; resp = 'No'; output;\n end;\n do id = 36 to 41;\n trt = 'ACT'; resp = 'No'; output;\n trt = 'PBO'; resp = 'Yes'; output;\n end;\n do id = 42 to 46;\n trt = 'ACT'; resp = 'No'; output;\n trt = 'PBO'; resp = 'No'; output;\n end;\nrun;\n\nproc sort data=dat_used;\n by trt descending resp;\nrun;\n\nproc freq data=dat_used order=data; \n table id*trt*resp / commonriskdiff noprint; \nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/riskdiff_stratified.png){fig-align='center' width=50%}\n:::\n:::\n\n\n# References\n", + "supporting": [ + "ci_for_paired_prop_files" + ], "filters": [ "rmarkdown/pagebreak.lua" ], diff --git a/_freeze/SAS/ci_for_prop/execute-results/html.json b/_freeze/SAS/ci_for_prop/execute-results/html.json index b7a067a15..df2a52f3f 100644 --- a/_freeze/SAS/ci_for_prop/execute-results/html.json +++ b/_freeze/SAS/ci_for_prop/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "c6b5ded082e594f3056a45f595f14a5e", + "hash": "b5aaa8eec6665b55ce5986c3b9b81091", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Confidence intervals for a Proportion in SAS\"\n---\n\n## Introduction\n\nSee the [summary page](../method_summary/ci_for_prop_intro.html) for general introductory information on confidence intervals for proportions, including the principles underlying the most common methods.\n\n## Data Used\n\nThe adcibc data stored [here](../data/adcibc.csv) was used in this example, creating a binary treatment variable `trt` taking the values of `Act` or `PBO` and a binary 
response variable `resp` taking the values of `Yes` or `No`. For this example, a response is defined as a score greater than 4.\n\n``` sas\ndata adcibc2 (keep=trt resp) ;\n set adcibc; \n if aval gt 4 then resp=\"Yes\";\n else resp=\"No\"; \n if trtp=\"Placebo\" then trt=\"PBO\";\n else trt=\"Act\"; \nrun;\n```\n\nThe below shows that for the Active Treatment, there are 36 responders out of 154 subjects = 0.2338 (23.38% responders).\n\n``` sas\nproc freq data=adcibc2;\n table trt*resp/ nopct nocol;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/2by2crosstab.png){fig-align='center' width=50%}\n:::\n:::\n\n\n## Methods for Calculating Confidence Intervals for a Single Proportion\n\nHere we are calculating a $100(1-\\alpha)\\%$ (usually 95%) confidence interval for the proportion $p$ of responders in the active treatment group, estimated from the sample proportion $\\hat p = x / n$.\n\nSAS PROC FREQ in Version 9.4 can compute 11 methods to calculate CIs for a single proportion, an explanation of most of these methods and the code is shown below. See [BINOMIAL](https://support.sas.com/documentation/cdl/en/procstat/63104/HTML/default/viewer.htm#procstat_freq_sect010.htm)^1^ for more information on SAS parameterization. It is recommended to always sort your data prior to doing a PROC FREQ.\n\nFor more information about some of these methods in R & SAS, including which performs better in different scenarios see [Five Confidence Intervals for Proportions That You Should Know about](https://towardsdatascience.com/five-confidence-intervals-for-proportions-that-you-should-know-about-7ff5484c024f)^2^ and [Confidence Intervals for Binomial Proportion Using SAS](https://www.lexjansen.com/sesug/2015/103_Final_PDF.pdf)^3^. 
Key literature on the subject includes papers by [Brown et al.](https://www.jstor.org/stable/2676784?seq=1)^4^ and [Newcombe](https://pubmed.ncbi.nlm.nih.gov/9595616/)^5^.\n\n### Clopper-Pearson (Exact or binomial CI) Method\n\nWith binary endpoint data (response/non-response), we make the assumption that the proportion of responders has been derived from a series of Bernoulli trials. Trials (Subjects) are independent and we have a fixed number of repeated trials with an outcome of respond or not respond. This type of data follows the discrete binomial probability distribution, and the Clopper-Pearson^6^ ('exact') method uses this distribution to calculate the CIs.\n\nThis method guarantees strictly conservative coverage, but has been noted to be excessively conservative, as for any given proportion, the actual coverage probability can be much larger than $(1-\\alpha)$.\n\nThe Clopper-Pearson method is output by SAS as one of the default methods (labelled as \"Exact Conf Limits\" in the \"Proportion\" ODS output object), but you can also specify it using `BINOMIAL(LEVEL=\"Yes\" CL=CLOPPERPEARSON);` which creates a separate output object \"ProportionCLs\".\n\n### Normal Approximation Method (Also known as the Wald Method)\n\nThe traditional alternative to the Clopper-Pearson ('Exact') method is the asymptotic Normal Approximation (Wald) CI. The poor performance of this method is well documented - it can fail to achieve the nominal confidence level even with large sample sizes, and there is a consensus in the literature that it should be avoided^4, p128^. Nevertheless, it remains a default output component in SAS, so is included here for reference.\n\nIn large random samples from independent trials, the sampling distribution of proportions approximately follows the normal distribution. The expectation of a sample proportion is the corresponding population proportion. 
Therefore, based on a sample of size $n$, a $(1-\\alpha)\\%$ confidence interval for population proportion can be calculated using the normal approximation as follows:\n\n$p\\approx \\hat p \\pm z_{\\alpha/2} \\sqrt{\\hat p(1-\\hat p)/n}$, where $\\hat p=x/n$ is the sample proportion, $z_{\\alpha/2}$ is the $1-\\alpha/2$ quantile of a standard normal distribution corresponding to the confidence level $(1-\\alpha)$, and $\\sqrt{\\hat p(1-\\hat p)/n}$ is the estimated standard error.\n\nOne should note that the approximation can become increasingly unreliable as the proportion of responders gets closer to 0 or 1 (e.g. 0 or 100% responding). In this scenario, common issues consist of:\n\n- it does not respect the 0 and 1 proportion boundary (so you can get a lower CI of -0.1 or an upper CI of 1.1!)\n\n- the derived 95% CI may cover the true proportion substantially less than 95% of the time\n\n- the left and right tail probabilities of the 95% CI are asymmetrical, such that there is generally more (and often substantially more) than 2.5% chance of the interval not covering the true proportion at one end (as observed in Newcombe^5^). Note that symmetrical or \"equal-tailed\" coverage (or what Newcombe calls \"central interval location\") has direct relevance to the type 1 error for non-inferiority hypothesis tests, but is also a generally desirable property.\n\nAlthough these undesirable features become more severe for proportions close to 0 or 1, they also occur more generally for the Wald interval, and any of the alternative methods should be preferred instead.\n\nThe Wald method can be derived with or without a Yates \"continuity correction\". In principle, this correction is intended to approximate the conservative coverage of the Clopper-Pearson method, but it fails to achieve the minimum coverage criterion, and tail probabilities remain imbalanced. 
(Note that in general, there is some debate over the use of the term \"correction\" - particularly when applied to other methods, the adjustment produces coverage probabilities that are further away from the nominal confidence level to achieve strictly conservative coverage, which may or may not be more \"correct\", depending on your point of view.)\n\nThe Wald normal approximation method is output by SAS as a default method, but you can also specify it using `BINOMIAL(LEVEL=\"Yes\" CL=WALD);`\n\nThe \"continuity corrected\" version is obtained using `BINOMIAL(LEVEL=\"Yes\" CL=WALD(CORRECT));`\n\n### Wilson method (Also known as the (Asymptotic) Score method)^7^\n\nThe Wilson (Score) method is also based on an asymptotic normal approximation, but uses a score statistic that replaces the estimated variance $\\hat V(\\hat p)=\\hat p(1-\\hat p)/n$ with the true variance $V(\\hat p)=p(1-p)/n$. The resulting score is then rearranged to a quadratic equation and solved for p for a given $\\alpha$. This method resolves many of the issues affecting the Wald method - it avoids boundary violations, and achieves coverage probabilities close to the nominal level (on average). However, it over-corrects the asymmetric coverage of Wald - the location of the Wilson interval is shifted too far towards 0.5.\n\nThe method can be derived with or without a Yates continuity correction. The corrected interval closely approximates the coverage of the Clopper-Pearson method, but only in terms of overall two-sided coverage - due to the asymmetric coverage, it does not guarantee that the non-coverage probability is less than $\\alpha/2$.\n\nLet $\\hat p$ =r/n, where r= number of responses, and n=number of subjects, $\\hat q = 1- \\hat p$, and z= the appropriate value from standard normal distribution: $z_{1-\\alpha/2}$.\n\nFor example, for 95% confidence intervals, $\\alpha=0.05$, using standard normal tables, z in the equations below will take the value =1.96. 
Calculate 3 quantities\n\n$$ A= 2r+z^2$$\n\n$$ B=z\\sqrt(z^2 + 4r \\hat q) $$ $$ C=2(n+z^2) $$The method calculates the confidence interval \\[Lower, Upper\\] as: \\[(A-B)/C, (A+B)/C\\]\n\nA = 2 \\* 36 + 1.96\\^2 = 75.8416\n\nB = 1.96 \\* sqrt (1.96\\^2 + 4 x 36 x 0.7662) = 20.9435\n\nC = 2\\* (154+1.96\\^2) = 315.6832\n\nLower interval = A-B/C = 75.8416 - 20.9435 / 315.6832 = 0.17390\n\nUpper interval = A+B/C = 75.8416 + 20.9435 / 315.6832 = 0.30659\n\nCI = 0.17390 to 0.30659\n\nThe Wilson (score) method is output by SAS using `BINOMIAL(LEVEL=\"Yes\" CL=Wilson);`\n\nThe continuity corrected Wilson method is specified using `BINOMIAL(LEVEL=\"Yes\" CL=WILSON(CORRECT));`\n\nThe only differences in the equations to calculate the Wilson score with continuity correction is that the equations for A and B are changed as follows:\n\n$$ A= 2r+z^2 -1$$\n\n$$ B=z\\sqrt(z^2 - 2 -\\frac{1}{n} + 4r \\hat q) $$\n\n### Agresti-Coull method\n\nThe Agresti-Coull method is a 'simple solution' designed to improve coverage compared to the Wald method and still perform better (i.e. less conservative) than Clopper-Pearson particularly when the probability isn't in the mid-range (0.5). It is less conservative whilst still having good coverage. The only difference compared to the Wald method is that it adds $z^2/2$ successes and failures to the original observations (when $\\alpha=0.05$ this increases the sample by approximately 4 observations).\n\nThe Agresti-Coull method is output by SAS using `BINOMIAL(LEVEL=\"Yes\" CL=AGRESTICOULL);`\n\n### Jeffreys method\n\nThe Jeffreys 'equal-tailed' method is a particular type of Bayesian method, which optimises central location instead of Highest Probability Density (HPD). For binomial proportions, the beta distribution is generally used for the conjugate prior, which consists of two parameters $\\alpha'$ and $\\beta'$. Setting $\\alpha'=\\beta'=0.5$ is called the Jeffreys prior. This is considered as non-informative for a binomial proportion. 
The resulting posterior density gives the CI as\n\n$$\n(Beta (x + 0.5, n - x + 0.5)_{\\alpha/2}, Beta (x + 0.5, n - x + 0.5)_{1-\\alpha/2})\n$$\n\nBoundary modifications are applied to force the lower limit to 0 if x=0 and the upper limit to 1 if x=n.\n\nThe coverage probabilities of the Jeffreys method are centred around the nominal confidence level on average, with symmetric \"equal-tailed\" 1-sided coverage^8 Appx S3.5^. This interval is output by SAS using `BINOMIAL(LEVEL=\"Yes\" CL=Jeffreys);`\n\n### Binomial based Mid-P method\n\nThe mid-P method is similar to the Clopper-Pearson method, in the sense that it is based on exact calculations from the binomial probability distribution, but it aims to reduce the conservatism. It's quite a complex method to compute compared to the methods above and rarely used in practice *\\[Ed: what source is there for this statement?\\]*. However, like the Jeffreys interval, it has excellent 1-sided and 2-sided coverage properties for those seeking to align mean coverage with the nominal confidence level^9^.\n\nThe mid-P method is output by SAS using `BINOMIAL(LEVEL=\"Yes\" CL=MIDP);`\n\n### Blaker method^10^\n\nThe Blaker method is a less conservative alternative to the Clopper-Pearson exact CI. It derives the CI by inverting the p-value function of a 2-sided exact test, so it achieves strictly conservative 2-sided coverage, but as a result the 1-sided coverage is not strictly conservative.\n\nThe Clopper-Pearson CI's are always wider and contain the Blaker CI limits. It's adoption has been limited due to the numerical algorithm taking longer to compute compared to some of the other methods especially when the sample size is large. 
NOTE: Klaschka and Reiczigel^11^ is yet another adaptation of this method.\n\nThe Blaker method is output by SAS using `BINOMIAL(LEVEL=\"Yes\" CL=BLAKER);`\n\n## Continuity Adjusted Methods\n\nSAS offers the 'CORRECT' option for producing more conservative versions of the Wald and Wilson Score intervals. These are intended to emulate the strictly conservative coverage of the 'exact' method, but often fail to do so. In particular, due to the skewed coverage of the Wilson interval, the 'corrected' version has one-sided coverage that exceeds the nominal $\\alpha/2$ for extreme values of p.\n\n## Consistency with hypothesis tests\n\nWithin SAS PROC FREQ for the asymptotic methods, consistency with the binomial test is provided by the Wilson CI, because the asymptotic test uses the standard error estimated under the null hypothesis (i.e. the value specified in the `BINOMIAL(P=...)` option of the `TABLES` statement.\n\nIf an `EXACT BINOMIAL;` statement is used, the resulting test is consistent with the Clopper-Pearson CI.\n\nAn exact hypothesis test with mid-P adjustment (consistent with the mid-P CI) is available via `EXACT BINOMIAL / MIDP;` but the output only gives the one-sided p-value, so if a 2-sided test is required it needs to be manually calculated by doubling the one-sided mid p-value.\n\n## Example Code using PROC FREQ\n\nBy adding the option `BINOMIAL(LEVEL=\"Yes\")` to your 'PROC FREQ' TABLES statement, SAS outputs the Normal Approximation (Wald) and Clopper-Pearson (Exact) confidence intervals as two default methods, derived for the `Responders` = `Yes`. If you do not specify the `LEVEL` you want to model, then SAS assumes you want to model the first level that appears in the output (alphabetically).\n\n**It is very important to ensure you are calculating the CI for the correct level! 
Check your output to confirm, you will see below it states `resp=Yes` !**\n\nCaution is required if there are no responders in a group (aside from any issues with the choice of confidence interval method), as SAS PROC FREQ (as of v9.4) does not output any confidence intervals in this case. If the `LEVEL` option has been specified, an error is produced, otherwise the procedure by default generates CIs for the proportion of non-responders. Note that valid CIs can (and should) be obtained for both p = 0/n and p = n/n. If needed, the interval for 0/n can be derived as 1 minus the transposed interval for n/n.\n\nThe output consists of the proportion of resp=Yes, the Asymptotic SE, 95% CIs using normal-approximation method, 95% CI using the Clopper-Pearson method, and then a Binomial test statistic and p-value for the null hypothesis of H0: Proportion = 0.5.\n\n``` sas\nproc sort data=adcibc2;\n by trt; \nrun; \n\nproc freq data=adcibc2; \n table resp/ nopct nocol BINOMIAL(LEVEL=\"Yes\");\n by trt;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_prop_pbo.png){fig-align='center' width=50%}\n:::\n:::\n\n\nBy adding the option `BINOMIAL(LEVEL=\"Yes\" CL=)`, the other CIs are output as shown below. You can list any number of the available methods within the BINOMIAL option CL=XXXX separated by a space. However, SAS will only calculate the WILSON and WALD or the WILSON(CORRECT) and WALD(CORRECT). 
SAS won't output them both from the same procedure.\n\n- `BINOMIAL(LEVEL=\"Yes\" CL=CLOPPERPEARSON WALD WILSON AGRESTICOULL JEFFREYS MIDP LIKELIHOODRATIO LOGIT BLAKER)` will return Agresti-Coull, Blaker, Clopper-Pearson(Exact), Wald(without continuity correction) Wilson(without continuity correction), Jeffreys, Mid-P, Likelihood Ratio, and Logit\n\n- `BINOMIAL(LEVEL=\"Yes\" CL=ALL);` will return Agresti-Coull, Clopper-Pearson (Exact), Jeffreys, Wald(without continuity correction), Wilson (without continuity correction). *\\[If the developers of SAS are reading this, it would seem more natural/logical for Mid-P to be included here instead of Clopper-Pearson! However, note that the ALL option is not mentioned in the current PROC FREQ documentation.\\]*\n\n- `BINOMIAL(LEVEL=\"Yes\" CL=ALL CORRECT);`will return Agresti-Coull, Clopper-Pearson (Exact), Jeffreys, Wald (with continuity correction), Wilson(with continuity correction). In previous versions of SAS this was coded as `BINOMIALc(...)`, but that is no longer documented. *\\[... 
following the above comment, it is also not logical for Jeffreys and Agresti-Coull to be included here alongside methods designed to achieve conservative coverage\\]*\n\n- `BINOMIAL(LEVEL=\"Yes\" CL=WILSON(CORRECT) WALD(CORRECT));`will return Wilson (with continuity correction) and Wald (with continuity correction)\n\n``` sas\nproc freq data=adcibc2;\n table resp/ nopct nocol \n BINOMIAL(LEVEL = \"Yes\" \n CL = WALD WILSON \n AGRESTICOULL JEFFREYS MIDP \n LIKELIHOODRATIO LOGIT);\n by trt;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_prop_all_act.png){fig-align='center' width=50%}\n:::\n:::\n\n\n``` sas\nproc freq data=adcibc2;\n table resp/ nopct nocol \n BINOMIAL(LEVEL=\"Yes\" \n CORRECT\n CL= WALD WILSON \n CLOPPERPEARSON BLAKER);\n by trt; \nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_prop_cc_act.png){fig-align='center' width=50%}\n:::\n:::\n\n\nSAS output often rounds to 3 or 4 decimal places in the output window, however the full values can be obtained using SAS ODS statements. `ods output binomialcls=bcl;` and then using the bcl dataset, in a data step to put the variable out to the number of decimal places we require.\\\n10 decimal places shown here: `lowercl2 = put(lowercl, 12.10);`\n\n## References\n\n1. [SAS PROC FREQ here](https://support.sas.com/documentation/cdl/en/procstat/63104/HTML/default/viewer.htm#procstat_freq_sect010.htm) and [here](https://support.sas.com/documentation/cdl/en/statug/63347/HTML/default/viewer.htm#statug_freq_sect028.htm)\n\n2. [Five Confidence Intervals for Proportions That You Should Know about](https://towardsdatascience.com/five-confidence-intervals-for-proportions-that-you-should-know-about-7ff5484c024f)\n\n3. [Confidence intervals for Binomial Proportion Using SAS](https://www.lexjansen.com/sesug/2015/103_Final_PDF.pdf)\n\n4. Brown LD, Cai TT, DasGupta A (2001). 
\"Interval estimation for a binomial proportion\", Statistical Science 16(2):101-133\n\n5. Newcombe RG (1998). \"Two-sided confidence intervals for the single proportion: comparison of seven methods\", Statistics in Medicine 17(8):857-872\n\n6. Clopper,C.J.,and Pearson,E.S.(1934),\"The Use of Confidence or Fiducial Limits Illustrated in the Case of the Binomial\", Biometrika 26, 404--413.\n\n7. D. Altman, D. Machin, T. Bryant, M. Gardner (eds). Statistics with Confidence: Confidence Intervals and Statistical Guidelines, 2nd edition. John Wiley and Sons 2000.\n\n8. Laud PJ (2017) Equal-tailed confidence intervals for comparison of rates. Pharmaceutical Statistics 16: 334-348\n\n9. Laud PJ (2018) Corrigendum: Equal-tailed confidence intervals for comparison of rates. Pharmaceutical Statistics 17: 290-293\n\n10. Blaker, H. (2000). Confidence curves and improved exact confidence intervals for discrete distributions, Canadian Journal of Statistics 28 (4), 783--798\n\n11. Klaschka, J. and Reiczigel, J. (2021). \"On matching confidence intervals and tests for some discrete distributions: Methodological and computational aspects,\" Computational Statistics, 36, 1775--1790.\n", + "markdown": "---\ntitle: \"Confidence intervals for a Proportion in SAS\"\n---\n\n## Introduction\n\nSee the [summary page](../method_summary/ci_for_prop_intro.html) for general introductory information on confidence intervals for proportions, including the principles underlying the most common methods.\n\n## Data Used\n\nThe adcibc data stored [here](../data/adcibc.csv) was used in this example, creating a binary treatment variable `trt` taking the values of `Act` or `PBO` and a binary response variable `resp` taking the values of `Yes` or `No`. 
For this example, a response is defined as a score greater than 4.\n\n```sas\ndata adcibc2 (keep=trt resp) ;\n set adcibc; \n if aval gt 4 then resp=\"Yes\";\n else resp=\"No\"; \n if trtp=\"Placebo\" then trt=\"PBO\";\n else trt=\"Act\"; \nrun;\n```\n\nThe below shows that for the Active Treatment, there are 36 responders out of 154 subjects = 0.2338 (23.38% responders).\n\n```sas\nproc freq data=adcibc2;\n table trt*resp/ nopct nocol;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/2by2crosstab.png){fig-align='center' width=50%}\n:::\n:::\n\n\n## Methods for Calculating Confidence Intervals for a Single Proportion\n\nHere we are calculating a $100(1-\\alpha)\\%$ (usually 95%) confidence interval for the proportion $p$ of responders in the active treatment group, estimated from the sample proportion $\\hat p = x / n$.\n\nSAS PROC FREQ in Version 9.4 can compute 11 methods to calculate CIs for a single proportion, an explanation of most of these methods and the code is shown below. See [BINOMIAL](https://support.sas.com/documentation/cdl/en/procstat/63104/HTML/default/viewer.htm#procstat_freq_sect010.htm)^1^ for more information on SAS parameterization. It is recommended to always sort your data prior to doing a PROC FREQ.\n\nFor more information about some of these methods in R & SAS, including which performs better in different scenarios see [Five Confidence Intervals for Proportions That You Should Know about](https://towardsdatascience.com/five-confidence-intervals-for-proportions-that-you-should-know-about-7ff5484c024f)^2^ and [Confidence Intervals for Binomial Proportion Using SAS](https://www.lexjansen.com/sesug/2015/103_Final_PDF.pdf)^3^. 
Key literature on the subject includes papers by [Brown et al.](https://www.jstor.org/stable/2676784?seq=1)^4^ and [Newcombe](https://pubmed.ncbi.nlm.nih.gov/9595616/)^5^.\n\n### Clopper-Pearson (Exact or binomial CI) Method\n\nWith binary endpoint data (response/non-response), we make the assumption that the proportion of responders has been derived from a series of Bernoulli trials. Trials (Subjects) are independent and we have a fixed number of repeated trials with an outcome of respond or not respond. This type of data follows the discrete binomial probability distribution, and the Clopper-Pearson^6^ ('exact') method uses this distribution to calculate the CIs.\n\nThis method guarantees strictly conservative coverage, but has been noted to be excessively conservative, as for any given proportion, the actual coverage probability can be much larger than $(1-\\alpha)$.\n\nThe Clopper-Pearson method is output by SAS as one of the default methods (labelled as \"Exact Conf Limits\" in the \"Proportion\" ODS output object), but you can also specify it using `BINOMIAL(LEVEL=\"Yes\" CL=CLOPPERPEARSON);` which creates a separate output object \"ProportionCLs\".\n\n### Normal Approximation Method (Also known as the Wald Method)\n\nThe traditional alternative to the Clopper-Pearson ('Exact') method is the asymptotic Normal Approximation (Wald) CI. The poor performance of this method is well documented - it can fail to achieve the nominal confidence level even with large sample sizes, and there is a consensus in the literature that it should be avoided^4, p128^. Nevertheless, it remains a default output component in SAS, so is included here for reference.\n\nIn large random samples from independent trials, the sampling distribution of proportions approximately follows the normal distribution. The expectation of a sample proportion is the corresponding population proportion. 
Therefore, based on a sample of size $n$, a $(1-\\alpha)\\%$ confidence interval for population proportion can be calculated using the normal approximation as follows:\n\n$p\\approx \\hat p \\pm z_{\\alpha/2} \\sqrt{\\hat p(1-\\hat p)/n}$, where $\\hat p=x/n$ is the sample proportion, $z_{\\alpha/2}$ is the $1-\\alpha/2$ quantile of a standard normal distribution corresponding to the confidence level $(1-\\alpha)$, and $\\sqrt{\\hat p(1-\\hat p)/n}$ is the estimated standard error.\n\nOne should note that the approximation can become increasingly unreliable as the proportion of responders gets closer to 0 or 1 (e.g. 0 or 100% responding). In this scenario, common issues consist of:\n\n- it does not respect the 0 and 1 proportion boundary (so you can get a lower CI of -0.1 or an upper CI of 1.1!)\n\n- the derived 95% CI may cover the true proportion substantially less than 95% of the time\n\n- the left and right tail probabilities of the 95% CI are asymmetrical, such that there is generally more (and often substantially more) than 2.5% chance of the interval not covering the true proportion at one end (as observed in Newcombe^5^). Note that symmetrical or \"equal-tailed\" coverage (or what Newcombe calls \"central interval location\") has direct relevance to the type 1 error for non-inferiority hypothesis tests, but is also a generally desirable property.\n\nAlthough these undesirable features become more severe for proportions close to 0 or 1, they also occur more generally for the Wald interval, and any of the alternative methods should be preferred instead.\n\nThe Wald method can be derived with or without a Yates \"continuity correction\". In principle, this correction is intended to approximate the conservative coverage of the Clopper-Pearson method, but it fails to achieve the minimum coverage criterion, and tail probabilities remain imbalanced. 
(Note that in general, there is some debate over the use of the term \"correction\" - particularly when applied to other methods, the adjustment produces coverage probabilities that are further away from the nominal confidence level to achieve strictly conservative coverage, which may or may not be more \"correct\", depending on your point of view.)\n\nThe Wald normal approximation method is output by SAS as a default method, but you can also specify it using `BINOMIAL(LEVEL=\"Yes\" CL=WALD);`\n\nThe \"continuity corrected\" version is obtained using `BINOMIAL(LEVEL=\"Yes\" CL=WALD(CORRECT));`\n\n### Wilson method (Also known as the (Asymptotic) Score method)^7^\n\nThe Wilson (Score) method is also based on an asymptotic normal approximation, but uses a score statistic that replaces the estimated variance $\\hat V(\\hat p)=\\hat p(1-\\hat p)/n$ with the true variance $V(\\hat p)=p(1-p)/n$. The resulting score is then rearranged to a quadratic equation and solved for p for a given $\\alpha$. This method resolves many of the issues affecting the Wald method - it avoids boundary violations, and achieves coverage probabilities close to the nominal level (on average). However, it over-corrects the asymmetric coverage of Wald - the location of the Wilson interval is shifted too far towards 0.5.\n\nThe method can be derived with or without a Yates continuity correction. The corrected interval closely approximates the coverage of the Clopper-Pearson method, but only in terms of overall two-sided coverage - due to the asymmetric coverage, it does not guarantee that the non-coverage probability is less than $\\alpha/2$.\n\nLet $\\hat p$ =r/n, where r= number of responses, and n=number of subjects, $\\hat q = 1- \\hat p$, and z= the appropriate value from standard normal distribution: $z_{1-\\alpha/2}$.\n\nFor example, for 95% confidence intervals, $\\alpha=0.05$, using standard normal tables, z in the equations below will take the value =1.96. 
Calculate 3 quantities\n\n$$ A= 2r+z^2$$\n\n$$ B=z\\sqrt(z^2 + 4r \\hat q) $$ $$ C=2(n+z^2) $$The method calculates the confidence interval \\[Lower, Upper\\] as: \\[(A-B)/C, (A+B)/C\\]\n\nA = 2 \\* 36 + 1.96\\^2 = 75.8416\n\nB = 1.96 \\* sqrt (1.96\\^2 + 4 x 36 x 0.7662) = 20.9435\n\nC = 2\\* (154+1.96\\^2) = 315.6832\n\nLower interval = A-B/C = 75.8416 - 20.9435 / 315.6832 = 0.17390\n\nUpper interval = A+B/C = 75.8416 + 20.9435 / 315.6832 = 0.30659\n\nCI = 0.17390 to 0.30659\n\nThe Wilson (score) method is output by SAS using `BINOMIAL(LEVEL=\"Yes\" CL=Wilson);`\n\nThe continuity corrected Wilson method is specified using `BINOMIAL(LEVEL=\"Yes\" CL=WILSON(CORRECT));`\n\nThe only differences in the equations to calculate the Wilson score with continuity correction is that the equations for A and B are changed as follows:\n\n$$ A= 2r+z^2 -1$$\n\n$$ B=z\\sqrt(z^2 - 2 -\\frac{1}{n} + 4r \\hat q) $$\n\n### Agresti-Coull method\n\nThe Agresti-Coull method is a 'simple solution' designed to improve coverage compared to the Wald method and still perform better (i.e. less conservative) than Clopper-Pearson particularly when the probability isn't in the mid-range (0.5). It is less conservative whilst still having good coverage. The only difference compared to the Wald method is that it adds $z^2/2$ successes and failures to the original observations (when $\\alpha=0.05$ this increases the sample by approximately 4 observations).\n\nThe Agresti-Coull method is output by SAS using `BINOMIAL(LEVEL=\"Yes\" CL=AGRESTICOULL);`\n\n### Jeffreys method\n\nThe Jeffreys 'equal-tailed' method is a particular type of Bayesian method, which optimises central location instead of Highest Probability Density (HPD). For binomial proportions, the beta distribution is generally used for the conjugate prior, which consists of two parameters $\\alpha'$ and $\\beta'$. Setting $\\alpha'=\\beta'=0.5$ is called the Jeffreys prior. This is considered as non-informative for a binomial proportion. 
The resulting posterior density gives the CI as\n\n$$\n(Beta (x + 0.5, n - x + 0.5)_{\\alpha/2}, Beta (x + 0.5, n - x + 0.5)_{1-\\alpha/2})\n$$\n\nBoundary modifications are applied to force the lower limit to 0 if x=0 and the upper limit to 1 if x=n.\n\nThe coverage probabilities of the Jeffreys method are centred around the nominal confidence level on average, with symmetric \"equal-tailed\" 1-sided coverage^8 Appx S3.5^. This interval is output by SAS using `BINOMIAL(LEVEL=\"Yes\" CL=Jeffreys);`\n\n### Binomial based Mid-P method\n\nThe mid-P method is similar to the Clopper-Pearson method, in the sense that it is based on exact calculations from the binomial probability distribution, but it aims to reduce the conservatism. It's quite a complex method to compute compared to the methods above and rarely used in practice *\\[Ed: what source is there for this statement?\\]*. However, like the Jeffreys interval, it has excellent 1-sided and 2-sided coverage properties for those seeking to align mean coverage with the nominal confidence level^9^.\n\nThe mid-P method is output by SAS using `BINOMIAL(LEVEL=\"Yes\" CL=MIDP);`\n\n### Blaker method^10^\n\nThe Blaker method is a less conservative alternative to the Clopper-Pearson exact CI. It derives the CI by inverting the p-value function of a 2-sided exact test, so it achieves strictly conservative 2-sided coverage, but as a result the 1-sided coverage is not strictly conservative.\n\nThe Clopper-Pearson CI's are always wider and contain the Blaker CI limits. It's adoption has been limited due to the numerical algorithm taking longer to compute compared to some of the other methods especially when the sample size is large. 
NOTE: Klaschka and Reiczigel^11^ is yet another adaptation of this method.\n\nThe Blaker method is output by SAS using `BINOMIAL(LEVEL=\"Yes\" CL=BLAKER);`\n\n## Continuity Adjusted Methods\n\nSAS offers the 'CORRECT' option for producing more conservative versions of the Wald and Wilson Score intervals. These are intended to emulate the strictly conservative coverage of the 'exact' method, but often fail to do so. In particular, due to the skewed coverage of the Wilson interval, the 'corrected' version has one-sided coverage that exceeds the nominal $\\alpha/2$ for extreme values of p.\n\n## Consistency with hypothesis tests\n\nWithin SAS PROC FREQ for the asymptotic methods, consistency with the binomial test is provided by the Wilson CI, because the asymptotic test uses the standard error estimated under the null hypothesis (i.e. the value specified in the `BINOMIAL(P=...)` option of the `TABLES` statement.\n\nIf an `EXACT BINOMIAL;` statement is used, the resulting test is consistent with the Clopper-Pearson CI.\n\nAn exact hypothesis test with mid-P adjustment (consistent with the mid-P CI) is available via `EXACT BINOMIAL / MIDP;` but the output only gives the one-sided p-value, so if a 2-sided test is required it needs to be manually calculated by doubling the one-sided mid p-value.\n\n## Example Code using PROC FREQ\n\nBy adding the option `BINOMIAL(LEVEL=\"Yes\")` to your 'PROC FREQ' TABLES statement, SAS outputs the Normal Approximation (Wald) and Clopper-Pearson (Exact) confidence intervals as two default methods, derived for the `Responders` = `Yes`. If you do not specify the `LEVEL` you want to model, then SAS assumes you want to model the first level that appears in the output (alphabetically).\n\n**It is very important to ensure you are calculating the CI for the correct level! 
Check your output to confirm, you will see below it states `resp=Yes` !**\n\nCaution is required if there are no responders in a group (aside from any issues with the choice of confidence interval method), as SAS PROC FREQ (as of v9.4) does not output any confidence intervals in this case. If the `LEVEL` option has been specified, an error is produced, otherwise the procedure by default generates CIs for the proportion of non-responders. Note that valid CIs can (and should) be obtained for both p = 0/n and p = n/n. If needed, the interval for 0/n can be derived as 1 minus the transposed interval for n/n.\n\nThe output consists of the proportion of resp=Yes, the Asymptotic SE, 95% CIs using normal-approximation method, 95% CI using the Clopper-Pearson method, and then a Binomial test statistic and p-value for the null hypothesis of H0: Proportion = 0.5.\n\n```sas\nproc sort data=adcibc2;\n by trt; \nrun; \n\nproc freq data=adcibc2; \n table resp/ nopct nocol BINOMIAL(LEVEL=\"Yes\");\n by trt;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_prop_pbo.png){fig-align='center' width=50%}\n:::\n:::\n\n\nBy adding the option `BINOMIAL(LEVEL=\"Yes\" CL=)`, the other CIs are output as shown below. You can list any number of the available methods within the BINOMIAL option CL=XXXX separated by a space. However, SAS will only calculate the WILSON and WALD or the WILSON(CORRECT) and WALD(CORRECT). 
SAS won't output them both from the same procedure.\n\n- `BINOMIAL(LEVEL=\"Yes\" CL=CLOPPERPEARSON WALD WILSON AGRESTICOULL JEFFREYS MIDP LIKELIHOODRATIO LOGIT BLAKER)` will return Agresti-Coull, Blaker, Clopper-Pearson(Exact), Wald(without continuity correction) Wilson(without continuity correction), Jeffreys, Mid-P, Likelihood Ratio, and Logit\n\n- `BINOMIAL(LEVEL=\"Yes\" CL=ALL);` will return Agresti-Coull, Clopper-Pearson (Exact), Jeffreys, Wald(without continuity correction), Wilson (without continuity correction). *\\[If the developers of SAS are reading this, it would seem more natural/logical for Mid-P to be included here instead of Clopper-Pearson! However, note that the ALL option is not mentioned in the current PROC FREQ documentation.\\]*\n\n- `BINOMIAL(LEVEL=\"Yes\" CL=ALL CORRECT);`will return Agresti-Coull, Clopper-Pearson (Exact), Jeffreys, Wald (with continuity correction), Wilson(with continuity correction). In previous versions of SAS this was coded as `BINOMIALc(...)`, but that is no longer documented. *\\[... 
following the above comment, it is also not logical for Jeffreys and Agresti-Coull to be included here alongside methods designed to achieve conservative coverage\\]*\n\n- `BINOMIAL(LEVEL=\"Yes\" CL=WILSON(CORRECT) WALD(CORRECT));`will return Wilson (with continuity correction) and Wald (with continuity correction)\n\n```sas\nproc freq data=adcibc2;\n table resp/ nopct nocol \n BINOMIAL(LEVEL = \"Yes\" \n CL = WALD WILSON \n AGRESTICOULL JEFFREYS MIDP \n LIKELIHOODRATIO LOGIT);\n by trt;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_prop_all_act.png){fig-align='center' width=50%}\n:::\n:::\n\n\n```sas\nproc freq data=adcibc2;\n table resp/ nopct nocol \n BINOMIAL(LEVEL=\"Yes\" \n CORRECT\n CL= WALD WILSON \n CLOPPERPEARSON BLAKER);\n by trt; \nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ci_for_prop/binomial_prop_cc_act.png){fig-align='center' width=50%}\n:::\n:::\n\n\nSAS output often rounds to 3 or 4 decimal places in the output window, however the full values can be obtained using SAS ODS statements. `ods output binomialcls=bcl;` and then using the bcl dataset, in a data step to put the variable out to the number of decimal places we require.\\\n10 decimal places shown here: `lowercl2 = put(lowercl, 12.10);`\n\n## References\n\n1. [SAS PROC FREQ here](https://support.sas.com/documentation/cdl/en/procstat/63104/HTML/default/viewer.htm#procstat_freq_sect010.htm) and [here](https://support.sas.com/documentation/cdl/en/statug/63347/HTML/default/viewer.htm#statug_freq_sect028.htm)\n\n2. [Five Confidence Intervals for Proportions That You Should Know about](https://towardsdatascience.com/five-confidence-intervals-for-proportions-that-you-should-know-about-7ff5484c024f)\n\n3. [Confidence intervals for Binomial Proportion Using SAS](https://www.lexjansen.com/sesug/2015/103_Final_PDF.pdf)\n\n4. Brown LD, Cai TT, DasGupta A (2001). 
\"Interval estimation for a binomial proportion\", Statistical Science 16(2):101-133\n\n5. Newcombe RG (1998). \"Two-sided confidence intervals for the single proportion: comparison of seven methods\", Statistics in Medicine 17(8):857-872\n\n6. Clopper,C.J.,and Pearson,E.S.(1934),\"The Use of Confidence or Fiducial Limits Illustrated in the Case of the Binomial\", Biometrika 26, 404--413.\n\n7. D. Altman, D. Machin, T. Bryant, M. Gardner (eds). Statistics with Confidence: Confidence Intervals and Statistical Guidelines, 2nd edition. John Wiley and Sons 2000.\n\n8. Laud PJ (2017) Equal-tailed confidence intervals for comparison of rates. Pharmaceutical Statistics 16: 334-348\n\n9. Laud PJ (2018) Corrigendum: Equal-tailed confidence intervals for comparison of rates. Pharmaceutical Statistics 17: 290-293\n\n10. Blaker, H. (2000). Confidence curves and improved exact confidence intervals for discrete distributions, Canadian Journal of Statistics 28 (4), 783--798\n\n11. Klaschka, J. and Reiczigel, J. (2021). \"On matching confidence intervals and tests for some discrete distributions: Methodological and computational aspects,\" Computational Statistics, 36, 1775--1790.\n", "supporting": [ "ci_for_prop_files" ], diff --git a/_freeze/SAS/cmh/execute-results/html.json b/_freeze/SAS/cmh/execute-results/html.json index 22237332d..c33f69c35 100644 --- a/_freeze/SAS/cmh/execute-results/html.json +++ b/_freeze/SAS/cmh/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "f6438622389347d96ca7c2fa5944e68a", + "hash": "09f63477964842d3600882cc1841f6d6", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"CMH Test\"\nexecute: \n eval: false\n---\n\n# Cochran-Mantel-Haenszel Test\n\nThe CMH procedure tests for conditional independence in partial contingency tables for a 2 x 2 x K design. However, it can be generalized to tables of X x Y x K dimensions. 
This page also details derivation of risk differences and their confidence intervals which often accompnay a CMH test.\n\n## CMH in SAS\n\nThe cmh test is calculated in SAS using the PROC FREQ procedure. By default, it outputs the chi square statistic, degrees of freedom and p-value for each of the three alternative hypothesis: `general association`, `row means differ`, and `nonzero correlation`. It is up to the statistical analyst or statistician to know which result is appropriate for their analysis.\n\nWhen the design of the contingency table is 2 x 2 x K (i.e, X == 2 levels, Y == 2 levels, K \\>= 2 levels), the Mantel-Haenszel Common Odds Ratio (odds ratio estimate, 95% CI, P-value) and the Breslow-Day Test for Homogeneity of the Odds Ratios (chi-square statistic, degrees of freedom, P-value) are also output.\n\nBelow is the syntax to conduct a CMH analysis in SAS:\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc freq data = filtered_data; \n tables K * X * Y / cmh; \n * the order of K, X, and Y appearing on the line is important!;\nrun; \n```\n:::\n\n\n## Data used\n\nThe adcibc data described [here](https://psiaims.github.io/CAMIS/R/cmh.html) is used for this example.\n\n## Code used\n\nThe code used is always the same, however, we can limit the number of levels in each example to show a 2x2x2 case, 2x3xK case etc.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc freq data = adcibc; \n tables agegr1 * trtp * sex / cmh; \nrun;\n```\n:::\n\n\n## Example 1: 2 x 2 x 2 (i.e, X = 2 TRT levels, Y = 2 SEX levels, K = 2 AGE levels)\n\nLet's test if there is a difference between 2 treatments (Placebo, and high dose), in the number of males and females, whilst adjusting for 2 levels of Age group (\\<65 and 65-\\<80). 
NOTE: prior to the proc freq, we have removed data in the low dose and \\>80 categories.\n\n\n::: {.cell layout-align=\"center\"}\n\n:::\n\n\n## Example 2: 2 x 3 x K (i.e, X = 2 levels, Y = 3 levels, K \\>= 2 levels)\n\nLet's test if there is a difference between 3 treatments (Placebo, Xanomeline low dose and high dose), in the number of males and females, whilst adjusting for 3 levels of Age group (\\<65, 65-\\<80 and \\>=80). Here K=Agegrp1 the variable we are controlling for, X=Treatment -what we want to compare, and Y=Sex the variable we want to see if it's different between treatments (often this would be response/ non-response!).\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/cmh/saspage_output2.png){fig-align='center' width=50%}\n:::\n:::\n\n\n## Example 3: Risk Differences - Comparing treatment differences within strata and combined across strata\n\nThe above examples are a test for general association or if the row means scores differ across the strata controlling for another factor, however we may want to get an estimate of the direction and size of treatment effect (with CI), either within each strata or combined across strata.\n\n### Risk Differences Within each Strata\n\nRisk differences within each strata can be obtained by adding the riskdiff option in SAS. The exact same output is obtained as per example 1 above, with the addition of 2 tables (1 for each age strata), showing the proportion of Female patients within each treatment (including 95% CIs), and the difference between the treatment proportions (including 95% CIs). By default, the CI's are calculated using Wald asymptotic confidence limits and in addition, the `exact` Clopper-Pearson confidence intervals for the risks. 
See SAS userguide [here](https://support.sas.com/documentation/cdl/en/procstat/67528/HTML/default/viewer.htm#procstat_freq_details53.htm) and [here](https://support.sas.com/documentation/cdl/en/procstat/67528/HTML/default/viewer.htm#procstat_freq_details53.htm) for more detail on the range of CI's SAS can calculate for the risk differences such as: Agresti-Caffo, exact unconditional, Hauck-Anderson, Miettinen-Nurminen (score), Newcombe (hybrid-score), Wald confidence limits, continuity-corrected Newcombe and continuity corrected Wald CIs.\n\nThe individual treatment comparisons within strata can be useful to explore if the treatment effect is in the same direction and size for each strata, such as to determine if pooling them is in fact sensible.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\n# Default method is: Wald asymptotic confidence limits \nproc freq data = adcibc (where=(trtpn ne 54 and agegr1 ne \">80\")); \n tables agegr1 * trtp * sex / cmh riskdiff; \nrun; \n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/cmh/saspage_output3.png){fig-align='center' width=50%}\n:::\n:::\n\n\nNote above that exact CI's are not output for the difference betweeen the treatments. You can request SAS output other CI methods as shown below. 
This outputs the risk difference between the treatments and 95% CI, calculated for each age group strata separately using the Miettinen-Nurminen (score) (MN) method.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\n# You can change the confidence limits derivation using (cl=xxx) option\nproc freq data = adcibc (where=(trtpn ne 54 and agegr1 ne \"\\>80\")); \n tables agegr1 * trtp * sex / cmh riskdiff(cl=mn);\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n\n:::\n\n\n### SAS Common error: SAS cannot do a stratified Miettinen-Nurminen (score) method!\n\nNote: you may think by running the following code, that you would be creating a common risk difference using the stratified Miettinen-Nurminen method. However, this is actually performing an unstratified Miettinen-Nurminen (Score) method. The output contains the same risk differences calculated for each strata separately and then a common risk difference (however this is NOT a stratified approach !!). See [SAS guide](https://support.sas.com/documentation/cdl/en/procstat/67528/HTML/default/viewer.htm#procstat_freq_details63.htm) for more detail.\n\nSee the next section on Common risk differences available in SAS.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\n# Specifying the Miettinen-Nurminen (score) method \nproc freq data = adcibc (where=(trtpn ne 54 and agegr1 ne \">80\")); \n tables agegr1 * trtp * sex / cmh riskdiff (common cl=mn); \nrun; \n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/cmh/saspage_output3c.png){fig-align='center' width=50%}\n:::\n:::\n\n\n### SAS Common error: Make sure you output the risk difference for the correct level!\n\nIncluding either column=1 or column=2 tells SAS which is your outcome of interest (ie, often that you want to compare treatment responders and not treatment non-responders!) 
My default it takes the 1st column sorted alphabetically but you can change this as shown below.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc freq data = adcibc (where=(trtpn ne 54 and agegr1 ne \"\\>80\")); \n tables agegr1 * trtp * sex / cmh riskdiff(common column=2);\nrun;\n\n```\n:::\n\n\n### Common Risk Differences across Strata\n\nProc freq can calculate estimates of the common risk difference with 95% CIs, calculated using the Mantel-Haenszel and summary score (Miettinen-Nurminen) methods for multiway 2x2 tables. It can also provide stratified Newcombe confidence intervals using the method by Yan and Su (2010). The stratified Newcombe CIs are constructed from stratified Wilson CIs for the common (overall) row proportions. See [SAS help](https://support.sas.com/documentation/cdl/en/procstat/67528/HTML/default/viewer.htm#procstat_freq_details63.htm) for more detail.\n\nNote that SAS (since v9.3M2 / STAT 12.1) PROC FREQ will produce the Miettinen-Nurminen ('MN') score interval for unstratified datasets only. Using 'commonriskdiff' requests risks (binomial proportions) and risk differences for 2x2 tables. 
But doesn't extend to stratified analysis.\n\nThe only known way to have SAS produce stratified Miettinen-Nurminen CIs is to use this publicly available macro: [https://github.com/petelaud/ratesci-sas/tree/main](https://urldefense.com/v3/__https:/github.com/petelaud/ratesci-sas/tree/main__;!!GfteaDio!d_y6BtRjdLQgc_Wr-2-HGeyDSL1v71SvjvEQuVXNjzYaqLVDsEH0DdBLCBE3Q6LGFaTjE4LCjtECNYNFYfMSg4G49w$)\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc freq data = adcibc (where=(trtpn ne 54 and agegr1 ne \">80\")); \n tables agegr1 * trtp * sex / cmh commonriskdiff(CL=SCORE TEST=SCORE); \nrun; \n\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/cmh/saspage_output4.png){fig-align='center' width=50%}\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc freq data = adcibc (where=(trtpn ne 54 and agegr1 ne \">80\")); \n tables agegr1 * trtp * sex / cmh commonriskdiff(CL= newcombe); \nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/cmh/saspage_output4b.png){fig-align='center' width=50%}\n:::\n:::\n\n\n# References\n\nSAS documentation (Specification): https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.3/statug/statug_freq_examples07.htm\n\nSAS documentation (Theoretical Basis + Formulas): https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.5/procstat/procstat_freq_details92.htm\n\nCreated using : SAS release:9.04.01M7P08062020\"", + "markdown": "---\ntitle: \"CMH Test\"\n---\n\n# Cochran-Mantel-Haenszel Test\n\nThe CMH procedure tests for conditional independence in partial contingency tables for a 2 x 2 x K design. However, it can be generalized to tables of X x Y x K dimensions. This page also details derivation of risk differences and their confidence intervals which often accompany a CMH test.\n\n## CMH in SAS\n\nThe CMH test is calculated in SAS using the PROC FREQ procedure. 
By default, it outputs the chi square statistic, degrees of freedom and p-value for each of the three alternative hypothesis: `general association`, `row means differ`, and `nonzero correlation`. It is up to the statistical analyst or statistician to know which result is appropriate for their analysis.\n\nWhen the design of the contingency table is 2 x 2 x K (i.e, X == 2 levels, Y == 2 levels, K \\>= 2 levels), the Mantel-Haenszel Common Odds Ratio (odds ratio estimate, 95% CI, P-value) and the Breslow-Day Test for Homogeneity of the Odds Ratios (chi-square statistic, degrees of freedom, P-value) are also output.\n\nBelow is the syntax to conduct a CMH analysis in SAS:\n\n```sas\nproc freq data = filtered_data; \n tables K * X * Y / cmh; \n * the order of K, X, and Y appearing on the line is important!;\nrun; \n```\n\n## Data used\n\nThe adcibc data described [here](https://psiaims.github.io/CAMIS/R/cmh.html) is used for this example.\n\n## Code used\n\nThe code used is always the same, however, we can limit the number of levels in each example to show a 2x2x2 case, 2x3xK case etc.\n\n```sas\nproc freq data = adcibc; \n tables agegr1 * trtp * sex / cmh; \nrun;\n```\n\n## Example 1: 2 x 2 x 2 (i.e, X = 2 TRT levels, Y = 2 SEX levels, K = 2 AGE levels)\n\nLet's test if there is a difference between 2 treatments (Placebo, and high dose), in the number of males and females, whilst adjusting for 2 levels of Age group (\\<65 and 65-\\<80). NOTE: prior to the proc freq, we have removed data in the low dose and \\>80 categories.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/cmh/saspage_output1.png){fig-align='center' width=50%}\n:::\n:::\n\n\n## Example 2: 2 x 3 x K (i.e, X = 2 levels, Y = 3 levels, K \\>= 2 levels)\n\nLet's test if there is a difference between 3 treatments (Placebo, Xanomeline low dose and high dose), in the number of males and females, whilst adjusting for 3 levels of Age group (\\<65, 65-\\<80 and \\>=80). 
Here K=Agegrp1 the variable we are controlling for, X=Treatment -what we want to compare, and Y=Sex the variable we want to see if it's different between treatments (often this would be response/ non-response!).\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/cmh/saspage_output2.png){fig-align='center' width=50%}\n:::\n:::\n\n\n## Example 3: Risk Differences - Comparing treatment differences within strata and combined across strata\n\nThe above examples are a test for general association or if the row means scores differ across the strata controlling for another factor, however we may want to get an estimate of the direction and size of treatment effect (with CI), either within each strata or combined across strata.\n\n### Risk Differences Within each Strata\n\nRisk differences within each strata (for 2 x 2 x K tables) can be obtained by adding the riskdiff option in SAS. The exact same output is obtained as per example 1 above, with the addition of 2 tables (1 for each age strata), showing the proportion of Female patients within each treatment (including 95% CIs), and the difference between the treatment proportions (including 95% CIs). By default, the CI's are calculated using Wald asymptotic confidence limits and in addition, the `exact` Clopper-Pearson confidence intervals for the risks. 
See SAS userguide [here](https://support.sas.com/documentation/cdl/en/procstat/67528/HTML/default/viewer.htm#procstat_freq_details53.htm) and [here](https://support.sas.com/documentation/cdl/en/procstat/67528/HTML/default/viewer.htm#procstat_freq_details53.htm) for more detail on the range of CI's SAS can calculate for the risk differences such as: Agresti-Caffo, exact unconditional, Hauck-Anderson, Miettinen-Nurminen (score), Newcombe (hybrid-score), Wald confidence limits, continuity-corrected Newcombe and continuity corrected Wald CIs.\n\nThe individual treatment comparisons within strata can be useful to explore if the treatment effect is in the same direction and size for each strata, such as to determine if pooling them is in fact sensible.\n\n```sas\n# Default method is: Wald asymptotic confidence limits \nproc freq data = adcibc (where=(trtpn ne 54 and agegr1 ne \">80\")); \n tables agegr1 * trtp * sex / cmh riskdiff; \nrun; \n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/cmh/saspage_output3.png){fig-align='center' width=50%}\n:::\n:::\n\n\nNote above that exact CI's are not output for the difference betweeen the treatments. You can request SAS output other CI methods as shown below. 
This outputs the risk difference between the treatments and 95% CI, calculated for each age group strata separately using the Miettinen-Nurminen (score) (MN) method.\n\n```sas\n# You can change the confidence limits derivation using (cl=xxx) option\nproc freq data = adcibc (where=(trtpn ne 54 and agegr1 ne \"\\>80\")); \n tables agegr1 * trtp * sex / cmh riskdiff(cl=mn);\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/cmh/saspage_output3b.png){fig-align='center' width=50%}\n:::\n:::\n\n\n### SAS Common error: SAS v9.4 cannot do a stratified Miettinen-Nurminen (score) method!\n\nNote: you may think by running the following code, that you would be creating a common risk difference using the stratified Miettinen-Nurminen method. However, this is actually performing an unstratified Miettinen-Nurminen (Score) method. The output contains the same risk differences calculated for each strata separately and then a common risk difference using the 'summary score estimate' approach (however this is NOT a stratified approach !!). 
See [SAS guide](https://support.sas.com/documentation/cdl/en/procstat/67528/HTML/default/viewer.htm#procstat_freq_details63.htm) for more detail.\n\nPROC FREQ in the SAS Viya platform has introduced a new COMMONRISKDIFF(CL=MN/MNMH) option for the TABLES statement that does use the stratified MN method.\n\nSee the next section on Common risk differences available in SAS.\n\n```sas\n# Specifying the Miettinen-Nurminen (score) method \nproc freq data = adcibc (where=(trtpn ne 54 and agegr1 ne \">80\")); \n tables agegr1 * trtp * sex / cmh riskdiff (common cl=mn); \nrun; \n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/cmh/saspage_output3c.png){fig-align='center' width=50%}\n:::\n:::\n\n\n### SAS Common error: Make sure you output the risk difference for the correct level!\n\nIncluding either column=1 or column=2 tells SAS which is your outcome of interest (ie, often that you want to compare treatment responders and not treatment non-responders!) By default it takes the 1st column sorted alphabetically but you can change this as shown below.\n\nNote that if the data are such that there are no responses in either treatment group, the cross-tabulation will only have 1 column, and SAS PROC FREQ will fail to produce a confidence interval. In this unusual situation however, a valid confidence interval can (and should) still be produced, which may be obtained using the %SCORECI macro available at [https://github.com/petelaud/ratesci-sas/tree/main](#0).\n\n```sas\nproc freq data = adcibc (where=(trtpn ne 54 and agegr1 ne \"\\>80\")); \n tables agegr1 * trtp * sex / cmh riskdiff(common column=2);\nrun;\n\n```\n\n### Common Risk Differences across Strata\n\nProc freq can calculate estimates of the common risk difference with 95% CIs, calculated using the Mantel-Haenszel and summary score (Miettinen-Nurminen) methods for multiway 2x2 tables. 
It can also provide stratified Newcombe confidence intervals using the method by Yan and Su (2010). The stratified Newcombe CIs are constructed from stratified Wilson CIs for the common (overall) row proportions. See [SAS help](https://support.sas.com/documentation/cdl/en/procstat/67528/HTML/default/viewer.htm#procstat_freq_details63.htm) for more detail.\n\nNote that SAS (since v9.3M2 / STAT 12.1) PROC FREQ will produce the Miettinen-Nurminen ('MN') score interval for unstratified datasets only. The 'commonriskdiff' option added in SAS/STAT 14.3 requests risks (binomial proportions) and risk differences for 2x2 tables. But doesn't extend to stratified analysis.\n\nStratified Miettinen-Nurminen CIs have more recently been added to PROC FREQ, but only within the SAS Viya platform (using CL=MN or CL=MNMH in the COMMONRISKDIFF option). The only other known way to have SAS produce these CIs is to use this publicly available macro: [https://github.com/petelaud/ratesci-sas/tree/main](#0). Note that the macro (currently) does not implement Miettinen & Nurminen's proposed iterative weights, but instead the simpler (and similar) MH weights are used.\n\n```sas\nproc freq data = adcibc (where=(trtpn ne 54 and agegr1 ne \">80\")); \n tables agegr1 * trtp * sex / cmh commonriskdiff(CL=SCORE TEST=SCORE); \nrun; \n\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/cmh/saspage_output4.png){fig-align='center' width=50%}\n:::\n:::\n\n\n```sas\nproc freq data = adcibc (where=(trtpn ne 54 and agegr1 ne \">80\")); \n tables agegr1 * trtp * sex / cmh commonriskdiff(CL= newcombe); \nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/cmh/saspage_output4b.png){fig-align='center' width=50%}\n:::\n:::\n\n\n# References\n\nSAS documentation (Specification): https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.3/statug/statug_freq_examples07.htm\n\nSAS documentation (Theoretical Basis + Formulas): 
https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.5/procstat/procstat_freq_details92.htm\n\nCreated using : SAS release:9.04.01M7P08062020\"", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/SAS/correlation/execute-results/html.json b/_freeze/SAS/correlation/execute-results/html.json deleted file mode 100644 index c0d822c7c..000000000 --- a/_freeze/SAS/correlation/execute-results/html.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "hash": "b789ce68a56fd8d9a89213c8872d4838", - "result": { - "engine": "jupyter", - "markdown": "---\ntitle: \"Correlation Analysis using SAS\"\nexecute: \n eval: false\n---\n\n# **Example: Lung Cancer Data**\n\n*Data source:* *Loprinzi CL. Laurie JA. Wieand HS. Krook JE. Novotny PJ. Kugler JW. Bartel J. Law M. Bateman M. Klatt NE. et al. Prospective evaluation of prognostic variables from patient-completed questionnaires. North Central Cancer Treatment Group. Journal of Clinical Oncology. 12(3):601-7, 1994.*\n\nSurvival in patients with advanced lung cancer from the North Central Cancer Treatment Group. Performance scores rate how well the patient can perform usual daily activities.\n\n## Overview\n\nThe `CORR` procedure computes Pearson correlation coefficients, three nonparametric measures of association, and the probabilities associated with these statistics. The correlation statistics include the following:\n\n- Pearson product-moment correlation\n\n- Spearman rank-order correlation\n\n- Kendall's tau-b coefficient\n\n- Hoeffding's measure of dependence, ![](https://support.sas.com/documentation/cdl/en/procstat/63104/HTML/default/images/procstat_corr0001.png){alt=\"\"}\n\n- Pearson, Spearman, and Kendall partial correlation\n\nThis program works on the first three correlation coefficients.\n\n**Missing Values**\n\nPROC CORR excludes observations with missing values in the WEIGHT and FREQ variables. By default, PROC CORR uses ***pairwise deletion*** when observations contain missing values. 
PROC CORR includes all nonmissing pairs of values for each pair of variables in the statistical computations. Therefore, the correlation statistics might be based on different numbers of observations.\n\nIf you specify the NOMISS option, PROC CORR uses ***listwise deletion*** when a value of the VAR or WITH statement variable is missing. PROC CORR excludes all observations with missing values from the analysis. Therefore, the number of observations for each pair of variables is identical.\n\nThe PARTIAL statement always excludes the observations with missing values by automatically invoking the NOMISS option. With the NOMISS option, the data are processed more efficiently because fewer resources are needed. Also, the resulting correlation matrix is nonnegative definite.\n\nIn contrast, if the data set contains missing values for the analysis variables and the NOMISS option is not specified, the resulting correlation matrix might not be nonnegative definite. This leads to several statistical difficulties if you use the correlations as input to regression or other statistical procedures.\n\n## **Pearson Correlation**\n\n```{sas}\nproc corr data=lung pearson;\n var age mealcal;\nrun;\n```\n\n![](/images/correlation/pearson.jpg){fig-align=\"center\"}\n\n## **Spearman Correlation**\n\n```{sas}\nproc corr data=lung spearman; \n var age mealcal; \nrun;\n```\n\n![](/images/correlation/spearman.jpg){fig-align=\"center\"}\n\n## Kendall's rank correlation\n\n```{sas}\nproc corr data=lung kendall;\n var age mealcal;\nrun;\n```\n\n![](/images/correlation/kendall.jpg){fig-align=\"center\"}\n\n## References\n\n[PROC CORR: The CORR Procedure (sas.com)](https://support.sas.com/documentation/cdl/en/procstat/63104/HTML/default/viewer.htm#corr_toc.htm)\n\n", - "supporting": [ - "correlation_files" - ], - "filters": [], - "includes": {} - } -} \ No newline at end of file diff --git a/_freeze/SAS/count_data_regression/execute-results/html.json 
b/_freeze/SAS/count_data_regression/execute-results/html.json index 2eddfde85..ddec1f75e 100644 --- a/_freeze/SAS/count_data_regression/execute-results/html.json +++ b/_freeze/SAS/count_data_regression/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "0277830eb028b94119177e0bc542421a", + "hash": "a6ba43fade8ae51685328e238be9398c", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Poisson and Negative Binomial Regression in SAS\"\ndate: last-modified\ndate-format: D MMMM, YYYY\nexecute: \n eval: false\n---\n\nThis page serves as an introduction to performing Poisson and Negative Binomial regression in SAS. For detail on how results compare between R and SAS see [RvsSAS](R%20vs%20SAS:%20Negative%20Binomial%20Regression).\n\n# **Regression for Count Data**\n\nThe most commonly used models for count data in clinical trials include:\n\n- Poisson regression: assumes the response variable Y has a Poisson distribution, which is linked using the logarithm with explanatory variables x.\n\n$$\n\\text{log}(E(Y|x))= \\beta_0 + \\beta' x, \\; i = 1,\\ldots,n \n$$\n\n- Quasi-Poisson regression: Poisson model that allows overdispersion, i.e. dispersion parameter is not fixed at one.\n\n- Negative-Binomial regression: popular generalization which loosens the assumption that the variance is equal to the mean made by the Poisson model.\n\nOther models include hurdle or zero-inflated models, if data have more zero observations than expected.\n\n## **Example Data**\n\nTo demonstrate the use of poisson and negative binomial regression we examine the same `polyps` dataset as used in the R example for poisson and negative binomial regression [here](https://psiaims.github.io/CAMIS/R/count_data_regression.html).\n\n## **Example: Poisson Model in SAS**\n\nIn SAS, we can use proc genmod to perform poisson regression.\n\n**The OM (obsmargins) option**\n\nIt is generally good practice to apply the OM option on the lsmeans statement. 
The standard (default) LS-means have equal coefficients across classification effects; however, the OM option changes these coefficients to be proportional to those found in OM-data-set. This adjustment is reasonable when you want your inferences to apply to a population that is not necessarily balanced but has the margins observed in OM-data-set. See [here](https://documentation.sas.com/doc/en/pgmsascdc/v_062/statug/statug_mixed_syntax08.htm) for more details.\n\n**Odds Ratios and 95% CIs**\n\nYou can use exponential of the maximum likelihood parameter estimate (for treat and age in this example), and the exponential of the Wald 95% Confidence Limits to obtain the odds ratios and 95% CIs. Estimates of the least squares means and CI's for each treatment are output using the lsmeans option. These estimates also have to be back transformed using exponential distribution to get the mean count back onto the original scale. Proc Genmod uses GLM parameterization.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nods output ParameterEstimates=ORs lsmeans=lsm;\nproc genmod data=polyps; \n class treat (ref=\"placebo\"); \n model number = treat age / dist=poisson;\n lsmeans treat/ cl OM;\nrun; \nods output close;\n\n# read in ORs and back transform the estimates (lsm can be done same way)\ndata bt_or;\n set ors;\n OR=exp(estimate);\n OR_low=exp(lowerwaldcl);\n OR_high=exp(upperwaldcl);\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/count_data_regression/poisson1.png){fig-align='center' width=50%}\n:::\n\n::: {.cell-output-display}\n![](../images/count_data_regression/poisson2.png){fig-align='center' width=50%}\n:::\n:::\n\n\nBack transformation of the parameter estimates and 95% CIs produces the following results\n\nLsmean number of polyps (95% CI) on Drug: 9.02 (7.30 - 11.13)\n\nLsmean number of polyps (95% CI) on Placebo: 35.10 (31.72-38.84)\n\nOdds ratio (95% CI)= 0.2569 (0.2040 - 0.3235)\n\nHence, patients on Drug have 
significantly less polyps than those on placebo.\n\n## **Example: negative binomial in SAS**\n\nIn SAS, we can use proc genmod to perform negative binomial regression. The below example assumes all patients are followed for the same duration, however if patients are followed for different durations, you should use `offset=logtime` on the model row after the \"/ \" as an option to offset the model by the log of the patients duration on the study.\n\nModel parameterization is very similar to poisson\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nods output ParameterEstimates=ORs lsmeans=lsm;\nproc genmod data=polyps; \n class treat (ref=\"placebo\"); \n model number = treat age / dist=negbin link=log;\n lsmeans treat/ cl OM;\nrun;\n\nods output close;\n\n# read in lsmeans and back transform the estimates (ORs can be done same way)\ndata bt_lsm;\n set lsm;\n lsmean_count=exp(estimate);\n mean_low=exp(lower);\n mean_high=exp(upper);\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/count_data_regression/negbin1.png){fig-align='center' width=50%}\n:::\n\n::: {.cell-output-display}\n![](../images/count_data_regression/negbin2.png){fig-align='center' width=50%}\n:::\n:::\n\n\nBack transformation of the parameter estimates and 95% CIs produces the following results\n\nLsmean number of polyps (95% CI) on Drug: 8.97 (5.18 - 15.53)\n\nLsmean number of polyps (95% CI) on Placebo: 35.24 (22.12-56.13)\n\nOdds ratio Drug/Placebo (95% CI)= 0.2546 (0.1232 - 0.5259)\n\nHence, patients on Drug have significantly less polyps than those on placebo.\n\n# References\n\nCreated using : SAS release:9.04.01M7P08062020\"", + "markdown": "---\ntitle: \"Poisson and Negative Binomial Regression in SAS\"\ndate: last-modified\ndate-format: D MMMM, YYYY\n---\n\nThis page serves as an introduction to performing Poisson and Negative Binomial regression in SAS. 
For detail on how results compare between R and SAS see [RvsSAS](R%20vs%20SAS:%20Negative%20Binomial%20Regression).\n\n# **Regression for Count Data**\n\nThe most commonly used models for count data in clinical trials include:\n\n- Poisson regression: assumes the response variable Y has a Poisson distribution, which is linked using the logarithm with explanatory variables x.\n\n$$\n\\text{log}(E(Y|x))= \\beta_0 + \\beta' x, \\; i = 1,\\ldots,n \n$$\n\n- Quasi-Poisson regression: Poisson model that allows overdispersion, i.e. dispersion parameter is not fixed at one.\n\n- Negative-Binomial regression: popular generalization which loosens the assumption that the variance is equal to the mean made by the Poisson model.\n\nOther models include hurdle or zero-inflated models, if data have more zero observations than expected.\n\n## **Example Data**\n\nTo demonstrate the use of poisson and negative binomial regression we examine the same `polyps` dataset as used in the R example for poisson and negative binomial regression [here](https://psiaims.github.io/CAMIS/R/count_data_regression.html).\n\n## **Example: Poisson Model in SAS**\n\nIn SAS, we can use proc genmod to perform poisson regression.\n\n**The OM (obsmargins) option**\n\nIt is generally good practice to apply the OM option on the lsmeans statement. The standard (default) LS-means have equal coefficients across classification effects; however, the OM option changes these coefficients to be proportional to those found in OM-data-set. This adjustment is reasonable when you want your inferences to apply to a population that is not necessarily balanced but has the margins observed in OM-data-set. 
See [here](https://documentation.sas.com/doc/en/pgmsascdc/v_062/statug/statug_mixed_syntax08.htm) for more details.\n\n**Odds Ratios and 95% CIs**\n\nYou can use exponential of the maximum likelihood parameter estimate (for treat and age in this example), and the exponential of the Wald 95% Confidence Limits to obtain the odds ratios and 95% CIs. Estimates of the least squares means and CI's for each treatment are output using the lsmeans option. These estimates also have to be back transformed using exponential distribution to get the mean count back onto the original scale. Proc Genmod uses GLM parameterization.\n\n```sas\nods output ParameterEstimates=ORs lsmeans=lsm;\nproc genmod data=polyps; \n class treat (ref=\"placebo\"); \n model number = treat age / dist=poisson;\n lsmeans treat/ cl OM;\nrun; \nods output close;\n\n# read in ORs and back transform the estimates (lsm can be done same way)\ndata bt_or;\n set ors;\n OR=exp(estimate);\n OR_low=exp(lowerwaldcl);\n OR_high=exp(upperwaldcl);\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/count_data_regression/poisson1.png){fig-align='center' width=50%}\n:::\n\n::: {.cell-output-display}\n![](../images/count_data_regression/poisson2.png){fig-align='center' width=50%}\n:::\n:::\n\n\nBack transformation of the parameter estimates and 95% CIs produces the following results\n\nLsmean number of polyps (95% CI) on Drug: 9.02 (7.30 - 11.13)\n\nLsmean number of polyps (95% CI) on Placebo: 35.10 (31.72-38.84)\n\nOdds ratio (95% CI)= 0.2569 (0.2040 - 0.3235)\n\nHence, patients on Drug have significantly less polyps than those on placebo.\n\n## **Example: negative binomial in SAS**\n\nIn SAS, we can use proc genmod to perform negative binomial regression. 
The below example assumes all patients are followed for the same duration, however if patients are followed for different durations, you should use `offset=logtime` on the model row after the \"/ \" as an option to offset the model by the log of the patients duration on the study.\n\nModel parameterization is very similar to poisson\n\n```sas\nods output ParameterEstimates=ORs lsmeans=lsm;\nproc genmod data=polyps; \n class treat (ref=\"placebo\"); \n model number = treat age / dist=negbin link=log;\n lsmeans treat/ cl OM;\nrun;\n\nods output close;\n\n# read in lsmeans and back transform the estimates (ORs can be done same way)\ndata bt_lsm;\n set lsm;\n lsmean_count=exp(estimate);\n mean_low=exp(lower);\n mean_high=exp(upper);\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/count_data_regression/negbin1.png){fig-align='center' width=50%}\n:::\n\n::: {.cell-output-display}\n![](../images/count_data_regression/negbin2.png){fig-align='center' width=50%}\n:::\n:::\n\n\nBack transformation of the parameter estimates and 95% CIs produces the following results\n\nLsmean number of polyps (95% CI) on Drug: 8.97 (5.18 - 15.53)\n\nLsmean number of polyps (95% CI) on Placebo: 35.24 (22.12-56.13)\n\nOdds ratio Drug/Placebo (95% CI)= 0.2546 (0.1232 - 0.5259)\n\nHence, patients on Drug have significantly less polyps than those on placebo.\n\n# References\n\nCreated using : SAS release:9.04.01M7P08062020\"", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/SAS/gee/execute-results/html.json b/_freeze/SAS/gee/execute-results/html.json index 3739ec17f..8ad52d53c 100644 --- a/_freeze/SAS/gee/execute-results/html.json +++ b/_freeze/SAS/gee/execute-results/html.json @@ -1,11 +1,9 @@ { - "hash": "1163181ad9d5b1f4b415ecdf1be5e592", + "hash": "41920ec13c270c3735e625ba4b7d8b55", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Generalized Estimating Equations (GEE) methods in SAS\"\nexecute: \n eval: 
false\n---\n\n# INTRODUCTION\n\nGeneralized Estimating Equations (GEE) methods extend the Generalized Linear Model (GLM) framework using link functions that relate the predictors to transformed outcome variable. For dichotomous response variables, the link functions is the probit (in case of rare events complementary log-log may be preferable). For outcomes with more than two categories, the cumulative link function is used in case of ordinal variables and generalized logit for nominal variables.\n\nGEE are marginal models and therefore estimate population-averaged effects and within-subject correlation is analysed by specifying a working correlation structure (as in MMRM). Estimators are obtained via quasi-likelihood via iterative solving of estimating equations.\n\n# EXAMPLE DATA\n\nA SAS data of clinical trial data comparing two treatments for a respiratory disorder available in [\"Gee Model for Binary Data\"](https://documentation.sas.com/doc/en/statug/15.2/statug_code_genmex5.htm) in the SAS/STAT Sample Program Library \\[1\\] is used to create these examples.\n\nTo uniquely identify subjects, a new variable USUBJID was created by concatenating SITE and ID. Variables TREATMENT and VISIT were renamed to TRTP and AVISITN.\n\nAdditionally, two variables were created using randomly generated values to simulate variables with more than two categories. One was an ordinal variable with values 1, 2, and 3; the other was a nominal variable with categories 'liver', 'lung', and 'bone'. The resulting dataset is saved here: /data/resp.xlsx The following SAS code was used:\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc format; \n value respmulti \n 1='Liver' \n 2='Lung' \n 3='Bone'; \nrun; \n \ndata resp; \n set resp1; \n call streaminit(1234); \n respord = rand(\"integer\", 1, 3); *Ordinal; \n respnom = put(respord, respmulti.); *Nominal;\nrun;\n```\n:::\n\n\n# BINARY OUTCOME\n\n### CODE\n\nThis example shows syntax with `PROC GEE` using the example data. 
The probability of the event ( specified as `event='1')` is analyzed including treatment, visit and the treatment by visit interaction as fixed effects. The independent correlation matrix (which is the option by default), is used to account for intra-subjects correlation.\n\nThe binomial distribution and the link function (in this case is the logit function) are specified in the `model` statement as /`dist=bin link=logit`). The unique subject and the correlation structure are defined as `repeated subject=/corr=` .\n\nThe U.S. Food and Drug Administration (FDA) advises \"*sponsors to consider using of robust standard error method such as the Huber-White \"sandwich\" standard error, particularly when the model does not include treatment by covariate interactions*\" \\[2\\]. This robust \"sandwich\" SE is computed by default in `PROC GEE`. Nominal SE (also called model-based SE) can be easily obtained by adding the `modelse` option in the `repeated` statement. This option is commented out in the code below, but included to indicate its availability.\n\nPredicted probabilities and Odds Ratios (OR) can be obtained in SAS using the `lsmeans` statement:\n\n- The `ilink` option provides back-transformed predicted probabilities.\n\n- The `diff` option, combined with either `exp` or `oddsratio`, computes ORs.\n\n- `cl` computes confidence intervals.\n\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc gee data=resp; \n class trtp(ref=\"P\") avisitn(ref='1') usubjid; \n model outcome(event='1') = trtp avisitn trtp*avisitn/ dist=bin link=logit; \n lsmeans trtp*avisitn/cl exp ilink oddsratio diff; \n repeated subject=usubjid/corr=ind /*modelse*/; \nrun;\n\n```\n:::\n\n\nSimilar syntax can be used in `PROC GENMOD`. 
While the syntax is equivalent, very slight differences in the results may occur, typically beyond the tenth decimal place.\n\n### RESULTS\n\nResults were extracted into a SAS dataset using the `ODS OUTPUT` statement and subsequently refined by selecting key rows to facilitate clearer presentation. For instance, it only displays the probabilities for the treatment by visit interaction, and the OR comparing the active treatment versus placebo.\n\n[Estimated Parameters:]{.underline}\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/gee/1_estimated_parameters.png){fig-align='center' width=50%}\n:::\n:::\n\n\n[Probability of event:]{.underline}\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/gee/2_probability_of_event.png){fig-align='center' width=50%}\n:::\n:::\n\n\n[ODDS RATIO (OR):]{.underline}\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/gee/3_odds_ratio.png){fig-align='center' width=50%}\n:::\n:::\n\n\n# OUTCOME WITH MORE THAN 2 CATEGORIES\n\nSimilar syntax as for binary variables can be applied by specifying a multinomial distribution and selecting the appropriate link function. Models with cumulative link functions apply to ordinal data and generalized logit models are fit to nominal data \\[3\\]). Note the link function generalized logit is available in `PROC GEE`, but not in `PROC GENMOD.`\n\nFor multinomial responses, SAS limits the correlation matrix type to `independent,` so other correlation maxtrix options are not supported.\n\nThe estimated parameters for each model are detailed below. 
ORs can be obtained using the `LSMEANS` statement, following the same approach used for binary outcomes.\n\n### CODE\n\n[Ordinal variable:]{.underline}\n\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc gee data=resp; \nclass trtp(ref=\"A\") avisitn(ref='1') usubjid; \nmodel respord=trtp avisitn trtp*avisitn/ dist=multinomial link=cumlogit; \nlsmeans trtp*avisitn/cl exp ilink oddsratio diff;\nrepeated subject=usubjid/corr=ind; \nrun;\n```\n:::\n\n\n[Nominal variable:]{.underline}\n\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc gee data=resp ; \nclass trtp(ref=\"A\") avisitn(ref='1') usubjid; \nmodel respnom(event='Liver')=trtp avisitn trtp*avisitn/ dist=multinomial link=glogit; \nlsmeans trtp*avisitn/cl exp ilink oddsratio diff;\nrepeated subject=usubjid/corr=ind;\nrun;\n```\n:::\n\n\n### RESULTS\n\n[Ordinal variable:]{.underline}\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/gee/4_ordinal_variable.png){fig-align='center' width=50%}\n:::\n:::\n\n\n[Nominal variable:]{.underline}\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/gee/5_nominal_variable.png){fig-align='center' width=50%}\n:::\n:::\n\n\n## RERERENCES\n\n1. [SAS Institute Inc. SAS Help Center. The GEE procedure.](https://documentation.sas.com/doc/en/statug/15.2/statug_gee_examples01.htm)\n\n2. [SAS/STAT® 13.1 User's Guide The GEE Procedure.](https://support.sas.com/documentation/onlinedoc/stat/141/gee.pdf)\n\n3. [SAS/STAT® 13.1 User's Guide The GENMOD Procedure.](https://support.sas.com/documentation/onlinedoc/stat/131/genmod.pdf)", - "supporting": [ - "gee_files" - ], + "markdown": "---\ntitle: \"Generalized Estimating Equations (GEE) methods in SAS\"\n---\n\n# INTRODUCTION\n\nGeneralized Estimating Equations (GEE) methods extend the Generalized Linear Model (GLM) framework using link functions that relate the predictors to transformed outcome variable. 
For dichotomous response variables, the link functions is the probit (in case of rare events complementary log-log may be preferable). For outcomes with more than two categories, the cumulative link function is used in case of ordinal variables and generalized logit for nominal variables.\n\nGEE are marginal models and therefore estimate population-averaged effects and within-subject correlation is analysed by specifying a working correlation structure (as in MMRM). Estimators are obtained via quasi-likelihood via iterative solving of estimating equations.\n\n# EXAMPLE DATA\n\nA SAS data of clinical trial data comparing two treatments for a respiratory disorder available in [\"Gee Model for Binary Data\"](https://documentation.sas.com/doc/en/statug/15.2/statug_code_genmex5.htm) in the SAS/STAT Sample Program Library \\[1\\] is used to create these examples.\n\nTo uniquely identify subjects, a new variable USUBJID was created by concatenating SITE and ID. Variables TREATMENT and VISIT were renamed to TRTP and AVISITN.\n\nAdditionally, two variables were created using randomly generated values to simulate variables with more than two categories. One was an ordinal variable with values 1, 2, and 3; the other was a nominal variable with categories 'liver', 'lung', and 'bone'. The resulting dataset is saved here: /data/resp.xlsx The following SAS code was used:\n\n```sas\nproc format; \n value respmulti \n 1='Liver' \n 2='Lung' \n 3='Bone'; \nrun; \n \ndata resp; \n set resp1; \n call streaminit(1234); \n respord = rand(\"integer\", 1, 3); *Ordinal; \n respnom = put(respord, respmulti.); *Nominal;\nrun;\n```\n\n# BINARY OUTCOME\n\n### CODE\n\nThis example shows syntax with `PROC GEE` using the example data. The probability of the event ( specified as `event='1')` is analyzed including treatment, visit and the treatment by visit interaction as fixed effects. 
The independent correlation matrix (which is the option by default), is used to account for intra-subjects correlation.\n\nThe binomial distribution and the link function (in this case is the logit function) are specified in the `model` statement as /`dist=bin link=logit`). The unique subject and the correlation structure are defined as `repeated subject=/corr=` .\n\nThe U.S. Food and Drug Administration (FDA) advises \"*sponsors to consider using of robust standard error method such as the Huber-White \"sandwich\" standard error, particularly when the model does not include treatment by covariate interactions*\" \\[2\\]. This robust \"sandwich\" SE is computed by default in `PROC GEE`. Nominal SE (also called model-based SE) can be easily obtained by adding the `modelse` option in the `repeated` statement. This option is commented out in the code below, but included to indicate its availability.\n\nPredicted probabilities and Odds Ratios (OR) can be obtained in SAS using the `lsmeans` statement:\n\n- The `ilink` option provides back-transformed predicted probabilities.\n\n- The `diff` option, combined with either `exp` or `oddsratio`, computes ORs.\n\n- `cl` computes confidence intervals.\n\n\n```sas\nproc gee data=resp; \n class trtp(ref=\"P\") avisitn(ref='1') usubjid; \n model outcome(event='1') = trtp avisitn trtp*avisitn/ dist=bin link=logit; \n lsmeans trtp*avisitn/cl exp ilink oddsratio diff; \n repeated subject=usubjid/corr=ind /*modelse*/; \nrun;\n\n```\n\nSimilar syntax can be used in `PROC GENMOD`. While the syntax is equivalent, very slight differences in the results may occur, typically beyond the tenth decimal place.\n\n### RESULTS\n\nResults were extracted into a SAS dataset using the `ODS OUTPUT` statement and subsequently refined by selecting key rows to facilitate clearer presentation. 
For instance, it only displays the probabilities for the treatment by visit interaction, and the OR comparing the active treatment versus placebo.\n\n[Estimated Parameters:]{.underline}\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/gee/1_estimated_parameters.png){fig-align='center' width=50%}\n:::\n:::\n\n\n[Probability of event:]{.underline}\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/gee/2_probability_of_event.png){fig-align='center' width=50%}\n:::\n:::\n\n\n[ODDS RATIO (OR):]{.underline}\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/gee/3_odds_ratio.png){fig-align='center' width=50%}\n:::\n:::\n\n\n# OUTCOME WITH MORE THAN 2 CATEGORIES\n\nSimilar syntax as for binary variables can be applied by specifying a multinomial distribution and selecting the appropriate link function. Models with cumulative link functions apply to ordinal data and generalized logit models are fit to nominal data \\[3\\]). Note the link function generalized logit is available in `PROC GEE`, but not in `PROC GENMOD.`\n\nFor multinomial responses, SAS limits the correlation matrix type to `independent,` so other correlation maxtrix options are not supported.\n\nThe estimated parameters for each model are detailed below. 
ORs can be obtained using the `LSMEANS` statement, following the same approach used for binary outcomes.\n\n### CODE\n\n[Ordinal variable:]{.underline}\n\n\n```sas\nproc gee data=resp; \nclass trtp(ref=\"A\") avisitn(ref='1') usubjid; \nmodel respord=trtp avisitn trtp*avisitn/ dist=multinomial link=cumlogit; \nlsmeans trtp*avisitn/cl exp ilink oddsratio diff;\nrepeated subject=usubjid/corr=ind; \nrun;\n```\n\n[Nominal variable:]{.underline}\n\n\n```sas\nproc gee data=resp ; \nclass trtp(ref=\"A\") avisitn(ref='1') usubjid; \nmodel respnom(event='Liver')=trtp avisitn trtp*avisitn/ dist=multinomial link=glogit; \nlsmeans trtp*avisitn/cl exp ilink oddsratio diff;\nrepeated subject=usubjid/corr=ind;\nrun;\n```\n\n### RESULTS\n\n[Ordinal variable:]{.underline}\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/gee/4_ordinal_variable.png){fig-align='center' width=50%}\n:::\n:::\n\n\n[Nominal variable:]{.underline}\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/gee/5_nominal_variable.png){fig-align='center' width=50%}\n:::\n:::\n\n\n## RERERENCES\n\n1. [SAS Institute Inc. SAS Help Center. The GEE procedure.](https://documentation.sas.com/doc/en/statug/15.2/statug_gee_examples01.htm)\n\n2. [SAS/STAT® 13.1 User's Guide The GEE Procedure.](https://support.sas.com/documentation/onlinedoc/stat/141/gee.pdf)\n\n3. 
[SAS/STAT® 13.1 User's Guide The GENMOD Procedure.](https://support.sas.com/documentation/onlinedoc/stat/131/genmod.pdf)", + "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" ], diff --git a/_freeze/SAS/glmm/execute-results/html.json b/_freeze/SAS/glmm/execute-results/html.json index 4f5722de3..014c20b11 100644 --- a/_freeze/SAS/glmm/execute-results/html.json +++ b/_freeze/SAS/glmm/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "d2e406c7e00ff006ecc3748689f77e4a", + "hash": "fc89287ac3b245664e793112e8cdd51c", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Generalized Linear Mixed Models (GLMM)\"\n---\n\n# INTRODUCTION\n\nGeneralized Linear Mixed Models (GLMM) method combines the characteristics of the Generalized Linear Model (GLM), with mixed models (such a repeated measures over time).  It extends the GLM framework using link functions that relate the predictors to transformed outcome variable.\n\n$$\nE(Y)=\\mu\n$$\n\n$$\ng(\\mu) = X\\beta + Zb, \\qquad b \\sim N(0, G)\n$$\n\nWhere:\n\nn: number of observations, p: number of fixed effects, q: number of random effects (subjects).\n\nY: vector of observed response variable (n x 1)\n\ng: Link function that transforms Y to the linear scale (eg: logit)\n\nX: matrix for fixed effects (n x p), Z: matrix of random effects, G: covariance matrix of the random effects.\n\nB: vector of fixed effects coefficients (p x 1)., b: vector of random effects.\n\n**Link Function:**\n\n- Dichotomous response variable: probit (in case of rare events complementary log-log may be preferable).\n\n- Outcomes with more than two categories:\n\n - Ordinal variable: cumulative\n\n - Nominal variable: generalized logit\n\n**Random Effects**\n\nGLMM are conditional models and estimate subject-average effects, and the intra-subject correlation is modelled via random effects. 
Unlike GEE models, GLMM models allow individual-level inference.\n\n**Estimation Methods**\n\nMaximum likelihood, based on approximations:\n\n- Gauss Hermite Quadrature (GHQ): Integral split in a given number of points.\n\n- Laplace: A specific case of GHQ, using 1 point.\n\nPenalized Likelihood can be used too, but it is known that in binary data **it underestimates variance components and biased results.**\n\n**Note:** The results shown in this section have been post‑processed to enhance visual comparability. Specifically, the outputs were extracted using the ODS OUTPUT statement and filtered to retain only the relevant values\n\n# EXAMPLE DATA\n\nA SAS data of clinical trial data comparing two treatments for a respiratory disorder available in [\"Gee Model for Binary Data\"](https://documentation.sas.com/doc/en/statug/15.2/statug_code_genmex5.htm) in the SAS/STAT Sample Program Library [1] is used to create these examples.\n\nTo uniquely identify subjects, a new variable USUBJID was created by concatenating SITE and ID. Variables TREATMENT, BASELINE, and VISIT were renamed to TRTP, BASE, and AVISITN.\n\nAdditionally, two variables were created using randomly generated values to simulate variables with more than two categories. One was an ordinal variable with values 1, 2, and 3; the other was a nominal variable with categories 'liver', 'lung', and 'bone'. The following SAS code was used:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nproc format; \n value respmulti \n 1='Liver' \n 2='Lung' \n 3='Bone'; \nrun; \n \ndata resp; \n set resp1; \n call streaminit(1234); \n respord = rand(\"integer\", 1, 3); *Ordinal; \n respnom = put(respord, respmulti.); *Nominal;\nrun;\n```\n:::\n\n\n# GLMM WITH GHQ\n\nGLMM with GHQ approximation can be fitted using `PROC GLIMMIX` by specifying `quad(qpoints=)` . 
The random effects are specified with the `RANDOM` statement, while the `TYPE=` option statement can be used to specify the covariance structure of G (variance matrix of the random effects). Variance Components by default (type=VC) is the default option.\n\nUnlike in GEE models, which computed the robust Sandwich S.E. by default, the GLIMMIX procedure displays the model-based S.E. (also called naïve S.E. in R) by default.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nproc glimmix data=resp method=quad(qpoints=5);\nclass trtp(ref=\"P\") avisitn(ref='1') usubjid;\nmodel outcome=trtp avisitn trtp*avisitn/dist=bin link=logit solution ddfm=BW;/*1*/;\nrandom intercept /subject=usubjid /*type=vc [2]*/;\nrun;\n```\n:::\n\n\n[1] Option to set up the ddff (BW=Between-Within, which is the default). Other options include: infinite (none), Kenwardroger, Residual, etc.\n\n[2] Covariance structure of G (residuals covariance matrix). Variance components (VC) by default.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/glmm/SAS_Image_1.png){fig-align='center' width=50%}\n:::\n:::\n\n\n# GLMM WITH LAPLACE\n\nLaplace is a particular GHQ where only one point is used. In SAS, it can be obtained in the method statement using either `method=quad(qpoints=)` or `method=Laplace`. 
Both approaches return similar results with slight differences in later decimal places (See Appendix 1).\n\n\n::: {.cell}\n\n```{.r .cell-code}\nproc glimmix data=resp method=laplace;\nclass trtp(ref=\"A\") avisitn(ref='1') usubjid;\nmodel outcome=trtp avisitn trtp*avisitn/dist=bin link=logit solution ddfm=BW;\nrandom intercept /subject=usubjid /*type=vc*/;\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/glmm/SAS_Image_2.png){fig-align='center' width=50%}\n:::\n:::\n\n\n# PENALIZED QUASI-LIKELIHOOD (PQL)\n\nThe PQL approach uses linear approximations instead of likelihood, making it **less accurate for binary outcomes compared to the GHQ or Laplace** methods described above.\n\nIn SAS, this is implemented by default using the Residual Pseudo-Likelihood method (`method=RSPL)`, which is a refinement of PQL which incorporates residual adjustments to better approximate the marginal likelihood, in the GLIMMIX procedure.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nproc glimmix data=resp method=rspl;\nclass trtp(ref=\"A\") avisitn(ref='1') usubjid;\nmodel outcome=trtp avisitn trtp*avisitn/ dist=bin link=logit solution ddfm=residual;\nrandom intercept /subject=usubjid;\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/glmm/SAS_Image_3.png){fig-align='center' width=50%}\n:::\n:::\n\n\n# SANDWICH SE \n\nAs exposed in the R section, the paper by Li. P. and Redden, D.T. (2015) [2], suggests using the Between-Within denominator degrees of freedom approximation method when using GLMMs in randomized trials with binary outcomes and small sample size, which is the default option in GLIMMIX.\n\nAdditionally, FDA advises \"*sponsors to consider using of robust SE method such as the Huber-White \"sandwich\" SE, particularly when the model does not include treatment by covariate interaction*. [3]\" The Sandwich S.E. 
is easily obtained by adding the `empirical` option in the procedure.\n\nThe example below is done using GHQ with n=5 points, but it also works for Laplace approximation.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nproc glimmix data=resp method=quad(qpoints=5) empirical;\nclass trtp(ref=\"A\") avisitn(ref='1') usubjid;\nmodel outcome=trtp avisitn trtp*avisitn/dist=bin link=logit solution ddfm=BW;\nrandom intercept /subject=usubjid;\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/glmm/SAS_Image_4.png){fig-align='center' width=50%}\n:::\n:::\n\n\n# ODDS RATIO\n\nPredicted probabilities and odds ratios (ORs) can be obtained in SAS using the `LSMEANS` statement:\n\n- The ILINK option provides back-transformed predicted probabilities.\n\n- The DIFF option, combined with either EXP or ODDSRATIO, computes ORs.\n\n- CL computes confidence intervals.\n\n The example below is done using GHQ with n=5 points, but it also works for Laplace approximation.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n proc glimmix data=resp method=quad(qpoints=5) empirical;\n class trtp(ref=\"P\") avisitn(ref='1') usubjid; \n model outcome=trtp avisitn trtp*avisitn / dist=bin link=logit solution ddfm=betwithin ;\n lsmeans trtp*avisitn /cl ilink oddsratio diff;\n random intercept /subject=usubjid ;\n ods output LSMeans = Lsmeans Diffs=Diffs1;\n run; \n```\n:::\n\n\n **Predicted Probabilities**\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/glmm/SAS_Image_5.png){fig-align='center' width=50%}\n:::\n:::\n\n\n **Odds Ratio**\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/glmm/SAS_Image_6.png){fig-align='center' width=50%}\n:::\n:::\n\n\n# OUTCOMES WITH MORE THAN 2 CATEGORIES\n\nAlthough less common than binary outcomes, endpoints with more than two categories may be the outcome of interest, which can be either ordinal or nominal. 
In SAS, similar syntax used for binary outcomes can be applied, by specifying a multinomial distribution and selecting the appropriate link function (cumulative for ordinal outcomes and generalized logit if the variable is nominal).\n\nOne notable limitation is that the `LSMEANS` statement does not work as expected in GLIMMIX with the multinomial distribution. However, ORs can still be obtained by using the `ODDSRATIO` option in the model statement\n\n### Ordinal variable\n\n\n::: {.cell}\n\n```{.r .cell-code}\nproc glimmix data=resp method=quad(qpoints=5) empirical;\n\tclass trtp(ref=\"P\") avisitn(ref='1') usubjid; \n\tmodel respord=trtp avisitn trtp*avisitn base / dist=multinomial link=cumlogit solution oddsratio ddfm=betwithin;\n\trandom intercept /subject=usubjid;\n\tods output ParameterEstimates=ParamMulti_ord;\nrun; \n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/glmm/SAS_Image_7.png){fig-align='center' width=50%}\n:::\n:::\n\n\n### Nominal variable\n\n\n::: {.cell}\n\n```{.r .cell-code}\nproc glimmix data=resp method=quad(qpoints=5) ;\n\tclass trtp(ref=\"P\") avisitn(ref='1') usubjid respnom; \n\tmodel respnom(order=freq ref=first)=trtp avisitn trtp*avisitn base / dist=multinomial link=glogit solution ddfm=betwithin ;\n\trandom intercept /subject=usubjid group=respnom;\n\t*lsmeans trtp*avisitn/ diff=control ilink cl oddsratio;\n\tods output ParameterEstimates=ParamMulti_nom;\nrun; \n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/glmm/SAS_Image_8.png){fig-align='center' width=50%}\n:::\n:::\n\n\n# APPENDIX 1\n\n### Laplace: GLIMMIX with method=Laplace vs method=quad(qpoints=1)\n\n\n::: {.cell}\n\n```{.r .cell-code}\nproc glimmix data=resp method=laplace empirical; \nclass trtp(ref=\"P\") avisitn(ref='1') usubjid; \nmodel outcome=trtp avisitn trtp*avisitn/ dist=bin link=logit solution ddfm=BW; \nrandom intercept /subject=usubjid; \nods output 
ParameterEstimates=ParameterLaplace; \nrun; \n \nproc glimmix data=resp method=quad(qpoints=1) empirical; \nclass trtp(ref=\"P\") avisitn(ref='1') usubjid; \nmodel outcome=trtp avisitn trtp*avisitn / dist=bin link=logit solution cl ddfm=BW; \nrandom intercept /subject=usubjid; \nods output ParameterEstimates=ParameterGHQ_1p; \nrun; \n\nproc compare base=ParameterLaplace compare=ParameterGHQ_1p criterion=0.00000001; \nrun; \n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/glmm/SAS_Image_9.png){fig-align='center' width=50%}\n:::\n:::\n\n\n# REFERENCES\n\n[1] [SAS Institute Inc.. SAS Help Center. The GEE procedure.](https://documentation.sas.com/doc/en/statug/15.2/statug_gee_examples01.htm)\n\n[2] [Li, P., & Redden, D. T. (2015). Comparing denominator degrees of freedom approximations for the generalized linear mixed model in analyzing binary outcome in small sample cluster-randomized trials. BMC Medical Research Methodology, 15, 38.](https://bmcmedresmethodol.biomedcentral.com/articles/10.1186/s12874-015-0026-x)\n\n[3] [U.S. Food and Drug Administration. (2023). Adjusting for Covariates in Randomized Clinical Trials for Drugs and Biological Products: Guidance for Industry. Center for Drug Evaluation and Research (CDER), Center for Biologics Evaluation and Research (CBER).](https://www.fda.gov/regulatory-information/search-fda-guidance-documents/adjusting-covariates-randomized-clinical-trials-drugs-and-biological-products)\n\n[4] [SAS/STAT® 13.1 User’s Guide The GLIMMIX Procedure](https://support.sas.com/documentation/onlinedoc/stat/131/glimmix.pdf)\n", + "markdown": "---\ntitle: \"Generalized Linear Mixed Models (GLMM)\"\n---\n\n# INTRODUCTION\n\nGeneralized Linear Mixed Models (GLMM) method combines the characteristics of the Generalized Linear Model (GLM), with mixed models (such a repeated measures over time).  
It extends the GLM framework using link functions that relate the predictors to transformed outcome variable.\n\n$$\nE(Y)=\\mu\n$$\n\n$$\ng(\\mu) = X\\beta + Zb, \\qquad b \\sim N(0, G)\n$$\n\nWhere:\n\nn: number of observations, p: number of fixed effects, q: number of random effects (subjects).\n\nY: vector of observed response variable (n x 1)\n\ng: Link function that transforms Y to the linear scale (eg: logit)\n\nX: matrix for fixed effects (n x p), Z: matrix of random effects, G: covariance matrix of the random effects.\n\nB: vector of fixed effects coefficients (p x 1)., b: vector of random effects.\n\n**Link Function:**\n\n- Dichotomous response variable: probit (in case of rare events complementary log-log may be preferable).\n\n- Outcomes with more than two categories:\n\n - Ordinal variable: cumulative\n\n - Nominal variable: generalized logit\n\n**Random Effects**\n\nGLMM are conditional models and estimate subject-average effects, and the intra-subject correlation is modelled via random effects. Unlike GEE models, GLMM models allow individual-level inference.\n\n**Estimation Methods**\n\nMaximum likelihood, based on approximations:\n\n- Gauss Hermite Quadrature (GHQ): Integral split in a given number of points.\n\n- Laplace: A specific case of GHQ, using 1 point.\n\nPenalized Likelihood can be used too, but it is known that in binary data **it underestimates variance components and biased results.**\n\n**Note:** The results shown in this section have been post‑processed to enhance visual comparability. 
Specifically, the outputs were extracted using the ODS OUTPUT statement and filtered to retain only the relevant values\n\n# EXAMPLE DATA\n\nA SAS data of clinical trial data comparing two treatments for a respiratory disorder available in [\"Gee Model for Binary Data\"](https://documentation.sas.com/doc/en/statug/15.2/statug_code_genmex5.htm) in the SAS/STAT Sample Program Library [1] is used to create these examples.\n\nTo uniquely identify subjects, a new variable USUBJID was created by concatenating SITE and ID. Variables TREATMENT, BASELINE, and VISIT were renamed to TRTP, BASE, and AVISITN.\n\nAdditionally, two variables were created using randomly generated values to simulate variables with more than two categories. One was an ordinal variable with values 1, 2, and 3; the other was a nominal variable with categories 'liver', 'lung', and 'bone'. The following SAS code was used:\n\n```sas\nproc format; \n value respmulti \n 1='Liver' \n 2='Lung' \n 3='Bone'; \nrun; \n \ndata resp; \n set resp1; \n call streaminit(1234); \n respord = rand(\"integer\", 1, 3); *Ordinal; \n respnom = put(respord, respmulti.); *Nominal;\nrun;\n```\n\n# GLMM WITH GHQ\n\nGLMM with GHQ approximation can be fitted using `PROC GLIMMIX` by specifying `quad(qpoints=)` . The random effects are specified with the `RANDOM` statement, while the `TYPE=` option statement can be used to specify the covariance structure of G (variance matrix of the random effects). Variance Components by default (type=VC) is the default option.\n\nUnlike in GEE models, which computed the robust Sandwich S.E. by default, the GLIMMIX procedure displays the model-based S.E. (also called naïve S.E. 
in R) by default.\n\n```sas\nproc glimmix data=resp method=quad(qpoints=5);\nclass trtp(ref=\"P\") avisitn(ref='1') usubjid;\nmodel outcome=trtp avisitn trtp*avisitn/dist=bin link=logit solution ddfm=BW;/*1*/;\nrandom intercept /subject=usubjid /*type=vc [2]*/;\nrun;\n\n```\n\n[1] Option to set up the ddff (BW=Between-Within, which is the default). Other options include: infinite (none), Kenwardroger, Residual, etc.\n\n[2] Covariance structure of G (residuals covariance matrix). Variance components (VC) by default.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/glmm/SAS_Image_1.png){fig-align='center' width=50%}\n:::\n:::\n\n\n# GLMM WITH LAPLACE\n\nLaplace is a particular GHQ where only one point is used. In SAS, it can be obtained in the method statement using either `method=quad(qpoints=)` or `method=Laplace`. Both approaches return similar results with slight differences in later decimal places (See Appendix 1).\n\n```sas\nproc glimmix data=resp method=laplace;\nclass trtp(ref=\"A\") avisitn(ref='1') usubjid;\nmodel outcome=trtp avisitn trtp*avisitn/dist=bin link=logit solution ddfm=BW;\nrandom intercept /subject=usubjid /*type=vc*/;\nrun;\n\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/glmm/SAS_Image_2.png){fig-align='center' width=50%}\n:::\n:::\n\n\n# PENALIZED QUASI-LIKELIHOOD (PQL)\n\nThe PQL approach uses linear approximations instead of likelihood, making it **less accurate for binary outcomes compared to the GHQ or Laplace** methods described above.\n\nIn SAS, this is implemented by default using the Residual Pseudo-Likelihood method (`method=RSPL)`, which is a refinement of PQL which incorporates residual adjustments to better approximate the marginal likelihood, in the GLIMMIX procedure.\n\n```sas\nproc glimmix data=resp method=rspl;\nclass trtp(ref=\"A\") avisitn(ref='1') usubjid;\nmodel outcome=trtp avisitn trtp*avisitn/ dist=bin link=logit solution 
ddfm=residual;\nrandom intercept /subject=usubjid;\nrun;\n\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/glmm/SAS_Image_3.png){fig-align='center' width=50%}\n:::\n:::\n\n\n# SANDWICH SE \n\nAs exposed in the R section, the paper by Li. P. and Redden, D.T. (2015) [2], suggests using the Between-Within denominator degrees of freedom approximation method when using GLMMs in randomized trials with binary outcomes and small sample size, which is the default option in GLIMMIX.\n\nAdditionally, FDA advises \"*sponsors to consider using of robust SE method such as the Huber-White \"sandwich\" SE, particularly when the model does not include treatment by covariate interaction*. [3]\" The Sandwich S.E. is easily obtained by adding the `empirical` option in the procedure.\n\nThe example below is done using GHQ with n=5 points, but it also works for Laplace approximation.\n\n```sas\nproc glimmix data=resp method=quad(qpoints=5) empirical;\nclass trtp(ref=\"A\") avisitn(ref='1') usubjid;\nmodel outcome=trtp avisitn trtp*avisitn/dist=bin link=logit solution ddfm=BW;\nrandom intercept /subject=usubjid;\nrun;\n\n\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/glmm/SAS_Image_4.png){fig-align='center' width=50%}\n:::\n:::\n\n\n# ODDS RATIO\n\nPredicted probabilities and odds ratios (ORs) can be obtained in SAS using the `LSMEANS` statement:\n\n- The ILINK option provides back-transformed predicted probabilities.\n\n- The DIFF option, combined with either EXP or ODDSRATIO, computes ORs.\n\n- CL computes confidence intervals.\n\n The example below is done using GHQ with n=5 points, but it also works for Laplace approximation.\n\n```sas\nproc glimmix data=resp method=quad(qpoints=5) empirical;\nclass trtp(ref=\"P\") avisitn(ref='1') usubjid; \nmodel outcome=trtp avisitn trtp*avisitn / dist=bin link=logit solution ddfm=betwithin ;\nlsmeans trtp*avisitn /cl ilink oddsratio diff;\nrandom intercept 
/subject=usubjid ;\nods output LSMeans = Lsmeans Diffs=Diffs1;\nrun; \n\n```\n\n **Predicted Probabilities**\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/glmm/SAS_Image_5.png){fig-align='center' width=50%}\n:::\n:::\n\n\n **Odds Ratio**\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/glmm/SAS_Image_6.png){fig-align='center' width=50%}\n:::\n:::\n\n\n# OUTCOMES WITH MORE THAN 2 CATEGORIES\n\nAlthough less common than binary outcomes, endpoints with more than two categories may be the outcome of interest, which can be either ordinal or nominal. In SAS, similar syntax used for binary outcomes can be applied, by specifying a multinomial distribution and selecting the appropriate link function (cumulative for ordinal outcomes and generalized logit if the variable is nominal).\n\nOne notable limitation is that the `LSMEANS` statement does not work as expected in GLIMMIX with the multinomial distribution. However, ORs can still be obtained by using the `ODDSRATIO` option in the model statement\n\n### Ordinal variable\n\n```sas\nproc glimmix data=resp method=quad(qpoints=5) empirical;\n\tclass trtp(ref=\"P\") avisitn(ref='1') usubjid; \n\tmodel respord=trtp avisitn trtp*avisitn base / dist=multinomial link=cumlogit solution oddsratio ddfm=betwithin;\n\trandom intercept /subject=usubjid;\n\tods output ParameterEstimates=ParamMulti_ord;\nrun; \n\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/glmm/SAS_Image_7.png){fig-align='center' width=50%}\n:::\n:::\n\n\n### Nominal variable\n\n```sas\nproc glimmix data=resp method=quad(qpoints=5) ;\n\tclass trtp(ref=\"P\") avisitn(ref='1') usubjid respnom; \n\tmodel respnom(order=freq ref=first)=trtp avisitn trtp*avisitn base / dist=multinomial link=glogit solution ddfm=betwithin ;\n\trandom intercept /subject=usubjid group=respnom;\n\t*lsmeans trtp*avisitn/ diff=control ilink cl oddsratio;\n\tods output 
ParameterEstimates=ParamMulti_nom;\nrun; \n\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/glmm/SAS_Image_8.png){fig-align='center' width=50%}\n:::\n:::\n\n\n# APPENDIX 1\n\n### Laplace: GLIMMIX with method=Laplace vs method=quad(qpoints=1)\n\n```sas\nproc glimmix data=resp method=laplace empirical; \nclass trtp(ref=\"P\") avisitn(ref='1') usubjid; \nmodel outcome=trtp avisitn trtp*avisitn/ dist=bin link=logit solution ddfm=BW; \nrandom intercept /subject=usubjid; \nods output ParameterEstimates=ParameterLaplace; \nrun; \n \nproc glimmix data=resp method=quad(qpoints=1) empirical; \nclass trtp(ref=\"P\") avisitn(ref='1') usubjid; \nmodel outcome=trtp avisitn trtp*avisitn / dist=bin link=logit solution cl ddfm=BW; \nrandom intercept /subject=usubjid; \nods output ParameterEstimates=ParameterGHQ_1p; \nrun; \n\nproc compare base=ParameterLaplace compare=ParameterGHQ_1p criterion=0.00000001; \nrun; \n\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/glmm/SAS_Image_9.png){fig-align='center' width=50%}\n:::\n:::\n\n\n# REFERENCES\n\n[1] [SAS Institute Inc.. SAS Help Center. The GEE procedure.](https://documentation.sas.com/doc/en/statug/15.2/statug_gee_examples01.htm)\n\n[2] [Li, P., & Redden, D. T. (2015). Comparing denominator degrees of freedom approximations for the generalized linear mixed model in analyzing binary outcome in small sample cluster-randomized trials. BMC Medical Research Methodology, 15, 38.](https://bmcmedresmethodol.biomedcentral.com/articles/10.1186/s12874-015-0026-x)\n\n[3] [U.S. Food and Drug Administration. (2023). Adjusting for Covariates in Randomized Clinical Trials for Drugs and Biological Products: Guidance for Industry. 
Center for Drug Evaluation and Research (CDER), Center for Biologics Evaluation and Research (CBER).](https://www.fda.gov/regulatory-information/search-fda-guidance-documents/adjusting-covariates-randomized-clinical-trials-drugs-and-biological-products)\n\n[4] [SAS/STAT® 13.1 User’s Guide The GLIMMIX Procedure](https://support.sas.com/documentation/onlinedoc/stat/131/glimmix.pdf)\n", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/SAS/gsd-tte/execute-results/html.json b/_freeze/SAS/gsd-tte/execute-results/html.json index 44f821062..05300c442 100644 --- a/_freeze/SAS/gsd-tte/execute-results/html.json +++ b/_freeze/SAS/gsd-tte/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "7591a538b5f95c25bfc6678977eb66c6", + "hash": "4b3674405e944666bbc21a3c7fc074ca", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Group Sequential Design in Survival Endpoints Using SAS\"\ndate: last-modified\ndate-format: D MMMM, YYYY\nexecute: \n eval: false\n---\n\n# Introduction\n\nPROC SEQDESIGN^1^ can be used for sample size calculations for group sequential design (GSD). SAS provides a [flowchart](https://documentation.sas.com/api/docsets/statug/15.4/content/images/seqchart.png)^2^ which summarizes the steps in a typical group sequential trial and the relevant SAS procedures. Here we focus on a GSD applied for time-to-event endpoints.\n\n# **Log-Rank Test for Two Survival Distributions**\n\nThis example illustrates sample size computation for survival data with the same setting in another [R example](https://psiaims.github.io/CAMIS/R/gsd-tte.html):\n\nA GSD will be utilized for progression-free survival (PFS). PFS will be tested at one interim analysis (IA) at 75% information fraction for both efficacy and non-binding futility. 
A Lan-DeMets O'Brien-Fleming-type (LD-OBF) spending function will be used for efficacy testing, and a Hwang-Shih-Decani (HSD) spending function (as known as gamma cumulative spending function) with $\\gamma = -10$ will be used for futility. In the GSD, $\\alpha$ is one-sided at 0.025, $\\beta$ is 0.05, the accrual period is 24 months, and the follow-up period is 10 months. As described in the [R example](https://psiaims.github.io/CAMIS/R/gsd-tte.html), the dropout rates are 0.2 by month 12 for the both group; that is, the dropout times follow the exponential distribution with the parameter $\\lambda = -\\ln(1-0.2)/12 = 0.0185953$.\n\nThe SAS code is shown below:\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nPROC SEQDESIGN;\n DESIGN NSTAGES=2 \n INFO=CUM(0.75 1.0) \n ALT=UPPER \n ALPHA=0.0125 \n BETA=0.05\n METHOD(ALPHA)=ERRFUNCOBF \n METHOD(BETA)=ERRFUNCGAMMA(GAMMA=-10) \n STOP=BOTH(BETABOUNDARY=NONBINDING);\n SAMPLESIZE MODEL=TWOSAMPLESURVIVAL(\n NULLMEDSURVTIME=9.4\n HAZARDRATIO=0.6\n ACCTIME=24 \n FOLTIME=10\n LOSS=EXP(HAZARD=0.0185953)\n WEIGHT=1);\nRUN;\n```\n:::\n\n\nAs shown below, a total sample size of 398 is recommended, which equates to 199 in each group.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/gsd-tte/result-pfs-sas.png){fig-align='center' width=100%}\n:::\n:::\n\n\n### References\n\n1. [The SEQDESIGN procedure: SAS® 9.4 and SAS® Viya® 3.5 Programming Documentation](https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.5/statug/statug_seqdesign_toc.htm)\n2. [Overview: SEQDESIGN Procedure](https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.5/statug/statug_seqdesign_overview.htm)", + "markdown": "---\ntitle: \"Group Sequential Design in Survival Endpoints Using SAS\"\ndate: last-modified\ndate-format: D MMMM, YYYY\n---\n\n# Introduction\n\nPROC SEQDESIGN^1^ can be used for sample size calculations for group sequential design (GSD). 
SAS provides a [flowchart](https://documentation.sas.com/api/docsets/statug/15.4/content/images/seqchart.png)^2^ which summarizes the steps in a typical group sequential trial and the relevant SAS procedures. Here we focus on a GSD applied for time-to-event endpoints.\n\n# **Log-Rank Test for Two Survival Distributions**\n\nThis example illustrates sample size computation for survival data with the same setting in another [R example](https://psiaims.github.io/CAMIS/R/gsd-tte.html):\n\nA GSD will be utilized for progression-free survival (PFS). PFS will be tested at one interim analysis (IA) at 75% information fraction for both efficacy and non-binding futility. A Lan-DeMets O'Brien-Fleming-type (LD-OBF) spending function will be used for efficacy testing, and a Hwang-Shih-Decani (HSD) spending function (as known as gamma cumulative spending function) with $\\gamma = -10$ will be used for futility. In the GSD, $\\alpha$ is one-sided at 0.025, $\\beta$ is 0.05, the accrual period is 24 months, and the follow-up period is 10 months. As described in the [R example](https://psiaims.github.io/CAMIS/R/gsd-tte.html), the dropout rates are 0.2 by month 12 for the both group; that is, the dropout times follow the exponential distribution with the parameter $\\lambda = -\\ln(1-0.2)/12 = 0.0185953$.\n\nThe SAS code is shown below:\n\n```sas\nPROC SEQDESIGN;\n DESIGN NSTAGES=2 \n INFO=CUM(0.75 1.0) \n ALT=UPPER \n ALPHA=0.0125 \n BETA=0.05\n METHOD(ALPHA)=ERRFUNCOBF \n METHOD(BETA)=ERRFUNCGAMMA(GAMMA=-10) \n STOP=BOTH(BETABOUNDARY=NONBINDING);\n SAMPLESIZE MODEL=TWOSAMPLESURVIVAL(\n NULLMEDSURVTIME=9.4\n HAZARDRATIO=0.6\n ACCTIME=24 \n FOLTIME=10\n LOSS=EXP(HAZARD=0.0185953)\n WEIGHT=1);\nRUN;\n```\n\nAs shown below, a total sample size of 398 is recommended, which equates to 199 in each group.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/gsd-tte/result-pfs-sas.png){fig-align='center' width=100%}\n:::\n:::\n\n\n### References\n\n1. 
[The SEQDESIGN procedure: SAS® 9.4 and SAS® Viya® 3.5 Programming Documentation](https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.5/statug/statug_seqdesign_toc.htm)\n2. [Overview: SEQDESIGN Procedure](https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.5/statug/statug_seqdesign_overview.htm)", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/SAS/jonchkheere_terpstra/execute-results/html.json b/_freeze/SAS/jonchkheere_terpstra/execute-results/html.json deleted file mode 100644 index 64d214eda..000000000 --- a/_freeze/SAS/jonchkheere_terpstra/execute-results/html.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "hash": "a869b1a88c6ec763beec84bab5a27e96", - "result": { - "engine": "jupyter", - "markdown": "---\ntitle: \"SAS Jonckheere-Terpstra Test\"\nexecute: \n eval: false\n---\n\n## Background\n\nThe Jonckheere-Terpstra (JT) test is a nonparametric method designed to detect ordered differences across categories. It offers an advantageous alternative to more general tests for class differences, such as the Kruskal-Wallis test, particularly when the analysis is conducted using the WILCOXON option within the NPAR1WAY procedure. $^{[1]}$\n\nThe JT test is particularly well-suited for dose-response or trend analysis with ordered categorical data, where the objective is to ascertain whether an increment in dosage leads to a corresponding escalation or reduction in the response variable.$^{[2]}$$^{[5]}$ Unlike other statistical evaluations that might focus on identifying isolated differences between groups, this test is specifically tailored to uncover an overarching trend within the data.\n\n## SAS Procedure\n\nTo request Jonckheere-Terpstra test, specify the **JT** option in the Table statement like below:\n\n```{sas}\nproc freq; \n table Var1 * Var2 / JT; \nQuit;\n```\n\nThe JT option in the TABLES statement provides the Jonckheere-Terpstra test.\n\nPROC FREQ also provides exact p-values for the Jonckheere-Terpstra test. 
You can request the exact test by specifying the **JT** option in the EXACT statement.$^{[3]}$\n\n## Data used 1\n\nThis dataset has been generated using example data which aligned with the specifications outlined in the section on the Jonckheere–Terpstra test from reference \\[5\\]. It represents the duration of hospital stays for a randomly selected group of patients across three distinct ICU departments: cardiothoracic, medical, and neurosurgical.\n\n```{sas}\ndata ICU_Stay;\n input ICU $ Stay;\n label Stay = 'Length of Stay in Days';\n datalines;\nCardiothoracic 7\nMedical 4\nCardiothoracic 1\nMedical 7\nCardiothoracic 2\nMedical 16\nCardiothoracic 6\nMedical 11\nCardiothoracic 11\nMedical 21\nCardiothoracic 8\nNeurosurgical 20\nNeurosurgical 25\nNeurosurgical 13\nNeurosurgical 9\nNeurosurgical 14\nNeurosurgical 11\n;\nrun;\n\nproc sort data=ICU_Stay;\n by ICU Stay;\nrun;\n```\n\n## Example Code using 1\n\nThe code performs a frequency analysis on the 'ICU_Stay' dataset, examining the relationship between 'ICU' and 'Stay' variables. It applies the Jonckheere-Terpstra test using JT option to identify trends in the ordered categorical 'Stay' variable. The output is streamlined by omitting percentages and totals for columns and rows with the 'nopercent nocol norow' options, emphasizing the Jonckheere-Terpstra test outcomes.\n\n```{sas}\nproc freq data=ICU_Stay; \n table ICU * Stay / JT nopercent nocol norow; \nrun;\n```\n\n## Test Result 1\n\n![Test Result 1](../../CAMIS/images/jonchkheere_terpstra/result1.png \"Test Result 1\")\n\nComparing this with a standard Normal distribution gives a P value of 0.005, indicating that the increase in length of stay with ICU is significant, in the order cardiothoracic, medical and neurosurgical.\n\n## Data used 2\n\nThis dataset incorporates illustrative data extracted from reference \\[3\\]. 
It encapsulates the responses of subjects randomly assigned to one of four treatment arms: placebo, low dosage(20mg), medium dosage(60mg), and high dosage(180mg). The variable of interest is a continuous measure. The variable 'groupn' is used to provide an order of 'group'.\n\n```{sas}\ndata contin;\n input groupn group $ subject response;\n cards;\n0 Placebo 01 27\n0 Placebo 02 28\n0 Placebo 03 27\n0 Placebo 04 31\n0 Placebo 05 34\n0 Placebo 06 32\n1 20mg 01 31\n1 20mg 02 35\n1 20mg 03 34\n1 20mg 04 32\n1 20mg 05 31\n1 20mg 06 33\n2 60mg 01 32\n2 60mg 02 33\n2 60mg 03 30\n2 60mg 04 34\n2 60mg 05 37\n2 60mg 06 36\n3 180mg 01 40\n3 180mg 02 39\n3 180mg 03 41\n3 180mg 04 38\n3 180mg 05 42\n3 180mg 06 43\n;\nrun;\n```\n\n## Example Code using 2\n\nThe code is performing a Jonckheere-Terpstra trend test on a continuous 'response' variable, categorized by a 'group' variable, using the 'proc freq' procedure. The analysis is applied to the dataset named 'contin'. The result is presented with a title \"Jonckheere-Terpstra Trend Test for Continuous Data\", indicating the specific nature of the test being conducted. The 'JT' option is used to specify the Jonckheere-Terpstra test.\n\n```{sas}\nproc freq data=contin; \n tables group * response/JT; \n title \"Jonckheere-Terpstra Trend Test for Continuous Data\"; \nrun;\n```\n\n## Test Result 2\n\n![Test Result 2](../../CAMIS/images/jonchkheere_terpstra/result2.png \"Test Result 2\")\n\nThere is a significant trend across different groups in the response gives a P value of \\<.0001.\n\n## EXACT Options\n\nWith EXACT statement, the exact version and it Monte Carlo approximation can be also conducted. 
However, it should be noted that the exact test, i.e., a permuation test takes a long time to compelete the task even for a small dataset.\n\n```{sas}\nproc freq data = inds;\n title \"Asymptotic p-value calculation\";\n table ICU * Stay / jt;\n ods output JTTest = o_jt;\nrun;\n\nproc freq data = inds;\n title \"Approximation of exact test by resampling\";\n table ICU * Stay / jt;\n exact jt / mc seed = 4989 n = 10000 alpha = 0.05;\n ods output JTTestMC = o_jt_sim;\nrun;\n\n```\n\n## Conclusion\n\nThe JT test is particularly useful in scenarios such as dose-response studies in pharmacology, where the interest lies in whether increasing doses of a drug lead to a monotonic increase in the response.\n\n## Reference\n\n\\[1\\] SAS Institute Inc. (n.d.). SAS Help Center. Retrieved August 7, 2024, from https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.5/statug/statug_freq_details77.htm\n\n\\[2\\] Pennsylvania State University. (n.d.). 11.4 - Safety and Efficacy (Phase II) Studies: Trend Analysis. In STAT 509: Advanced Statistics for the Health Sciences. Retrieved August 7, 2024, from https://online.stat.psu.edu/stat509/lesson/11/11.4\n\n\\[3\\] SAS Institute Inc. (n.d.). FREQ Procedure: Syntax. In SAS/STAT 14.2 User's Guide. Retrieved August 7, 2024, from https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.5/statug/statug_freq_syntax08.htm\n\n\\[4\\] Park, C., Hsiung, J.-T., Soohoo, M., & Streja, E. (2019). Choosing Wisely: Using the Appropriate Statistical Test for Trend in SAS\n\n\\[5\\] Bewick V, Cheek L, Ball J. Statistics review 10: Further nonparametric methods. Crit Care. 2004;8(4):R131-R139. doi:10.1186/cc468904. 
PMCID: PMC468904.[PubMed Central](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC468904/).\n\n", - "supporting": [ - "jonchkheere_terpstra_files" - ], - "filters": [], - "includes": {} - } -} \ No newline at end of file diff --git a/_freeze/SAS/kruskal_wallis/execute-results/html.json b/_freeze/SAS/kruskal_wallis/execute-results/html.json index e105933a2..af362d9ff 100644 --- a/_freeze/SAS/kruskal_wallis/execute-results/html.json +++ b/_freeze/SAS/kruskal_wallis/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "9276905a97b835fdbc0db54eedaee2ed", + "hash": "5932bb50cd9fd5c68620975d41133bea", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Kruskal Wallis SAS\"\nexecute: \n eval: false\n---\n\n## Introduction\n\nThe Kruskal-Wallis test is a non-parametric equivalent to the one-way ANOVA. For this example, the data used is a subset of R's datasets::iris, testing for difference in sepal width between species of flower. This data was subset in R and input manually to SAS with a data step.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\ndata iris_sub;\n\t input Species $ Sepal_Width;\n\t datalines;\nsetosa 3.4\nsetosa 3.0\nsetosa 3.4\nsetosa 3.2\nsetosa 3.5\nsetosa 3.1\nversicolor 2.7\nversicolor 2.9\nversicolor 2.7\nversicolor 2.6\nversicolor 2.5\nversicolor 2.5\nvirginica 3.0\nvirginica 3.0\nvirginica 3.1\nvirginica 3.8\nvirginica 2.7\nvirginica 3.3\n;\nrun;\n```\n:::\n\n\n## Implementing Kruskal-Wallis in SAS\n\nThe Kruskal-Wallis test can be implemented in SAS using the NPAR1WAY procedure with WILCOXON option. Below, the test is defined with the indicator variable (Species) by the CLASS statement, and the response variable (Sepal_Width) by the VAR statement. Adding the EXACT statement outputs the exact p-value in addition to the asymptotic result. 
The null hypothesis is that the samples are from identical populations.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc npar1way data=iris_sub wilcoxon;\n class Species;\n var Sepal_Width;\n exact;\nrun;\n```\n:::\n\n\n## Results\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/kruskalwallis/kw_SAS1.png){fig-align='center' width=90%}\n:::\n:::\n\n\nAs seen above, SAS outputs a table of Wilcoxon Scores for Sepal_Width by each Species including (per group): the number (N); the sum of scores; the expected sum of scores under the null hypothesis; the standard deviation under the null hypothesis, and the observed mean score. The table also includes a footnote to specify that ties were handled by using the average score.\n\nA table of the test results gives the Kruskal-Wallis rank sum statistic (10.922), the degrees of freedom (2), and the asymptotic p-value of the test (0.0042), and the exact p-value (0.0008). Therefore, the difference in population medians is statistically significant at the 5% level.", + "markdown": "---\ntitle: \"Kruskal Wallis SAS\"\n---\n\n## Introduction\n\nThe Kruskal-Wallis test is a non-parametric equivalent to the one-way ANOVA. For this example, the data used is a subset of R's datasets::iris, testing for difference in sepal width between species of flower. This data was subset in R and input manually to SAS with a data step.\n\n```sas\ndata iris_sub;\n\t input Species $ Sepal_Width;\n\t datalines;\nsetosa 3.4\nsetosa 3.0\nsetosa 3.4\nsetosa 3.2\nsetosa 3.5\nsetosa 3.1\nversicolor 2.7\nversicolor 2.9\nversicolor 2.7\nversicolor 2.6\nversicolor 2.5\nversicolor 2.5\nvirginica 3.0\nvirginica 3.0\nvirginica 3.1\nvirginica 3.8\nvirginica 2.7\nvirginica 3.3\n;\nrun;\n```\n\n## Implementing Kruskal-Wallis in SAS\n\nThe Kruskal-Wallis test can be implemented in SAS using the NPAR1WAY procedure with WILCOXON option. 
Below, the test is defined with the indicator variable (Species) by the CLASS statement, and the response variable (Sepal_Width) by the VAR statement. Adding the EXACT statement outputs the exact p-value in addition to the asymptotic result. The null hypothesis is that the samples are from identical populations.\n\n```sas\nproc npar1way data=iris_sub wilcoxon;\n class Species;\n var Sepal_Width;\n exact;\nrun;\n```\n\n## Results\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/kruskalwallis/kw_SAS1.png){fig-align='center' width=90%}\n:::\n:::\n\n\nAs seen above, SAS outputs a table of Wilcoxon Scores for Sepal_Width by each Species including (per group): the number (N); the sum of scores; the expected sum of scores under the null hypothesis; the standard deviation under the null hypothesis, and the observed mean score. The table also includes a footnote to specify that ties were handled by using the average score.\n\nA table of the test results gives the Kruskal-Wallis rank sum statistic (10.922), the degrees of freedom (2), and the asymptotic p-value of the test (0.0042), and the exact p-value (0.0008). Therefore, the difference in population medians is statistically significant at the 5% level.", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/SAS/linear-regression/execute-results/html.json b/_freeze/SAS/linear-regression/execute-results/html.json deleted file mode 100644 index e21d2fc7f..000000000 --- a/_freeze/SAS/linear-regression/execute-results/html.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "hash": "16c3663263f3cd7affba5ebe15f7de3b", - "result": { - "engine": "jupyter", - "markdown": "---\ntitle: \"Linear Regression\"\ndate: last-modified\ndate-format: D MMMM, YYYY\nexecute: \n eval: false\n---\n\nTo demonstrate the use of linear regression we examine a dataset that illustrates the relationship between Height and Weight in a group of 237 teen-aged boys and girls. 
The dataset is available at (../data/htwt.csv) and is imported to sas using proc import procedure.\n\n### Descriptive Statistics\n\nThe first step is to obtain the simple descriptive statistics for the numeric variables of htwt data, and one-way frequencies for categorical variables. This is accomplished by employing proc means and proc freq procedures There are 237 participants who are from 13.9 to 25 years old. It is a cross-sectional study, with each participant having one observation. We can use this data set to examine the relationship of participants' height to their age and sex.\n\n```{sas}\nproc means data=htwt;\nrun;\n```\n\n``` default\n Descriptive Statistics for HTWT Data Set \n The MEANS Procedure\n\nVariable Label N Mean Std Dev Minimum Maximum\n-----------------------------------------------------------------------------\nAGE AGE 237 16.4430380 1.8425767 13.9000000 25.0000000\nHEIGHT HEIGHT 237 61.3645570 3.9454019 50.5000000 72.0000000\nWEIGHT WEIGHT 237 101.3080169 19.4406980 50.5000000 171.5000000\n----------------------------------------------------------------------------\n```\n\n```{sas}\nproc freq data=htwt;\n tables sex;\nrun;\n```\n\n``` default\n Oneway Frequency Tabulation for Sex for HTWT Data Set \n The FREQ Procedure\n\n Cumulative Cumulative\nSEX Frequency Percent Frequency Percent\n-------------------------------------------------------------\nf 111 46.84 111 46.84\nm 126 53.16 237 100.00\n```\n\nIn order to create a regression model to demonstrate the relationship between age and height for females, we first need to create a flag variable identifying females and an interaction variable between age and female gender flag.\n\n```{sas}\ndata htwt2;\n set htwt;\n if sex=\"f\" then female=1;\n if sex=\"m\" then female=0; \n\n *model to demonstrate interaction between female gender and age;\n fem_age = female * age; \nrun;\n```\n\n### Regression Analysis\n\nNext, we fit a regression model, representing the relationships between gender, 
age, height and the interaction variable created in the datastep above. We again use a where statement to restrict the analysis to those who are less than or equal to 19 years old. We use the clb option to get a 95% confidence interval for each of the parameters in the model. The model that we are fitting is ***height = b0 + b1 x female + b2 x age + b3 x fem_age + e***\n\n```{sas}\nproc reg data=htwt2;\n where age <=19;\n model height = female age fem_age / clb;\nrun; \nquit;\n```\n\n``` default\n Number of Observations Read 219\n Number of Observations Used 219\n\n Analysis of Variance\n\n Sum of Mean\n Source DF Squares Square F Value Pr > F\n Model 3 1432.63813 477.54604 60.93 <.0001\n Error 215 1684.95730 7.83701\n Corrected Total 218 3117.59543\n\n\n Root MSE 2.79947 R-Square 0.4595\n Dependent Mean 61.00457 Adj R-Sq 0.4520\n Coeff Var 4.58895\n```\n\nWe examine the parameter estimates in the output below.\n\n``` default\n Parameter Estimates\n Parameter Standard\n Variable DF Estimate Error t Value Pr > |t| 95% Confidence Limits\n Intercept 1 28.88281 2.87343 10.05 <.0001 23.21911 34.54650\n female 1 13.61231 4.01916 3.39 0.0008 5.69031 21.53432\n AGE 1 2.03130 0.17764 11.44 <.0001 1.68117 2.38144\n fem_age 1 -0.92943 0.24782 -3.75 0.0002 -1.41791 -0.44096\n```\n\nFrom the parameter estimates table the coefficients b0,b1,b2,b3 are estimated as b0=28.88 b1=13.61 b2=2.03 b3=-0.92942\n\nThe resulting regression model for height, age and gender based on the available data is ***height=28.88281 + 13.61231 x female + 2.03130 x age -0.92943 x fem_age***\n\n", - "supporting": [ - "linear-regression_files" - ], - "filters": [], - "includes": {} - } -} \ No newline at end of file diff --git a/_freeze/SAS/logistic-regr/execute-results/html.json b/_freeze/SAS/logistic-regr/execute-results/html.json deleted file mode 100644 index 2bec4b217..000000000 --- a/_freeze/SAS/logistic-regr/execute-results/html.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "hash": 
"c0e09c72be2757af55173b684f7a73aa", - "result": { - "engine": "jupyter", - "markdown": "---\ntitle: \"Logistic Regression in SAS\"\ndate: last-modified\ndate-format: D MMMM, YYYY\nexecute: \n eval: false\n---\n\nFor a brief description of what is logistic regression see [here](../R/logistic_regr.html).\n\n# Modelling the lung cancer data\n\nIn SAS, we can use proc logistic or proc genmod to perform a logistic regression.\n\nTo demonstrate the use of logistic regression we examine the same lung dataset as used in the R example [here](../R/logistic_regr.html).\n\n## **Summary of Common Mistakes in SAS**\n\n1) Handling of missing data. Check SAS output that the number of missing values is as you expect. Make sure you have changed any `NA` results in the raw data to be missing, since SAS would consider `NA` as a valid category (a non-missing character result).\n\n2) Make sure you consider continuous or categorical variables as you intended. Just because a variable is character or numeric in the dataset, doesn't mean SAS will treat it that way in the model. You have to use Class row to tell SAS which variables should be treated as character factors. You also have to use `ref=' '` to tell SAS which is the reference category, otherwise SAS by default which use the last value of the variable alphabetically (e..g a categorical variable with 1, 2, 3 would default to 3 as the reference).\n\n3) Be careful you are modelling the correct event (response vs non-response, or weight_gain vs weight_loss for example)\n\n4) Be careful when interpreting any odds ratios that you have the factor of interest the correct way around (0 vs 1, or 1 vs 0)\n\n5) If using proc logistic, be careful of how SAS creates its parameters used in the model as this determines how you can use the parameter estimates! It is often easiest to use `param=glm` so that the exp(maximum likelihood parameter estimate) = odds ratio. Check the class level information (Design variables) is as you would expect. 
See below for more detail on `effect` and `ref` parameterization and [here](https://support.sas.com/documentation/cdl/en/statug/63962/HTML/default/viewer.htm#statug_introcom_a0000003337.htm) for more options such as polynomial coding.\n\n6) By default, SAS includes an intercept in the model. The intercept represents the baseline log odds of the outcome when all predictor variables are set to zero. SAS outputs a p-value testing if the baseline log odds is significantly different to zero, however this is not generally of interest because the purpose of our modelling is to find out which parameters have a significant effect on the probability of an event occurring. This baseline log odds is simply shifting the linear expression up or down so that the variable components are most accurate. The baseline log odds, may be not interpretable if it's not possible for some variables to take a value of zero (e.g. age=0 is not yet born!). Therefore, we generally ignore the intercept and instead calculate odds ratios for parameters of interest. How your model is parameterised (param=glm, param=effect, param=ref, can also affect the estimate, so intercept estimates may not align when using different parameterization.\n\n## Modelling using Proc Genmod\n\nProc Genmod is a procedure that allows the fitting of Generalized Linear Models. By using the options `dist=bin` and `link=logit,` it fits a logistic regression as shown below. For more information see the SAS help [here](https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_genmod_sect002.htm).\n\nAlways check that the Class Level Information matches what you expect (SAS puts the reference class level last). 
Also check that you are modelling the correct 'event' and that the algorithm has converged.\n\nBelow we are fitting trt01pn and sex as categorical variables, age, ph_ecog2 and meal_caln as continuous variables.\n\nYou can use exponential of the maximum likelihood parameter estimate and the exponential of the Wald 95% Confidence Limits to obtain the odds ratios and 95% CIs. Proc Genmod uses GLM parameterization.\n\n```{sas}\nExample data: . = missing, trt01pn (1 or 2), sex (1 or 2), ph_ecog2 (0,1,2,3)\nwt_gain (1=gain, 0=no gain)\n```\n\n``` default\nwt_gain trt01pn age sex ph_ecog2 meal_caln\n-----------------------------------------------------------------------------\n. 1 74 1 1 1175\n0 1 68 2 0 1225\n1 2 60 1 2 .\n```\n\n```{sas}\nproc genmod data=lung; \n class trt01pn (ref=\"1\") sex (ref=\"1\"); \n model wt_gain (event=\"1\") = trt01pn age sex ph_ecog2 meal_caln / \n dist=bin link=logit;\nrun;\n```\n\n``` default\nClass Level Information\nClass Levels Values\n-----------------------------------------------------------------------------\ntrt01pn 2 2 1 \nsex 2 2 1\n-----------------------------------------------------------------------------\n\nResponse Profile\nOrdered value wt_gain Total Frequency\n-----------------------------------------------------------------------------\n1 1 48 \n2 0 122\n-----------------------------------------------------------------------------\n\nPROC GENMOD is modeling the probability that wt_gain='1'. \n\nAlgorithm Converged.\n\n Analysis of Maximum Likelihood Estimates \n\nParameter DF Estimate Standard Wald 95% Wald Pr>ChiSq\n Error CIs Chi-Square\n-----------------------------------------------------------------------------\nIntercept 1 -2.6415 1.5140 -5.6090 0.3259 3.04 0.0810\ntrt01pn 2 1 0.3888 0.3782 -0.3524 1.1299 1.03 0.3039\ntrt01pn 1 0 0.0000 0.0000 0.0000 0.0000 . .\nage 1 0.0123 0.0212 -0.0292 0.0537 0.34 0.5624 \nsex 2 1 0.8321 0.3744 0.0983 1.5659 4.97 0.0262 \nsex 1 0 0.0000 0.0000 0.0000 0.0000 . 
.\nph_ecog 1 -0.3764 0.2638 -0.8935 0.1407 2.03 0.1537 \nmeal_cal 1 -0.0008 0.0004 -0.0000 0.0017 3.59 0.0581 \nscale 0 1.0000 0.0000 1.0000 1.0000\n----------------------------------------------------------------------------\nNote: The scale parameter was held fixed\n```\n\n## Modelling using Proc Logistic\n\nThe same model above can also be modelled using Proc Logistic. You no longer have to specify the distribution and link function, however you do need to add an option `/ param=glm` on the class row. Different parameterizations are discussed later in the context of forming treatment contrast statements.\n\nFor now, all you need to know is that using `/param=glm` ensures exp(estimates)=odds ratio. You will also note that in the `class level information`, SAS now tells you the `design variables`. This will also be important later when we learn more about parameterization.\n\nProc Logistic is often preferred above Proc Genmod as it outputs the Odds Ratios and 95% CIs for you, without you having to back transform them using exponential of the MLEs yourself.\n\nNOTE: that the 95% confidence limits are being calculated using the Wald method. This assumes symmetric intervals around the maximum likelihood estimate using a normal distribution assumption (MLE +/-1.96\\* SE). 
Alternative confidence interval estimation methods exist such as the profile likelihood method but SAS does not calculate these.\n\n```{sas}\nproc logistic data=lung; \n class trt01pn (ref=\"1\") sex (ref=\"1\") /param=glm;\n model wt_gain(event=\"1\") = trt01pn age sex ph_ecog2 meal_caln;\nrun;\n```\n\n``` default\nResponse Profile\nOrdered value wt_gain Total Frequency\n-----------------------------------------------------------------------------\n1 0 122 \n2 1 48\n-----------------------------------------------------------------------------\nProbability modeled is wt_gain=1\n\nNote: 58 observations were deleted due to missing values fro the repsonse or\nexplanatory variables.\n\nClass Level Information\nClass Levels Design Variables\n-----------------------------------------------------------------------------\ntrt01pn 2 1 0 \n 1 0 1\nsex 2 1 0\n 1 0 1\n-----------------------------------------------------------------------------\nConvergence criterion (GCONV=1E-8) satisfied.\n\n Analysis of Maximum Likelihood Estimates \n\nParameter DF Estimate Standard Wald Pr>ChiSq\n Error Chi-Square\n-----------------------------------------------------------------------------\nIntercept 1 -2.6415 1.5140 3.0440 0.0810\ntrt01pn 2 1 0.3888 0.3782 1.0569 0.3039\ntrt01pn 1 0 0.0000 . . .\nage 1 0.0123 0.0212 0.3356 0.5624 \nsex 2 1 0.8321 0.3744 4.9400 0.0262 \nsex 1 0 0.0000 . . 
.\nph_ecog 1 -0.3764 0.2638 2.0349 0.1537 \nmeal_cal 1 -0.000850 0.000449 3.5895 0.0581 \n----------------------------------------------------------------------------\n \n Odds Ratio Estimates \n\nEffect Point Estimate 95% Wald Confidence Limits\n-----------------------------------------------------------------------------\ntrt01pn 2 vs 1 1.475 0.703 3.095\nage 1.012 0.971 1.055\nsex 2 vs 1 2.298 1.103 4.787 \nph_ecog 0.686 0.409 1.151 \nmeal_cal 1.001 1.000 1.002 \n----------------------------------------------------------------------------\n```\n\n## **Model Comparison**\n\nTo compare two logistic models, the -2 \\* Log Likelihood from each model can be compared against a $\\chi^2$-distribution with degrees of freedom calculated using the difference in the two models' parameters.\n\n```{sas}\nModel 1: model wt_gain(event=\"1\") = trt01pn age sex ph_ecog2 meal_caln;\n```\n\n``` default\nModel Fit Statistics\nCriterion Intercept Only Intercept and Covariates \n--------------------------------------------------------\nAIC 204.355 202.460\nSC 207.491 221.274\n-2 Log L 202.355 190.460 \n--------------------------------------------------------\n\nModel 2: model wt_gain(event=\"1\") = trt01pn sex ph_ecog2 meal_caln;\n\nModel Fit Statistics\nCriterion Intercept Only Intercept and Covariates \n--------------------------------------------------------\nAIC 204.355 200.798\nSC 207.491 216.477\n-2 Log L 202.355 190.798 \n--------------------------------------------------------\n\n190.460 - 190.798 = -0.338 which using a $\\chi^2$-distribution corresponds to p=0.5606\n```\n\nSAS also allows us to fit forward or backwards stepwise selection. Below we specify to stop when we have 4 variables left in the model. 
This is not commonly done in practice but is included to highlight the difference in using a selection procedure compared to doing the difference betweeen the -2 \\* log likelihood models using a $\\chi^2$-distribution.\n\n```{sas}\nproc logistic data=lung; \n class trt01pn (ref=\"1\") sex (ref=\"1\") /param=glm;\n model wt_gain(event=\"1\") = trt01pn age sex ph_ecog2 meal_caln/ \n selection=backward stop=4;\nrun;\n```\n\n``` default\nStep 1: Effect age is removed\nSummary of Backward Elimination\nStep Effect Removed DF Number In Wald Chi-Square Pr>ChiSq\n-----------------------------------------------------------------------------\n1 Age 1 4 0.3356 0.5624 \n-----------------------------------------------------------------------------\n```\n\nNOTE: the chi-square test summary of backward elimination, p=0.5624 is slightly different to using the -2 \\* log likelihood models using a $\\chi^2$-distribution p=0.5606.\n\nThis is because the backward elimination process in SAS uses the residual sums of squares and the F statistic. Starting with the full model, it removes the parameter with the least significant F statistic until all effects in the model have F statistics significant as a certain level. The F statistic is calculated as:\n\n$$F=\\frac{(RSS_{p-k}-RSS_p)/k}{RSS_p /(n-p-k)}$$ where RSS = Residual sums of squares, n=number of observations in the analysis, p=number of parameters in fuller model (exc. intercept), k=number of degrees of freedom associated with the effect you are dropping, $$RSS_p$$ =RSS for the fuller model, $$RSS_{p-k}$$ = RSS for the reduced model.\n\n## Parameterization of model effects (categorical covariates) in SAS\n\nThe most common problem when fitting logistic regression in SAS, is getting SAS to model the binary variable (events) and any categorical covariates correctly. Using `proc genmod` (using dist=bin and link=logit options), there is no issue as SAS defaults to using GLM parameterization. 
However using `proc logistic` there are three ways to parameterize categorical variables, and the default is `/PARAM=EFFECT` which can cause confusion when interpreting your model.\n\nTo demonstrate, we will now model a categorical variable called Dose, which has 3 treatment levels (1=10mg Active, 2=20 mg Active, 3=Placebo). The reference is now dose=3. You must pay close attention to the table of Class level information in order to understand how SAS is modelling your data.\n\n### CLASS X Y Z /PARAM=Effect\n\nThis is the SAS default such that if you do not specify the `/param` option, SAS defaults to using this method.\n\nWith the EFFECT option, dose_id has 3 levels, and so needs 2 design variables (β1 and β2). Sex has 2 levels so uses just 1 design variable (β1). For dose_id, the reference level (Placebo) is given values of \"-1\" for both of the β1 and β2 parameters. General model: Y= α + β1x1 + β2x1 {+β3x3} etc, representing each parameter in the model.\n\n```{sas}\nproc logistic data=lung; \n class dose_id (ref=\"3\") sex (ref=\"1\") /param=effect;\n model wt_gain(event=\"1\") = dose_id age sex ph_ecog2 meal_caln;\nrun;\n```\n\n``` default\nClass Level Information\nClass Value Design Variables\n--------------------------------------------------------\n dose_β1 dose_β2\ndose_id 1 1 0\n 2 0 1\n 3 -1 -1\n \n sex_β3 \nSEX 1 -1\n 2 1\n--------------------------------------------------------\n```\n\nIf we want to estimate the effect of treatment (ignoring the other covariates), the Class Level Information can be translated into the table below, which is then used to form contrast statements.\n\n``` default\nα is the intercept, and β1 and β2 (including the sign +/-) are from the design variables above.\n \nDose_id Effect \n------------------------------\n10mg Active Y = α + β1 \n20mg Active Y = α + β2 \nPlacebo Y = α - β1 - β2 \n------------------------------\n```\n\nTo compare 10mg Active vs Placebo, we would do the following:\n\n``` default\n(α + β1 ) - (α - β1 - 
β2)\n= 2 β1 + β2 \n= 2 β1 + 1 β2\n```\n\nThis equates to a contrast statement as follows:\n\n```{sas}\nproc logistic data=lung; \n class dose_id (ref=\"3\") sex (ref=\"1\") /param=effect;\n model wt_gain(event=\"1\") = dose_id age sex ph_ecog2 meal_caln;\n CONTRAST \"10mg vs. Placebo\" dose_id 2 1 / e; \nrun;\n```\n\nTo compare the average of (10mg Active and 20mg Active) vs Placebo, we would do the following:\n\n``` default\n((α + β1 + α + β2 ) /2 ) - (α - β1 - β2)\n= α + 0.5 β1 + 0.5 β2 - α + β1 + β2 \n= 1.5 β1 + 1.5 β2\n```\n\nThis equates to a contrast statement as follows:\n\n```{sas}\nproc logistic data=lung; \n class dose_id (ref=\"3\") sex (ref=\"1\") /param=effect;\n model wt_gain(event=\"1\") = dose_id age sex ph_ecog2 meal_caln;\n CONTRAST \"Active (10mg + 20mg) vs. Placebo\" dose_id 1.5 1.5 / e; \nrun;\n```\n\nAs you can see, these contrasts are not very intuitive and hence it is not reccomended to use the default SAS option of /param=effect, since its easy to end up with the wrong contrasts.\n\n``` default\nContract Test Results\nContrast DF Wald Chi-Square Pr>ChiSq\n--------------------------------------------------------\nActive (10mg +20mg) vs Placebo 1 1.1610 0.2813\n```\n\n### CLASS X Y Z /PARAM=glm\n\nNow let's look at the `param=glm` option. GLM parameterization has a design variable for each level of a parameter. 
Hence for dose with 3 levels, we have 3 design variables (β1, β2 and β3).\n\n```{sas}\nproc logistic data=lung; \n class dose_id (ref=\"3\") sex (ref=\"1\") /param=glm;\n model wt_gain(event=\"1\") = dose_id age sex ph_ecog2 meal_caln;\nrun;\n```\n\n``` default\nClass Level Information\nClass Value Design Variables\n--------------------------------------------------------\n dose_β1 dose_β2 dose_β3\ndose_id 1 1 0 0\n 2 0 1 0 \n 3 0 0 1\n \n sex_β3 sex_β4 \nSEX 1 1 0\n 2 0 1\n--------------------------------------------------------\n```\n\nIf we want to estimate the effect of treatment (ignoring the other covariates), the Class Level Information can be translated into the table below, which is then used to form contrast statements.\n\n``` default\nα is the intercept, and β1 and β2 and β3 (including the sign +/-) are from the design variables above.\n \nDose_id Effect \n------------------------------\n10mg Active Y = α + β1 \n20mg Active Y = α + β2 \nPlacebo Y = α - β3 \n--------------------------------------------------------\n```\n\nTo compare 10mg Active vs Placebo, we would do the following:\n\n``` default\n(α + β1 ) - (α - β3)\n= β1 - β3 \n= 1 β1 - 1 β3\n```\n\nThis equates to a contrast statement as follows:\n\n```{sas}\nproc logistic data=lung; \n class dose_id (ref=\"3\") sex (ref=\"1\") /param=glm;\n model wt_gain(event=\"1\") = dose_id age sex ph_ecog2 meal_caln;\n CONTRAST \"10mg vs. Placebo\" trt 1 -1 / e; \nrun;\n```\n\nAs you can see, this contrast is much more intuitive. 
If you want to compare the effect of Active (10mg) compared to placebo, you take the effect of 10mg and subtract the effect of placebo !\n\nTo compare the average of (10mg Active and 20mg Active) vs Placebo, we would do the following:\n\n``` default\n(α + β1 + α + β2)/2 - (α + β3) \n= α + 0.5 β1 + 0.5 β2 - α - β3 \n= 0.5 β1 + 0.5 β2 - β3\n```\n\nThis equates to a contrast statement as follows:\n\n```{sas}\nproc logistic data=lung; \n class dose_id (ref=\"3\") sex (ref=\"1\") /param=glm;\n model wt_gain(event=\"1\") = dose_id age sex ph_ecog2 meal_caln;\nCONTRAST \"Active (10mg + 20mg) vs. Placebo (1)\" trt 0.5 0.5 -1 / e; \nrun;\n```\n\nAs you can see, this contrast is much more intuitive. If you want to compare the average of Active (10mg + 20mg) compared to placebo, you take half the effect of 10mg plus half the effect of 20mg and substract the effect of placebo!\n\n``` default\nContract Test Results\nContrast DF Wald Chi-Square Pr>ChiSq\n--------------------------------------------------------\nActive (10mg +20mg) vs Placebo 1 1.1610 0.2813\n```\n\n### CLASS X Y Z /PARAM=Ref\n\nNow let's look at the `param=ref` option. Similar to param=effect, ref parameterization uses 1 less design variable compared to the number of levels each parameter has, but the parameterization is different. 
For dose with 3 levels, we have 2 design variables (β1 and β2).\n\n```{sas}\nproc logistic data=lung; \n class dose_id (ref=\"3\") sex (ref=\"1\") /param=ref;\n model wt_gain(event=\"1\") = dose_id age sex ph_ecog2 meal_caln;\nrun;\n```\n\n``` default\nClass Level Information\nClass Value Design Variables\n--------------------------------------------------------\n dose_β1 dose_β2 \ndose_id 1 1 0 \n 2 0 1 \n 3 0 0 \n \n sex_β3 \nSEX 1 0 \n 2 1 \n--------------------------------------------------------\n```\n\nIf we want to estimate the effect of treatment (ignoring the other covariates), the Class Level Information can be translated into the table below, which is then used to form contrast statements.\n\n``` default\nα is the intercept, and β1 and β2 and β3 (including the sign +/-) are from the design variables above.\n \nDose_id Effect \n------------------------------\n10mg Active Y = α + β1 \n20mg Active Y = α + β2 \nPlacebo Y = α \n--------------------------------------------------------\n```\n\nTo compare 10mg Active vs Placebo, we would do the following:\n\n``` default\n(α + β1 ) - (α ) = β1 \n```\n\nThis equates to a contrast statement as follows:\n\n```{sas}\nproc logistic data=lung; \n class dose_id (ref=\"3\") sex (ref=\"1\") /param=glm;\n model wt_gain(event=\"1\") = dose_id age sex ph_ecog2 meal_caln;\n CONTRAST \"10mg vs. Placebo\" trt 1 / e; \nrun;\n```\n\nTo compare the average of (10mg Active and 20mg Active) vs Placebo, we would do the following:\n\n``` default\n(α + β1 + α + β2)/2 - α \n= α + 0.5 β1 + 0.5 β2 - α \n= 0.5 β1 + 0.5 β2\n```\n\nThis equates to a contrast statement as follows:\n\n```{sas}\nproc logistic data=lung; \n class dose_id (ref=\"3\") sex (ref=\"1\") /param=glm;\n model wt_gain(event=\"1\") = dose_id age sex ph_ecog2 meal_caln;\nCONTRAST \"Active (10mg + 20mg) vs. 
Placebo (1)\" trt 0.5 0.5 / e; \nrun;\n```\n\nAgain this is less intuitive than the param=glm parameterization, but the same results are obtained.\n\n``` default\nContract Test Results\nContrast DF Wald Chi-Square Pr>ChiSq\n--------------------------------------------------------\nActive (10mg +20mg) vs Placebo 1 1.1610 0.2813\n```\n\n# Contrast statements for 2 or more treatments\n\nThe Contrast statement, only outputs the p-value for the contrast, but it is common to also require an estimate of the difference between the treatments, with associated 95% CI. You can do this by changing `contrast` to an `estimate` statement. Note that the parameterization of the contrast remains the same as when using a contrast statement as shown below. These estimates and 95% CI's can be back transformed to give you the Odds ratio of the contrast and associated 95% CI. The estimate coefficients table should be checked for accuracy versus the contrast you are trying to do.\n\n```{sas}\nproc logistic data=lung;\n class dose_id (ref=\"3\") sex (ref=\"1\") /param=glm;\n model wt_gain(event=\"1\") = dose_id age sex ph_ecog2 meal_caln; \n Estimate \"Active (10mg + 20mg) vs. Placebo (1)\" dose_id 0.5 0.5 -1 / e cl; \nrun;\n```\n\n``` default\nEstimate Coefficients\nParameter dose_id sex Row1\n--------------------------------------------------------\ndose_id 1 1 0.5\ndose_id 2 2 0.5 \ndose_id 3 3 -1\n--------------------------------------------------------\n\nEstimate\nLabel estimate Std Err Z value Pr>|z| Alpha Lower Upper\n--------------------------------------------------------------------------\nActive (10mg +20mg) vs. Placebo (1)\n -0.4096 0.3802 -1.08 0.2813 0.05 -1.1547 0.3355 \n--------------------------------------------------------------------------\n\nThe odds ratio for the comparison of Active (10mg +20mg) vs. 
Placebo is \n exp(-0.4096) (95% CI: exp(-1.1547) to exp(0.3355))\n= OR: 0.664 (95%CI for OR: 0.315 to 1.399)\n```\n\n# Ensuring you are modelling the correct Binary event in SAS\n\nWith logistic regression, we often want to model the number of \"Successes\". However, by default, SAS sorts alphabetically/numerically and selects the first occurring EVENT alphabetically as the one it's going to model.\n\nIt's a common mistake, and we find SAS modelling the number of failures instead of successes. Very common when your response is: 'Responder' vs 'Non-responder', SAS will model the Non-responders as 'N' is alphabetically first before 'R'!\n\nFor this reason, It is recommended to always use the event=\"Y\" option.\n\nOptions such as `ORDER=DATA|FORMATTED|FREQ|INTERNAL` as well as descending can be used to ensure the correct levels of classification variables are being modelled. More detail [here](https://support.sas.com/documentation/cdl/en/statug/63347/HTML/default/viewer.htm#statug_logistic_sect006.htm)\n\n# References\n\nCreated using : SAS release:9.04.01M7P08062020\"\n\n", - "supporting": [ - "logistic-regr_files" - ], - "filters": [], - "includes": {} - } -} \ No newline at end of file diff --git a/_freeze/SAS/manova/execute-results/html.json b/_freeze/SAS/manova/execute-results/html.json index a00a65928..045afb0ce 100644 --- a/_freeze/SAS/manova/execute-results/html.json +++ b/_freeze/SAS/manova/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "b25b7fc1a5ed367c7f032d5d701267e9", + "hash": "b7e14f3eb558581388d4d4de0f17298d", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Multivariate Analysis of Variance in SAS\"\nexecute: \n eval: false\n---\n\n**Example 39.6 Multivariate Analysis of Variance** from [SAS MANOVA User Guide](https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_glm_sect051.htm)\n\nThis example employs multivariate analysis of variance (MANOVA) to measure differences in the chemical 
characteristics of ancient pottery found at four kiln sites in Great Britain. The data are from Tubb, Parker, and Nickless (1980), as reported in Hand et al. (1994).\n\nFor each of 26 samples of pottery, the percentages of oxides of five metals are measured. The following statements create the data set and invoke the GLM procedure to perform a one-way MANOVA. Additionally, it is of interest to know whether the pottery from one site in Wales (Llanederyn) differs from the samples from other sites; a CONTRAST statement is used to test this hypothesis.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\n# Example code\ntitle \"Romano-British Pottery\";\ndata pottery;\n input Site $12. Al Fe Mg Ca Na;\n datalines;\n Llanederyn 14.4 7.00 4.30 0.15 0.51\n Llanederyn 13.8 7.08 3.43 0.12 0.17\n Llanederyn 14.6 7.09 3.88 0.13 0.20\n Llanederyn 11.5 6.37 5.64 0.16 0.14\n Llanederyn 13.8 7.06 5.34 0.20 0.20\n Llanederyn 10.9 6.26 3.47 0.17 0.22\n Llanederyn 10.1 4.26 4.26 0.20 0.18\n Llanederyn 11.6 5.78 5.91 0.18 0.16\n Llanederyn 11.1 5.49 4.52 0.29 0.30\n Llanederyn 13.4 6.92 7.23 0.28 0.20\n Llanederyn 12.4 6.13 5.69 0.22 0.54\n Llanederyn 13.1 6.64 5.51 0.31 0.24\n Llanederyn 12.7 6.69 4.45 0.20 0.22\n Llanederyn 12.5 6.44 3.94 0.22 0.23\n Caldicot 11.8 5.44 3.94 0.30 0.04\n Caldicot 11.6 5.39 3.77 0.29 0.06\n IslandThorns 18.3 1.28 0.67 0.03 0.03\n IslandThorns 15.8 2.39 0.63 0.01 0.04\n IslandThorns 18.0 1.50 0.67 0.01 0.06\n IslandThorns 18.0 1.88 0.68 0.01 0.04\n IslandThorns 20.8 1.51 0.72 0.07 0.10\n AshleyRails 17.7 1.12 0.56 0.06 0.06\n AshleyRails 18.3 1.14 0.67 0.06 0.05\n AshleyRails 16.7 0.92 0.53 0.01 0.05\n AshleyRails 14.8 2.74 0.67 0.03 0.05\n AshleyRails 19.1 1.64 0.60 0.10 0.03\n;\nrun;\n\nproc glm data=pottery;\n class Site;\n model Al Fe Mg Ca Na = Site;\n contrast 'Llanederyn vs. 
the rest' Site 1 1 1 -3;\n manova h=_all_ / printe printh;\nrun;\n```\n:::\n\n\nAfter the summary information (1), PROC GLM produces the univariate analyses for each of the dependent variables (2-6). These analyses show that sites are significantly different for all oxides individually. You can suppress these univariate analyses by specifying the NOUNI option in the MODEL statement.\n\n**1 Summary Information about Groups**\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/manova/manova1_class.jpg){fig-align='center' width=100%}\n:::\n:::\n\n\n**2 Univariate Analysis of Variance for Aluminum Oxide (AI)**\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/manova/manova2_anova_ao.jpg){fig-align='center' width=100%}\n:::\n:::\n\n\n**3 Univariate Analysis of Variance for Iron Oxide (Fe)**\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/manova/manova3_anova_fe.jpg){fig-align='center' width=100%}\n:::\n:::\n\n\n**4 Univariate Analysis of Variance for Calcium Oxide (Ca)**\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/manova/manova4_anova_ca.jpg){fig-align='center' width=100%}\n:::\n:::\n\n\n**5 Univariate Analysis of Variance for Magnesium Oxide (Mg)**\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/manova/manova5_anova_mg.jpg){fig-align='center' width=100%}\n:::\n:::\n\n\n**6 Analysis of Variance for Sodium Oxide (Na)**\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/manova/manova6_anova_na.jpg){fig-align='center' width=100%}\n:::\n:::\n\n\nThe PRINTE option in the MANOVA statement displays the elements of the error matrix (7), also called the Error Sums of Squares and Crossproducts matrix. 
The diagonal elements of this matrix are the error sums of squares from the corresponding univariate analyses.\n\nThe PRINTE option also displays the partial correlation matrix (7) associated with the E matrix. In this example, none of the oxides are very strongly correlated; the strongest correlation (r=0.488) is between magnesium oxide and calcium oxide.\n\n**7 Error SSCP Matrix and Partial Correlations**\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/manova/manova7_error.jpg){fig-align='center' width=100%}\n:::\n:::\n\n\nThe PRINTH option produces the SSCP matrix for the hypotheses being tested (Site and the contrast); (8 and 9). Since the Type III SS are the highest-level SS produced by PROC GLM by default, and since the HTYPE= option is not specified, the SSCP matrix for Site gives the Type III H matrix. The diagonal elements of this matrix are the model sums of squares from the corresponding univariate analyses.\n\nFour multivariate tests are computed, all based on the characteristic roots and vectors of $E^{-1}H$. These roots and vectors are displayed along with the tests. All four tests can be transformed to variates that have distributions under the null hypothesis. Note that the four tests all give the same results for the contrast, since it has only one degree of freedom. 
In this case, the multivariate analysis matches the univariate results: there is an overall difference between the chemical composition of samples from different sites, and the samples from Llanederyn are different from the average of the other sites.\n\n**8 Hypothesis SSCP Matrix and Multivariate Tests for Overall Site Effect**\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/manova/manova8_hyp_tests.jpg){fig-align='center' width=100%}\n:::\n:::\n\n\n**9 Hypothesis SSCP Matrix and Multivariate Tests for Differences between Llanederyn and the Other Sites**\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/manova/manova9_hyp_tests_llane.jpg){fig-align='center' width=100%}\n:::\n:::\n\n\n**References**\n\n[SAS MANOVA User Guide](https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_glm_sect051.htm)", + "markdown": "---\ntitle: \"Multivariate Analysis of Variance in SAS\"\n---\n\n**Example 39.6 Multivariate Analysis of Variance** from [SAS MANOVA User Guide](https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_glm_sect051.htm)\n\nThis example employs multivariate analysis of variance (MANOVA) to measure differences in the chemical characteristics of ancient pottery found at four kiln sites in Great Britain. The data are from Tubb, Parker, and Nickless (1980), as reported in Hand et al. (1994).\n\nFor each of 26 samples of pottery, the percentages of oxides of five metals are measured. The following statements create the data set and invoke the GLM procedure to perform a one-way MANOVA. Additionally, it is of interest to know whether the pottery from one site in Wales (Llanederyn) differs from the samples from other sites; a CONTRAST statement is used to test this hypothesis.\n\n```sas\n# Example code\ntitle \"Romano-British Pottery\";\ndata pottery;\n input Site $12. 
Al Fe Mg Ca Na;\n datalines;\n Llanederyn 14.4 7.00 4.30 0.15 0.51\n Llanederyn 13.8 7.08 3.43 0.12 0.17\n Llanederyn 14.6 7.09 3.88 0.13 0.20\n Llanederyn 11.5 6.37 5.64 0.16 0.14\n Llanederyn 13.8 7.06 5.34 0.20 0.20\n Llanederyn 10.9 6.26 3.47 0.17 0.22\n Llanederyn 10.1 4.26 4.26 0.20 0.18\n Llanederyn 11.6 5.78 5.91 0.18 0.16\n Llanederyn 11.1 5.49 4.52 0.29 0.30\n Llanederyn 13.4 6.92 7.23 0.28 0.20\n Llanederyn 12.4 6.13 5.69 0.22 0.54\n Llanederyn 13.1 6.64 5.51 0.31 0.24\n Llanederyn 12.7 6.69 4.45 0.20 0.22\n Llanederyn 12.5 6.44 3.94 0.22 0.23\n Caldicot 11.8 5.44 3.94 0.30 0.04\n Caldicot 11.6 5.39 3.77 0.29 0.06\n IslandThorns 18.3 1.28 0.67 0.03 0.03\n IslandThorns 15.8 2.39 0.63 0.01 0.04\n IslandThorns 18.0 1.50 0.67 0.01 0.06\n IslandThorns 18.0 1.88 0.68 0.01 0.04\n IslandThorns 20.8 1.51 0.72 0.07 0.10\n AshleyRails 17.7 1.12 0.56 0.06 0.06\n AshleyRails 18.3 1.14 0.67 0.06 0.05\n AshleyRails 16.7 0.92 0.53 0.01 0.05\n AshleyRails 14.8 2.74 0.67 0.03 0.05\n AshleyRails 19.1 1.64 0.60 0.10 0.03\n;\nrun;\n\nproc glm data=pottery;\n class Site;\n model Al Fe Mg Ca Na = Site;\n contrast 'Llanederyn vs. the rest' Site 1 1 1 -3;\n manova h=_all_ / printe printh;\nrun;\n```\n\nAfter the summary information (1), PROC GLM produces the univariate analyses for each of the dependent variables (2-6). These analyses show that sites are significantly different for all oxides individually. 
You can suppress these univariate analyses by specifying the NOUNI option in the MODEL statement.\n\n**1 Summary Information about Groups**\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/manova/manova1_class.jpg){fig-align='center' width=100%}\n:::\n:::\n\n\n**2 Univariate Analysis of Variance for Aluminum Oxide (AI)**\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/manova/manova2_anova_ao.jpg){fig-align='center' width=100%}\n:::\n:::\n\n\n**3 Univariate Analysis of Variance for Iron Oxide (Fe)**\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/manova/manova3_anova_fe.jpg){fig-align='center' width=100%}\n:::\n:::\n\n\n**4 Univariate Analysis of Variance for Calcium Oxide (Ca)**\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/manova/manova4_anova_ca.jpg){fig-align='center' width=100%}\n:::\n:::\n\n\n**5 Univariate Analysis of Variance for Magnesium Oxide (Mg)**\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/manova/manova5_anova_mg.jpg){fig-align='center' width=100%}\n:::\n:::\n\n\n**6 Analysis of Variance for Sodium Oxide (Na)**\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/manova/manova6_anova_na.jpg){fig-align='center' width=100%}\n:::\n:::\n\n\nThe PRINTE option in the MANOVA statement displays the elements of the error matrix (7), also called the Error Sums of Squares and Crossproducts matrix. The diagonal elements of this matrix are the error sums of squares from the corresponding univariate analyses.\n\nThe PRINTE option also displays the partial correlation matrix (7) associated with the E matrix. 
In this example, none of the oxides are very strongly correlated; the strongest correlation (r=0.488) is between magnesium oxide and calcium oxide.\n\n**7 Error SSCP Matrix and Partial Correlations**\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/manova/manova7_error.jpg){fig-align='center' width=100%}\n:::\n:::\n\n\nThe PRINTH option produces the SSCP matrix for the hypotheses being tested (Site and the contrast); (8 and 9). Since the Type III SS are the highest-level SS produced by PROC GLM by default, and since the HTYPE= option is not specified, the SSCP matrix for Site gives the Type III H matrix. The diagonal elements of this matrix are the model sums of squares from the corresponding univariate analyses.\n\nFour multivariate tests are computed, all based on the characteristic roots and vectors of $E^{-1}H$. These roots and vectors are displayed along with the tests. All four tests can be transformed to variates that have distributions under the null hypothesis. Note that the four tests all give the same results for the contrast, since it has only one degree of freedom. 
In this case, the multivariate analysis matches the univariate results: there is an overall difference between the chemical composition of samples from different sites, and the samples from Llanederyn are different from the average of the other sites.\n\n**8 Hypothesis SSCP Matrix and Multivariate Tests for Overall Site Effect**\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/manova/manova8_hyp_tests.jpg){fig-align='center' width=100%}\n:::\n:::\n\n\n**9 Hypothesis SSCP Matrix and Multivariate Tests for Differences between Llanederyn and the Other Sites**\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/manova/manova9_hyp_tests_llane.jpg){fig-align='center' width=100%}\n:::\n:::\n\n\n**References**\n\n[SAS MANOVA User Guide](https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_glm_sect051.htm)", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/SAS/mcnemar/execute-results/html.json b/_freeze/SAS/mcnemar/execute-results/html.json index 03c0989e9..26cae0807 100644 --- a/_freeze/SAS/mcnemar/execute-results/html.json +++ b/_freeze/SAS/mcnemar/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "5ddc8ad20fca79af4391792c3de33a97", + "hash": "bdddf92ea9f7e5d58d37a6a4d4a37431", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"McNemar's test in SAS\"\nexecute: \n eval: false\n---\n\n### Performing McNemar's test in SAS\n\nTo demonstrate McNemar's test in SAS, data concerning the presence or absence of cold symptoms was used. The symptoms were recorded by the same children at the age of 12 and 14. A total of 2638 participants were involved.\n\n#### Using PROC FREQ\n\nTesting for a significant difference in cold symptoms between ages, using McNemar's test in SAS, can be performed as below. 
The AGREE option is stated within the FREQ procedure to produce agreement tests and measures, including McNemar's test.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc freq data=colds;\n tables age12*age14 / agree;\nrun;\n```\n:::\n\n\n#### Results\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/mcnemar/sas-mcnemar.png){fig-align='center' width=40%}\n:::\n:::\n\n\nSAS outputs the tabulated data for proportions, the McNemar's Chi-square statistic, and the Kappa coefficient with 95% confidence limits. There is no continuity correction used and no option to include this.", + "markdown": "---\ntitle: \"McNemar's test in SAS\"\n---\n\n### Performing McNemar's test in SAS\n\nTo demonstrate McNemar's test in SAS, data concerning the presence or absence of cold symptoms was used. The symptoms were recorded by the same children at the age of 12 and 14. A total of 2638 participants were involved.\n\n#### Using PROC FREQ\n\nTesting for a significant difference in cold symptoms between ages, using McNemar's test in SAS, can be performed as below. The AGREE option is stated within the FREQ procedure to produce agreement tests and measures, including McNemar's test.\n\n```sas\nproc freq data=colds;\n tables age12*age14 / agree;\nrun;\n```\n\n#### Results\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/mcnemar/sas-mcnemar.png){fig-align='center' width=40%}\n:::\n:::\n\n\nSAS outputs the tabulated data for proportions, the McNemar's Chi-square statistic, and the Kappa coefficient with 95% confidence limits. 
There is no continuity correction used and no option to include this.", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/SAS/mi_mar_regression/execute-results/html.json b/_freeze/SAS/mi_mar_regression/execute-results/html.json index 035505050..dde169104 100644 --- a/_freeze/SAS/mi_mar_regression/execute-results/html.json +++ b/_freeze/SAS/mi_mar_regression/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "67ebd49ca88ef5ccef725c868c61eb88", + "hash": "c3cdf801ddd444b9dcedfac02b5f86ab", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Multiple Imputaton: Linear Regression in SAS\"\nexecute: \n eval: false\n---\n\n## Input dataset preparation before multiple imputation\n\n1. Prepare a subset of the analysis dummy dataset, details as below:\n\n- `USUBJID` (length 4): Subject ID.\n- `SEX1N`: Sex A random integer between 0 and 1 representing a binary variable (perhaps gender).\n- `AVISITN`: Visit number (1 to 5 for each subject).\n- `AVAL`: A random value between 1 and 2, with a random 10% chance of being missing.\n\nAs PROC MI requires a horizontal, one record per subject data set. More often than not, the data we impute will come from a vertical ADaM BDS data set. 
So we need to first transpose the aval with the avisitn as ID (assuming avisitn = 1 to 5),creating transposed variable v1-v5.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\ndata dummy;\n length USUBJID $4;\n do i=1 to 10;\n sex1n=int(ranuni(0)*2);\n do j=1 to 5;\n USUBJID=strip(put(1000+i,best.));\n AVISITN=j;\n AVAL=round(1+ranuni(0),0.01);\n if ranuni(0) <0.1 then aval=.;\n output;\n end;\n end;\n drop i j;\nrun;\n\nproc sort data=dummy; \n by usubjid sex1n;\nrun;\n\nproc transpose data=dummy out=dummyt(drop=_name_) prefix=v;\n by USUBJID sex1n;\n id avisitn;\n var aval;\nrun;\n\nproc print data=dummyt(obs=5);\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/mi_mar_linear_sas/mi_mar_reg_view_Data.PNG){fig-align='center' width=100%}\n:::\n:::\n\n\n## Check missing data patterns\n\nThe pattern can be checked using the following code, missing data pattern could be classified as \"Monotone\" or \"Arbitrary\"\n\n- \"Monotone\" : The missingness of data follows a specific order such that if a certain variable is missing for a particular observation, all subsequent variables are also missing for that observation. If a dataset has columns X1,X2,...,Xk a monotone missing pattern appears when: If Xj is missing, then Xj+1, Xj+2,...,Xj+3 are missing.\n\n- \"Arbitrary\" : The missingness of data does not follow any specific order or predictable sequence. 
Data can be missing at random points without a discernible pattern.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nods select MissPattern;\nproc mi data=dummyt nimpute=0;\n var v1 - v5;\nrun;\n```\n:::\n\n\nAs below figure shows the missingness dose not follow any specific order, obviously the missing pattern is arbitrary and non-monotone missing pattern.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/mi_mar_linear_sas/mi_mar_reg_missing_pattern.PNG){fig-align='center' width=100%}\n:::\n:::\n\n\n## FCS Regression for non-monotone missing pattern\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc mi data=dummyt out=outdata nimpute=10 seed=123;\n class sex1n;\n var sex1n v1 - v5;\n fcs reg (v1-v5 /details);\nrun;\n```\n:::\n\n\n- The `VAR` statement above listing the variables to be analyzed, should match the statistical models for efficacy analysis per SAP, which may include TRTPN, necessary grouping variable (for eg AGEGR1/AGEGR1N), and all outcome variables coming from repeated assessments\n- `NIMPUTE` : the number of imputations\n- `SEED` : the seed to begin random number generator\n- Note that depending on the SAS Proc MI algorithm, if there are more factors, the ordering of factors, for example SEX1N, RACE1N, may have an effect on the generation of the imputed values for the missing values, i.e., different orderings of these factors will generate different imputed values (e.g may happen in case of monotone missing pattern) from PROC MI procedure. 
The ordering of subjects in the dataset may also have an effect on the generation of the imputed values for the missing values.\n- The `CLASS` statement specifies the classification variables in the VAR statement.\n- `FCS` is displayed as the method, if not specified then MCMC will be the default method.\n- `REG` is the specified model which in this example is linear regression)\n- The `DETAILS` option displays the regression coefficients in the regression model used in each imputation.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/mi_mar_linear_sas/mi_mar_reg_fcs.PNG){fig-align='center' width=100%}\n:::\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/mi_mar_linear_sas/mi_mar_reg_fcs2.PNG){fig-align='center' width=100%}\n:::\n:::\n\n\n## Monotone Regression for monotone missing pattern\n\nLet's update above SAS code to generate a dummy dataset with monotone missing pattern\n\n\n::: {.cell}\n\n```{.sas .cell-code}\ndata dummy;\n length USUBJID $4;\n do i=1 to 10; \n sex1n=int(ranuni(0)*2); \n USUBJID = strip(put(1000+i, best.));\n miss_start = ceil(ranuni(0) * 5); /* Randomly decide the start point for missing data (1 to 5) */\n do j=1 to 5; \n AVISITN = j;\n if j >= miss_start then AVAL = .; /* If the visit number is greater than or equal to miss_start, make AVAL missing */\n else AVAL = round(1 + ranuni(0), 0.01);\n output; \n end;\n end;\n drop i miss_start j;\nrun;\n\nproc sort data=dummy; \n by usubjid sex1n;\nrun;\n\nproc transpose data=dummy out=dummyt(drop=_name_) prefix=v;\n by USUBJID sex1n;\n id avisitn;\n var aval;\nrun;\n\nproc print data=dummyt(obs=5);\nrun;\n\nods select MissPattern;\nproc mi data=dummyt nimpute=0;\n var v1 - v5;\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/mi_mar_linear_sas/mi_mar_reg_missing_pattern_monotone.PNG){fig-align='center' width=100%}\n:::\n:::\n\n\nIn this case we will use `monotone` 
statement instead of `FCS` for the imputation, example code as below:\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc mi data=dummyt out=outdata nimpute=10 seed=123;\n class sex1n;\n var sex1n v1 - v5;\n monotone reg (v1-v5 /details);\nrun;\n```\n:::\n\n\n## Reference\n\n- [User's Guide The MI Procedure](https://support.sas.com/documentation/onlinedoc/stat/141/mi.pdf)\n- [Multiple Imputation: A Statistical Programming Story](https://www.pharmasug.org/proceedings/2017/SP/PharmaSUG-2017-SP01.pdf)\n- [Examine patterns of missing data in SAS](https://blogs.sas.com/content/iml/2016/04/18/patterns-of-missing-data-in-sas.html#:~:text=PROC%20MI%20has%20an%20option%20to%20produce%20a,MissPattern%3B%20proc%20mi%20data%20%3DSashelp.Heart%20nimpute%3D%200%20%3B)", + "markdown": "---\ntitle: \"Multiple Imputaton: Linear Regression in SAS\"\n---\n\n## Input dataset preparation before multiple imputation\n\n1. Prepare a subset of the analysis dummy dataset, details as below:\n\n- `USUBJID` (length 4): Subject ID.\n- `SEX1N`: Sex A random integer between 0 and 1 representing a binary variable (perhaps gender).\n- `AVISITN`: Visit number (1 to 5 for each subject).\n- `AVAL`: A random value between 1 and 2, with a random 10% chance of being missing.\n\nAs PROC MI requires a horizontal, one record per subject data set. More often than not, the data we impute will come from a vertical ADaM BDS data set. 
So we need to first transpose the aval with the avisitn as ID (assuming avisitn = 1 to 5),creating transposed variable v1-v5.\n\n```sas\ndata dummy;\n length USUBJID $4;\n do i=1 to 10;\n sex1n=int(ranuni(0)*2);\n do j=1 to 5;\n USUBJID=strip(put(1000+i,best.));\n AVISITN=j;\n AVAL=round(1+ranuni(0),0.01);\n if ranuni(0) <0.1 then aval=.;\n output;\n end;\n end;\n drop i j;\nrun;\n\nproc sort data=dummy; \n by usubjid sex1n;\nrun;\n\nproc transpose data=dummy out=dummyt(drop=_name_) prefix=v;\n by USUBJID sex1n;\n id avisitn;\n var aval;\nrun;\n\nproc print data=dummyt(obs=5);\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/mi_mar_linear_sas/mi_mar_reg_view_Data.PNG){fig-align='center' width=100%}\n:::\n:::\n\n\n## Check missing data patterns\n\nThe pattern can be checked using the following code, missing data pattern could be classified as \"Monotone\" or \"Arbitrary\"\n\n- \"Monotone\" : The missingness of data follows a specific order such that if a certain variable is missing for a particular observation, all subsequent variables are also missing for that observation. If a dataset has columns X1,X2,...,Xk a monotone missing pattern appears when: If Xj is missing, then Xj+1, Xj+2,...,Xj+3 are missing.\n\n- \"Arbitrary\" : The missingness of data does not follow any specific order or predictable sequence. 
Data can be missing at random points without a discernible pattern.\n\n```sas\nods select MissPattern;\nproc mi data=dummyt nimpute=0;\n var v1 - v5;\nrun;\n```\n\nAs below figure shows the missingness dose not follow any specific order, obviously the missing pattern is arbitrary and non-monotone missing pattern.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/mi_mar_linear_sas/mi_mar_reg_missing_pattern.PNG){fig-align='center' width=100%}\n:::\n:::\n\n\n## FCS Regression for non-monotone missing pattern\n\n```sas\nproc mi data=dummyt out=outdata nimpute=10 seed=123;\n class sex1n;\n var sex1n v1 - v5;\n fcs reg (v1-v5 /details);\nrun;\n```\n\n- The `VAR` statement above listing the variables to be analyzed, should match the statistical models for efficacy analysis per SAP, which may include TRTPN, necessary grouping variable (for eg AGEGR1/AGEGR1N), and all outcome variables coming from repeated assessments\n- `NIMPUTE` : the number of imputations\n- `SEED` : the seed to begin random number generator\n- Note that depending on the SAS Proc MI algorithm, if there are more factors, the ordering of factors, for example SEX1N, RACE1N, may have an effect on the generation of the imputed values for the missing values, i.e., different orderings of these factors will generate different imputed values (e.g may happen in case of monotone missing pattern) from PROC MI procedure. 
The ordering of subjects in the dataset may also have an effect on the generation of the imputed values for the missing values.\n- The `CLASS` statement specifies the classification variables in the VAR statement.\n- `FCS` is displayed as the method, if not specified then MCMC will be the default method.\n- `REG` is the specified model which in this example is linear regression)\n- The `DETAILS` option displays the regression coefficients in the regression model used in each imputation.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/mi_mar_linear_sas/mi_mar_reg_fcs.PNG){fig-align='center' width=100%}\n:::\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/mi_mar_linear_sas/mi_mar_reg_fcs2.PNG){fig-align='center' width=100%}\n:::\n:::\n\n\n## Monotone Regression for monotone missing pattern\n\nLet's update above SAS code to generate a dummy dataset with monotone missing pattern\n\n```sas\ndata dummy;\n length USUBJID $4;\n do i=1 to 10; \n sex1n=int(ranuni(0)*2); \n USUBJID = strip(put(1000+i, best.));\n miss_start = ceil(ranuni(0) * 5); /* Randomly decide the start point for missing data (1 to 5) */\n do j=1 to 5; \n AVISITN = j;\n if j >= miss_start then AVAL = .; /* If the visit number is greater than or equal to miss_start, make AVAL missing */\n else AVAL = round(1 + ranuni(0), 0.01);\n output; \n end;\n end;\n drop i miss_start j;\nrun;\n\nproc sort data=dummy; \n by usubjid sex1n;\nrun;\n\nproc transpose data=dummy out=dummyt(drop=_name_) prefix=v;\n by USUBJID sex1n;\n id avisitn;\n var aval;\nrun;\n\nproc print data=dummyt(obs=5);\nrun;\n\nods select MissPattern;\nproc mi data=dummyt nimpute=0;\n var v1 - v5;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/mi_mar_linear_sas/mi_mar_reg_missing_pattern_monotone.PNG){fig-align='center' width=100%}\n:::\n:::\n\n\nIn this case we will use `monotone` statement instead of `FCS` for the 
imputation, example code as below:\n\n```sas\nproc mi data=dummyt out=outdata nimpute=10 seed=123;\n class sex1n;\n var sex1n v1 - v5;\n monotone reg (v1-v5 /details);\nrun;\n```\n\n## Reference\n\n- [User's Guide The MI Procedure](https://support.sas.com/documentation/onlinedoc/stat/141/mi.pdf)\n- [Multiple Imputation: A Statistical Programming Story](https://www.pharmasug.org/proceedings/2017/SP/PharmaSUG-2017-SP01.pdf)\n- [Examine patterns of missing data in SAS](https://blogs.sas.com/content/iml/2016/04/18/patterns-of-missing-data-in-sas.html#:~:text=PROC%20MI%20has%20an%20option%20to%20produce%20a,MissPattern%3B%20proc%20mi%20data%20%3DSashelp.Heart%20nimpute%3D%200%20%3B)", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/SAS/mmrm/execute-results/html.json b/_freeze/SAS/mmrm/execute-results/html.json deleted file mode 100644 index 7ff5dfcc5..000000000 --- a/_freeze/SAS/mmrm/execute-results/html.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "hash": "9752fcd568c0271d0561b505e1baa55f", - "result": { - "engine": "jupyter", - "markdown": "---\ntitle: \"MMRM in SAS\"\nexecute: \n eval: false\n---\n\n# Mixed Models\n\n#### Fitting the MMRM in SAS\n\nIn SAS the following code was used (assessments at `avisitn=0` should also be removed from the response variable):\n\n```{sas}\nproc mixed data=adlbh;\n where base ne . and avisitn not in (., 99);\n class usubjid trtpn(ref=\"0\") avisitn;\n by paramcd param;\n model chg=base trtpn avisitn trtpn*avisitn / solution cl alpha=0.05 ddfm=KR;\n repeated avisitn/subject=usubjid type=&covar;\n lsmeans trtpn * avisitn / diff cl slice=avisitn;\n lsmeans trtpn / diff cl;\nrun;\n```\n\nwhere the macro variable `covar` could be `UN`, `CS` or `AR(1)`. 
The results were stored in .csv files that were post-processed in R and compared with the results from R.\n\n", - "supporting": [ - "mmrm_files" - ], - "filters": [], - "includes": {} - } -} \ No newline at end of file diff --git a/_freeze/SAS/nparestimate/execute-results/html.json b/_freeze/SAS/nparestimate/execute-results/html.json deleted file mode 100644 index 4673f5786..000000000 --- a/_freeze/SAS/nparestimate/execute-results/html.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "hash": "6d7bac0b8ff4ce4d808fde8fde3f9104", - "result": { - "engine": "jupyter", - "markdown": "---\ntitle: \"Non-parametric point estimation in SAS\"\nexecute: \n eval: false\n---\n\n# Introduction\n\nThe Hodges-Lehman estimator (Hodges and Lehmann 1962) provides a point estimate which is associated with the Wilcoxon rank sum statistics based on location shift. This is typically used for the 2-sample comparison with small sample size. Note: The Hodges-Lehman estimates the median of the difference and not the difference of the medians. The corresponding distribution-free confidence interval (CI) is also based on the Wilcoxon rank sum statistics (Moses). In addition, exact CIs can be constructed.\n\nPROC NPAR1WAY provides these estimates in a flexible manner.\n\n*Hodges, J. L. and Lehmann, E. L. (1962) Rank methods for combination of independent experiments in analysis of variance. Annals of Mathematical Statistics, 33, 482-4.*\n\n# Case study\n\n```{sas}\n# Hollander-Wolfe-Chicken Example\ndata all;\ninput group $ value; \n cards;\nA 1.83\nA 0.50\nA 1.62\nA 2.48\nA 1.68\nA 1.88\nA 1.55\nA 3.06\nA 1.30\nB 0.878\nB 0.647\nB 0.598\nB 2.050\nB 1.060\nB 1.290\nB 1.060\nB 3.140\nB 1.290\n; \nrun;\n```\n\n# Hodges-Lehmann estimate and confidence interval\n\nHodges-Lehmann estimate and Moses confidence interval for the 2-sample case will be generated when putting HL as an option. The direction of the comparison can be controlled via refclass. 
If the exact confidence interval is required additionally then the exact statement together with the option HL needs to be defined. The Hodges-Lehmann point estimate and confidence interval can be addressed via the HodgesLehmann option under the ODS statement.\n\n```{sas}\nproc npar1way hl (refclass = \"B\") data = all;\n class group;\n var value;\n exact hl;\n ods select HodgesLehmann;\nrun;\n```\n\n# Results\n\nThe NPAR1WAY Procedure\n\n``` default\n Hodges-Lehmann Estimation \n\n Location Shift (A - B) 0.5600 \n\n Interval Asymptotic \n Type 95% Confidence Limits Midpoint Standard Error \n\n Asymptotic (Moses) -0.3700 1.1830 0.4065 0.3962 \n Exact -0.2200 1.0820 0.4310 \n```\n\n", - "supporting": [ - "nparestimate_files" - ], - "filters": [], - "includes": {} - } -} \ No newline at end of file diff --git a/_freeze/SAS/random_effects_models/execute-results/html.json b/_freeze/SAS/random_effects_models/execute-results/html.json new file mode 100644 index 000000000..c1fa5815c --- /dev/null +++ b/_freeze/SAS/random_effects_models/execute-results/html.json @@ -0,0 +1,15 @@ +{ + "hash": "3aeb403b0f5e895a3dee14c852be961d", + "result": { + "engine": "knitr", + "markdown": "---\ntitle: \"Random Effects Models in SAS\"\nexecute: \n eval: false\n---\n\n## Fitting Random Effects Models in SAS\n\nIn a classical regression model, coefficients in the model are fixed across all observations and observations are assumed to be independent. Mixed effects models introduce random coefficients to the model, called random effects, which vary randomly between different groups of observations. The introduction of random effects leads to observations within a group being correlated.\n\n### Setting up the model\n\nPROC MIXED can be used to used to implement random effects models. Random effects are added to the model through the random statement. 
\n\nAs an example, suppose that we want the intercept in the model to vary randomly between participants, in other words, a random constant is included in the model which is different for each participant. \n\nThis is achieved by random intercept / subject=USUBJID.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc mixed data=data; \nclass USUBJID TRTP; \nmodel AVAL = TRTP / ddfm = kenwardroger solution;\nrandom intercept / subject = USUBJID;\nrun;\n```\n:::\n\n\nThe estimated variance of the random effect(s) can be found in the model results.\n\n\n::: {.cell layout-align=\"center\"}\n\n:::\n\n\nIf you want the coefficient of TRTP to vary randomly by participant, include it in the random statement.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nrandom intercept TRTP / subject = USUBJID type=vc;\n```\n:::\n\n\n\nThe type option allows for the covariance structure between the random effects to be specified. The default is vc (variance components) which means that random effects are not correlated.\n\n### Inference on a single coefficient\n\nDegrees of freedom, p-values and confidence intervals for model coefficients are provided\nwhen the solution option is used in the model statement. The degrees of freedom method\nis specified using the ddfm option in the model statement.\n\n### Inference on a contrast\nThe estimate statement can be used to construct contrasts. An alternative is to use\nthe lsmestimate statement, which allows for contrasts to be constructed in terms of \nleast square means. 
Least square means can be calculated using the lsmeans statement.\nFor all of these methods, the output gives degrees of freedom, p-values and confidence intervals.\n", + "supporting": [], + "filters": [ + "rmarkdown/pagebreak.lua" + ], + "includes": {}, + "engineDependencies": {}, + "preserve": {}, + "postProcess": true + } +} \ No newline at end of file diff --git a/_freeze/SAS/ranksum/execute-results/html.json b/_freeze/SAS/ranksum/execute-results/html.json deleted file mode 100644 index 4d0241223..000000000 --- a/_freeze/SAS/ranksum/execute-results/html.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "hash": "13903370722ff584601d62c5633c33be", - "result": { - "engine": "jupyter", - "markdown": "---\ntitle: \"Wilcoxon Rank Sum /Mann-Whitney U test\"\nexecute: \n eval: false\n---\n\n# Wilcoxon Rank Sum / Mann-Whitney U test\n\nThe Wilcoxon rank-sum test, also known as the Mann-Whitney U test, is a nonparametric test used to compare differences between two independent samples. It's particularly useful when the sample distributions are not normally distributed and the sample sizes are small (typically less than 30).\n\n## Wilcoxon Rank Sum / Mann-Whitney U in SAS\n\nTo perform a Wilcoxon rank-sum test in SAS, you can use the PROC NPAR1WAY procedure. Here's a step-by-step guide:\n\n1. **Create the Dataset**: If there are two groups (smoker and non-smoker) with their respective measurements birth weight, you can input the data as follows:\n\n```{sas}\n/* Create dataset */\ndata bw;\n input bw grp $;\n datalines;\n3.99 ns\n3.89 ns\n3.6 ns\n3.73 ns\n3.31 ns\n3.7 ns\n4.08 ns\n3.61 ns\n3.83 ns\n3.41 ns\n4.13 ns\n3.36 ns\n3.54 ns\n3.51 ns\n2.71 ns\n3.18 s\n2.74 s\n2.9 s\n3.27 s\n3.65 s\n3.42 s\n3.23 s\n2.86 s\n3.6 s\n3.65 s\n3.69 s\n3.53 s\n2.38 s\n2.34 s\n;\nrun;\n```\n\n2. **Perform the Wilcoxon rank-sum Test**: Use the PROC NPAR1WAY procedure to perform the test. The wilcoxon option specifies that you want to perform the Wilcoxon rank-sum test. 
When computing the asymptotic Wilcoxon two-sample test, PROC NPAR1WAY uses a continuity correction by default. If specify the CORRECT=NO option in the PROC NPAR1WAY statement, the procedure does not use a continuity correction. Typically, we will also want the Hodges-Lehman confidence intervals. To get these you will need to add `hl` to the pro npar1way statement.\n\n```{sas}\n/* Perform Wilcoxon rank-sum test - with continuity correction by default*/\nproc npar1way data=BW wilcoxon hl;\n class grp;\n var bw;\nrun;\n\n/* Perform Wilcoxon rank-sum test - without continuity correction*/\nproc npar1way data=BW wilcoxon CORRECT=NO hl;\n class grp;\n var bw;\nrun;\n```\n\n## Results\n\n### Wilcoxon rank-sum test - with continuity correction by default\n\n![](/images/ranksum/ranksum.png)\n\nAs seen above, SAS outputs a table of Wilcoxon Scores for birth weight by non-smoker and smoker: the number (N); the sum of scores; the expected sum of scores under the null hypothesis; the standard deviation under the null hypothesis, and the observed mean score. The table also includes a footnote to specify that ties were handled by using the average score.\n\nSAS also outputs a table of Wilcoxon Two-sample Test. This table includes a footnote to specify that a continuiity correction of 0.5 is used.\n\n**Statistic**: 150.5000\n\n**Z**: -2.5756 (This is the test statistic after applying a continuity correction of 0.5)\n\n**Pr \\< Z**: 0.0050 (This is the one-tailed p-value). The one-tailed p-value (Pr\\< Z) of 0.0050 suggests that there is a 0.5% chance of observing a test statistic as extreme as 1.2498 under the null hypothesis.\n\n**Pr \\> \\|Z\\|**: 0.0100 (This is the two-tailed p-value). 
The two-tailed p-value (Pr \\> \\|Z\\|) of 0.0100 suggests that there is a 1.00 % chance of observing a test statistic as extreme as 1.2498 in either direction under the null hypothesis.\n\nThe t-distribution approximations provide similar p-values, indicating the robustness of the results.\n\n**t Approximation Pr \\< Z**: 0.0078 (This is the one-tailed p-value using a t-distribution approximation)\n\n**t Approximation Pr \\> \\|Z\\|**: 0.0156 (This is the two-tailed p-value using a t-distribution approximation)\n\nSince the p-values (both one-tailed and two-tailed) are less than the common significance level (e.g., 0.05), we can reject the null hypothesis. This means there is a significant difference between the two groups (ns and s) for the variable BW.\n\n### Wilcoxon rank-sum test - without continuity correction\n\n![](/images/ranksum/ranksum2.png)\n\nAs seen above, Wilcoxon Two-Sample Test results are changed because No continuity correction is used.\n\n### Hodges-Lehman\n\nThe correction does not effect the Hodges-Lehman CI. The Location shift is the Hodges-Lehmann estimator. 
By default the asymptotic (Moses) CI is shown.\n\n![](/images/ranksum/hl-ci.png)\n\n### Wilcoxon rank-sum test - Exact\n\nFor sufficiently small sample size, the large-sample normal approximation used by the asymptotic Wilcoxon might not be appropriate, so the exact statement is needed.\n\n```{sas}\n/* Perform Wilcoxon rank-sum test - with continuity correction by default*/\nproc npar1way data=BW wilcoxon CORRECT=NO hl;\n class grp;\n var bw;\n exact wilcoxon hl;\nrun;\n```\n\n![](/images/ranksum/wrs-exact.png)\n\nThe exact hl part of that statement makes the exact and asymptotic Hodges-Lehmann CI appear.\n\n![](/images/ranksum/hl-exact.png)\n\n# References\n\n[SAS Help Center: TWOSAMPLEWILCOXON Statement](https://documentation.sas.com/doc/en/statug/15.2/statug_power_syntax112.htm) [SAS Help Center: Overview: NPAR1WAY Procedure](https://documentation.sas.com/doc/en/statug/15.2/statug_npar1way_overview.htm)\n\n*Data source: Table 30.4, Kirkwood BR. and Sterne JAC. Essentials of medical statistics. Second Edition. 
ISBN 978-0-86542-871-3*\n\n", - "supporting": [ - "ranksum_files" - ], - "filters": [], - "includes": {} - } -} \ No newline at end of file diff --git a/_freeze/SAS/rbmi_continuous_joint_SAS/execute-results/html.json b/_freeze/SAS/rbmi_continuous_joint_SAS/execute-results/html.json index 6065ea7ad..c780a9fa1 100644 --- a/_freeze/SAS/rbmi_continuous_joint_SAS/execute-results/html.json +++ b/_freeze/SAS/rbmi_continuous_joint_SAS/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "e5b8aab3d5d1e9f6ceddc34db869889a", + "hash": "4788fa9718bb9d78b409d5b8bf7139a6", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Reference-Based Multiple Imputation (joint modelling): Continuous Data\"\nexecute: \n eval: false\n---\n\n## Reference-based multiple imputation (rbmi)\n\n### Methodology introduction\n\nReference-based multiple imputation methods have become popular for handling missing data, as well as for conducting sensitivity analyses, in randomized clinical trials. In the context of a repeatedly measured continuous endpoint assuming a multivariate normal model, [Carpenter et al. (2013)](https://www.tandfonline.com/doi/full/10.1080/10543406.2013.834911) proposed a framework to extend the usual MAR-based MI approach by postulating assumptions about the joint distribution of pre- and post-deviation data. Under this framework, one makes qualitative assumptions about how individuals’ missing outcomes relate to those observed in relevant groups in the trial, based on plausible clinical scenarios. Statistical analysis then proceeds using the method of multiple imputation ([Rubin 1976](https://doi.org/10.1093/biomet/63.3.581), [Rubin 1987](https://onlinelibrary.wiley.com/doi/book/10.1002/9780470316696)).\n\nIn general, multiple imputation of a repeatedly measured continuous outcome can be done via 2 computational routes ([Roger 2022](https://baselbiometrics.github.io/home/docs/talks/20221208/5_JamesRoger%2020121118.pdf)):\n\n1. 
Stepwise: split problem into separate imputations of data at each visit\n\n - requires monotone missingness, such as missingness due to withdrawal\n\n - conditions on the imputed values at previous visit\n\n - Bayesian linear regression problem is much simpler with monotone missing, as one can sample directly using conjugate priors\n\n2. One-step approach (joint modelling): Fit a Bayesian full multivariate normal repeated measures model using MCMC and then draw a sample.\n\nHere, we illustrate reference-based multiple imputation of a continuous outcome measured repeatedly via the so-called one-step approach.\n\n### Five Macros\n\nThe `five macros` ([Roger 2022](https://baselbiometrics.github.io/home/docs/talks/20221208/5_JamesRoger%2020121118.pdf)), available at [LSHTM DIA Missing Data](https://www.lshtm.ac.uk/research/centres-projects-groups/missing-data#dia-missing-data), fit a Bayesian Normal RM model and then impute post withdrawal data under a series of possible post-withdrawal profiles including J2R, CIR and CR as described by [Carpenter et al. (2013)](https://www.tandfonline.com/doi/full/10.1080/10543406.2013.834911). It then analyses the data using a univariate ANOVA at each visit and summarizes across imputations using Rubin’s rules.\n\nThe following standard and reference-based multiple imputation approaches will be illustrated here:\n\n- MAR (Missing At Random)\n\n- CIR (Copy Increment from Reference)\n\n- J2R (Jump to Reference)\n\n- CR (Copy Reference)\n\n## Data used\n\nA publicly available example [dataset](https://r-packages.io/datasets/antidepressant_data) from an antidepressant clinical trial of an active drug versus placebo is used. Overall, data of 172 patients is available with 88 patients receiving placebo and 84 receiving active drug. 
The same data is used for the [R part](../R/rbmi_continuous_joint.html).\n\nThe relevant endpoint is the Hamilton 17-item depression rating scale (HAMD17) which was assessed at baseline and at weeks 1, 2, 4, and 6 (visits 4-7). Study drug discontinuation occurred in 24% (20/84) of subjects from the active drug and 26% (23/88) of subjects from placebo. All data after study drug discontinuation are missing.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc print data=dat (obs=10);\n\t var PATIENT GENDER THERAPY RELDAYS VISIT BASVAL HAMDTL17 CHANGE;\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/rbmi/SAS_ExploreData1.PNG){fig-align='center' width=80%}\n:::\n:::\n\n\nThe number of patients per visit and arm are:\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc freq data=dat;\n\t table VISIT*THERAPY / norow nocol nopercent nocum;\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/rbmi/SAS_ExploreData2.PNG){fig-align='center' width=80%}\n:::\n:::\n\n\nThe mean change from baseline of the endpoint (Hamilton 17-item depression rating scale, HAMD17) per visit per treatment group using only the complete cases are:\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc means data=dat n mean nonobs;\n\t class VISIT THERAPY;\n\t var CHANGE;\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/rbmi/SAS_ExploreData3.PNG){fig-align='center' width=80%}\n:::\n:::\n\n\nThe missingness pattern is show below. The incomplete data is primarily monotone in nature. 128 patients have complete data for all visits (all 1’s at each visit). 20, 10 and 13 patients have 1, 2 or 3 monotone missing data, respectively. 
Further, there is a single additional intermittent missing observation (patient 3618).\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc transpose data=dat out=HAMD_wide(drop=_NAME_) prefix=CHG;\n\t by PATIENT THERAPY BASVAL;\n\t id VISIT;\n\t var CHANGE;\nrun;\n\nproc mi data=HAMD_wide nimpute=0 displaypattern=NOMEANS;\n\t var CHG4 CHG5 CHG6 CHG7;\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/rbmi/SAS_ExploreData4.PNG){fig-align='center' width=80%}\n:::\n:::\n\n\n## Complete case analysis\n\nA complete case analysis is performed using mixed model for repeated measures (MMRM) with covariates: treatment \\[THERAPY\\], gender \\[GENDER\\], visit \\[VISIT\\] as factors; baseline score \\[BASVAL\\] as continuous; and visit-by-treatment \\[THERAPY \\* VISIT\\] interaction, and visit-by-baseline \\[BASVAL \\* VISIT\\] interaction. An unstructured covariance matrix is used. The **MIXED** procedure is used.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc mixed data=dat method=reml;\n\t class THERAPY(ref=\"PLACEBO\") VISIT(ref=\"4\") PATIENT GENDER(ref=\"F\");\n\t model CHANGE = THERAPY GENDER VISIT BASVAL THERAPY*VISIT BASVAL*VISIT /s ddfm=satterthwaite;\n\t repeated VISIT / type=UN subject=PATIENT r;\n\t lsmeans THERAPY*VISIT / diff=control(\"PLACEBO\" \"7\") cl;\nrun;\n```\n:::\n\n\nThe parameter estimates of the fixed effects are:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/rbmi/SAS_CompleteCase_fixedEstimates.PNG){fig-align='center' width=80%}\n:::\n:::\n\n\nThe estimated unstructured covariance matrix parameters are:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/rbmi/SAS_CompleteCase_covarianceEstimates.PNG){fig-align='center' width=80%}\n:::\n:::\n\n\nThe treatment difference at visit 7 is of interest, and is estimated to be -2.829 (se=1.117) with 95% CI of \\[-5.033 to -0.624\\] (p=0.0122).\n\n\n::: {.cell layout-align=\"center\"}\n::: 
{.cell-output-display}\n![](../images/rbmi/SAS_CompleteCase_contrastEstimates.PNG){fig-align='center' width=90%}\n:::\n:::\n\n\n## Five macros: MAR approach\n\nAs described above, the so-called `five macros` will be used for the SAS implementation. The `five macros` are available at [LSHTM DIA Missing Data](https://www.lshtm.ac.uk/research/centres-projects-groups/missing-data#dia-missing-data) under Reference-based MI via Multivariate Normal RM (the \"five macros and MIWithD\"). For the details, see the use guide available in the download of the `five macros`.\n\nApplying the `five macros` for reference-based multiple imputation entails the sequential run of the following:\n\n- Part1A declares the parameter estimation model and checks consistency with the dataset. It builds a master dataset which holds details of the current job (run of the macros in sequence). It also builds indexes for the classification variables, which may be either numeric or character.\n\n- Part1B fits the parameter estimation model using the MCMC procedure and draws a pseudo-independent sample from the joint posterior distribution for the linear predictor parameters and the covariance parameters.\n\n- Part2A calculates the predicted mean under MAR, and under MNAR for each subject based on their withdrawal pattern once for each draw of the linear predictor parameter estimates. The choice of MNAR is controlled by the method used, which may vary from subject to subject.\n\n- Part2B imputes the intermediate missing values using MAR and the trailing missing values using MNAR, by deriving the conditional distribution for the missing values conditional on the observed values and covariates, using the appropriate sampled covariance parameter estimates.\n\n- Part3 carries out a univariate ANOVA analysis at selected time points usually based on the same covariates as the parameter estimation model. 
It then combines the least-squares means and their differences using the MIANALYZE procedure to provide final results. It is this macro which handles the Delta methods.\n\nMost of the computation time is spent in the Part1B macro where the MCMC procedure is used to generate a sample from the posterior distribution of the full set of model parameters. These are stored away and can be used repeatedly by calling the later macros over and over again using different imputation methods.\n\nTo perform reference-based multiple imputation using MAR approach the following code is used\n\n\n::: {.cell}\n\n```{.sas .cell-code}\n%part1A(jobname = HAMD, \n Data=dat,\n Subject=PATIENT,\n RESPONSE = CHANGE,\n Time = VISIT,\n Treat = THERAPY,\n Covbytime = BASVAL,\n Catcov = GENDER);\n\n%part1B(jobname = HAMD,\n Ndraws = 500,\n thin = 10,\n seed = 12345);\n\n%part2A(jobname = HAMD_MAR,\n inname = HAMD,\n method = MAR);\n\n%part2B(jobname = HAMD_MAR,\n seed = 12345);\n\n%part3(Jobname = HAMD_MAR,\n anref = PLACEBO,\n Label = MAR);\n```\n:::\n\n\nTo print the results of the contrast at week 7\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc print data=HAMD_MAR_OUT;\n\twhere VISIT = \"7\";\n\tvar VISIT THERAPY _THERAPY Diff SE_Diff df Probt LCL_Diff UCL_Diff;\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/rbmi/SAS_MAR_contrast.PNG){fig-align='center' width=90%}\n:::\n:::\n\n\n## Five macros: MNAR CR approach\n\nTo perform reference-based multiple imputation using Copy Reference (CR) approach the following changes are needed in part2A of the 5 macros\n\n\n::: {.cell}\n\n```{.sas .cell-code}\n%part2A(jobname = HAMD_CR,\n inname = HAMD,\n method = CR,\n ref = PLACEBO);\n```\n:::\n\n\nThe results for M=500 imputations are\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/rbmi/SAS_MNAR_CR_contrast.PNG){fig-align='center' width=90%}\n:::\n:::\n\n\n## Five macros: MNAR J2R approach\n\nTo perform 
reference-based multiple imputation using Jump to Reference (J2R) approach the following changes are needed in part2A of the 5 macros\n\n\n::: {.cell}\n\n```{.sas .cell-code}\n%part2A(jobname = HAMD_J2R,\n inname = HAMD,\n method = J2R,\n ref = PLACEBO);\n```\n:::\n\n\nThe results for M=500 imputations are\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/rbmi/SAS_MNAR_J2R_contrast.PNG){fig-align='center' width=90%}\n:::\n:::\n\n\n## Five macros: MNAR CIR approach\n\nTo perform reference-based multiple imputation using Copy Increments in Reference (CIR) approach the following changes are needed in part2A of the 5 macros\n\n\n::: {.cell}\n\n```{.sas .cell-code}\n%part2A(jobname = HAMD_CIR,\n inname = HAMD,\n method = CIR,\n ref = PLACEBO);\n```\n:::\n\n\nThe results for M=500 imputations are\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/rbmi/SAS_MNAR_CIR_contrast.PNG){fig-align='center' width=90%}\n:::\n:::\n\n\n## Summary of results\n\nIn the table we present the results of the different imputation strategies (and with varying number, *M*, of multiple imputation draws). Note that some results can be slightly different from the results above due to a possible different seed. 
The table shows the contrast at Visit 7 between DRUG and PLACEBO \[DRUG - PLACEBO\]:\n\n| Method | Estimate | SE | 95% CI | p-value |\n|------------------------|----------|-------|------------------|---------|\n| Complete Case | -2.829 | 1.117 | -5.035 to -0.623 | 0.0123 |\n| MI - MAR (M=500) | -2.810 | 1.122 | -5.029 to -0.592 | 0.0134 |\n| MI - MAR (M=2000) | -2.816 | 1.128 | -5.047 to -0.586 | 0.0137 |\n| MI - MAR (M=5000) | -2.825 | 1.123 | -5.045 to -0.605 | 0.0130 |\n| MI - MNAR CR (M=500) | -2.384 | 1.120 | -4.598 to -0.170 | 0.0350 |\n| MI - MNAR CR (M=2000) | -2.390 | 1.118 | -4.599 to -0.180 | 0.0342 |\n| MI - MNAR CR (M=5000) | -2.400 | 1.115 | -4.604 to -0.196 | 0.0330 |\n| MI - MNAR J2R (M=500) | -2.122 | 1.141 | -4.377 to 0.133 | 0.0650 |\n| MI - MNAR J2R (M=2000) | -2.135 | 1.140 | -4.388 to 0.117 | 0.0630 |\n| MI - MNAR J2R (M=5000) | -2.144 | 1.136 | -4.389 to 0.101 | 0.0611 |\n| MI - MNAR CIR (M=500) | -2.461 | 1.120 | -4.674 to -0.248 | 0.0296 |\n| MI - MNAR CIR (M=2000) | -2.469 | 1.118 | -4.679 to -0.260 | 0.0287 |\n| MI - MNAR CIR (M=5000) | -2.481 | 1.115 | -4.684 to -0.278 | 0.0276 |\n\n## Discussion\n\nA note on computational time. The total running time (including data loading, setting up data sets, MCMC run, imputing data and analysis MI data) for M=500 was about 23 seconds on a personal laptop. It increased to about 44 seconds for M=2000. Computational time was similar across different imputation strategies.\n\nWith a small number of `Ndraws` in part1B a warning could pop-up \"There is still significant autocorrelation after 5 lags, and the effective sample size for the parameter might not be estimated accurately.\". Increasing the number of `Ndraws` will mostly solve this warning. 
For example, for this data example, this message is received when setting `Ndraws` below 100.\n\n## Appendix 1: mmrm as analysis model\n\nPart 3 of the 5 macros carries out a univariate ANOVA analysis at selected time points usually based on the same covariates as the parameter estimation model. It then combines the least-squares means and their differences using Rubin’s formula in a calculation similar to that in the MIANALYZE procedure to provide final results.\n\nSince, all imputed datasets are readily available (after part2B), another possibility is to analyse each imputed dataset using the analysis model of your choice, and combining the results using `PROC MIANALYZE`. For example, suppose an MMRM should be fit on each imputed dataset:\n\n\n::: {.cell}\n\n```{.sas .cell-code}\ndata HAMD_CR_DATAFULL;\n\tset HAMD_CR_DATAFULL;\n\t_Imputation_ = draw;\nrun;\t\n\nproc mixed data=HAMD_CR_DATAFULL ;\n\tby _Imputation_;\n class THERAPY(ref=\"PLACEBO\") VISIT(ref=\"4\") PATIENT GENDER(ref=\"F\");\n model CHANGE = THERAPY GENDER VISIT BASVAL THERAPY*VISIT BASVAL*VISIT /s ddfm=satterthwaite;\n repeated VISIT / type=UN subject=PATIENT r;\n lsmeans THERAPY*VISIT / diff=control(\"PLACEBO\" \"7\") cl;\n ods output Diffs=diff01;\nrun;\t\n\nproc mianalyze parms=diff01(where=(VISIT=\"7\"));\n\tclass THERAPY VISIT;\n\tmodeleffects THERAPY*VISIT;\n\tods output ParameterEstimates=res01;\nrun;\n```\n:::\n\n\nThe results for M=500 imputations are\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/rbmi/SAS_MNAR_mmrm.PNG){fig-align='center' width=90%}\n:::\n:::\n\n\n## Reference\n\n[Carpenter JR, Roger JH & Kenward MG (2013)](https://doi.org/10.1080/10543406.2013.834911). Analysis of Longitudinal Trials with Protocol Deviation: A Framework for Relevant, Accessible Assumptions, and Inference via MI. 
*Journal of Biopharmaceutical Statistics* 23: 1352-1371.\n\n[Five macros: Drug Information Association (DIA) Missing Data Working Group (2012)](https://www.lshtm.ac.uk/research/centres-projects-groups/missing-data#dia-missing-data). Reference-based MI via Multivariate Normal RM (the “five macros and MIWithD”). London School of Hygiene and Tropical Medicine DIA Missing Data.\n\n[PROC MIANALYZE, SAS Institute Inc. (2017)](https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.3/statug/statug_mianalyze_toc.htm). SAS/STAT® 14.3 User’s Guide. Cary, NC: SAS Institute Inc.\n\n[Roger J (2022, Dec 8)](https://baselbiometrics.github.io/home/docs/talks/20221208/5_JamesRoger%2020121118.pdf). Other statistical software for continuous longitudinal endpoints: SAS macros for multiple imputation. *Addressing intercurrent events: Treatment policy and hypothetical strategies*. Joint EFSPI and BBS virtual event.\n\n[Rubin DB (1976)](https://doi.org/10.1093/biomet/63.3.581). Inference and Missing Data. *Biometrika* 63: 581–592.\n\n[Rubin DB (1987)](https://onlinelibrary.wiley.com/doi/book/10.1002/9780470316696). *Multiple Imputation for Nonresponse in Surveys*. New York: John Wiley & Sons.", + "markdown": "---\ntitle: \"Reference-Based Multiple Imputation (joint modelling): Continuous Data\"\n---\n\n## Reference-based multiple imputation (rbmi)\n\n### Methodology introduction\n\nReference-based multiple imputation methods have become popular for handling missing data, as well as for conducting sensitivity analyses, in randomized clinical trials. In the context of a repeatedly measured continuous endpoint assuming a multivariate normal model, [Carpenter et al. (2013)](https://www.tandfonline.com/doi/full/10.1080/10543406.2013.834911) proposed a framework to extend the usual MAR-based MI approach by postulating assumptions about the joint distribution of pre- and post-deviation data. 
Under this framework, one makes qualitative assumptions about how individuals’ missing outcomes relate to those observed in relevant groups in the trial, based on plausible clinical scenarios. Statistical analysis then proceeds using the method of multiple imputation ([Rubin 1976](https://doi.org/10.1093/biomet/63.3.581), [Rubin 1987](https://onlinelibrary.wiley.com/doi/book/10.1002/9780470316696)).\n\nIn general, multiple imputation of a repeatedly measured continuous outcome can be done via 2 computational routes ([Roger 2022](https://baselbiometrics.github.io/home/docs/talks/20221208/5_JamesRoger%2020121118.pdf)):\n\n1. Stepwise: split problem into separate imputations of data at each visit\n\n - requires monotone missingness, such as missingness due to withdrawal\n\n - conditions on the imputed values at previous visit\n\n - Bayesian linear regression problem is much simpler with monotone missing, as one can sample directly using conjugate priors\n\n2. One-step approach (joint modelling): Fit a Bayesian full multivariate normal repeated measures model using MCMC and then draw a sample.\n\nHere, we illustrate reference-based multiple imputation of a continuous outcome measured repeatedly via the so-called one-step approach.\n\n### Five Macros\n\nThe `five macros` ([Roger 2022](https://baselbiometrics.github.io/home/docs/talks/20221208/5_JamesRoger%2020121118.pdf)), available at [LSHTM DIA Missing Data](https://www.lshtm.ac.uk/research/centres-projects-groups/missing-data#dia-missing-data), fit a Bayesian Normal RM model and then impute post withdrawal data under a series of possible post-withdrawal profiles including J2R, CIR and CR as described by [Carpenter et al. (2013)](https://www.tandfonline.com/doi/full/10.1080/10543406.2013.834911). 
It then analyses the data using a univariate ANOVA at each visit and summarizes across imputations using Rubin’s rules.\n\nThe following standard and reference-based multiple imputation approaches will be illustrated here:\n\n- MAR (Missing At Random)\n\n- CIR (Copy Increment from Reference)\n\n- J2R (Jump to Reference)\n\n- CR (Copy Reference)\n\n## Data used\n\nA publicly available example [dataset](https://r-packages.io/datasets/antidepressant_data) from an antidepressant clinical trial of an active drug versus placebo is used. Overall, data of 172 patients is available with 88 patients receiving placebo and 84 receiving active drug. The same data is used for the [R part](../R/rbmi_continuous_joint.html).\n\nThe relevant endpoint is the Hamilton 17-item depression rating scale (HAMD17) which was assessed at baseline and at weeks 1, 2, 4, and 6 (visits 4-7). Study drug discontinuation occurred in 24% (20/84) of subjects from the active drug and 26% (23/88) of subjects from placebo. All data after study drug discontinuation are missing.\n\n```sas\nproc print data=dat (obs=10);\n\t var PATIENT GENDER THERAPY RELDAYS VISIT BASVAL HAMDTL17 CHANGE;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/rbmi/SAS_ExploreData1.PNG){fig-align='center' width=80%}\n:::\n:::\n\n\nThe number of patients per visit and arm are:\n\n```sas\nproc freq data=dat;\n\t table VISIT*THERAPY / norow nocol nopercent nocum;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/rbmi/SAS_ExploreData2.PNG){fig-align='center' width=80%}\n:::\n:::\n\n\nThe mean change from baseline of the endpoint (Hamilton 17-item depression rating scale, HAMD17) per visit per treatment group using only the complete cases are:\n\n```sas\nproc means data=dat n mean nonobs;\n\t class VISIT THERAPY;\n\t var CHANGE;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: 
{.cell-output-display}\n![](../images/rbmi/SAS_ExploreData3.PNG){fig-align='center' width=80%}\n:::\n:::\n\n\nThe missingness pattern is shown below. The incomplete data is primarily monotone in nature. 128 patients have complete data for all visits (all 1’s at each visit). 20, 10 and 13 patients have 1, 2 or 3 monotone missing data, respectively. Further, there is a single additional intermittent missing observation (patient 3618).\n\n```sas\nproc transpose data=dat out=HAMD_wide(drop=_NAME_) prefix=CHG;\n\t by PATIENT THERAPY BASVAL;\n\t id VISIT;\n\t var CHANGE;\nrun;\n\nproc mi data=HAMD_wide nimpute=0 displaypattern=NOMEANS;\n\t var CHG4 CHG5 CHG6 CHG7;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/rbmi/SAS_ExploreData4.PNG){fig-align='center' width=80%}\n:::\n:::\n\n\n## Complete case analysis\n\nA complete case analysis is performed using mixed model for repeated measures (MMRM) with covariates: treatment \[THERAPY\], gender \[GENDER\], visit \[VISIT\] as factors; baseline score \[BASVAL\] as continuous; and visit-by-treatment \[THERAPY \* VISIT\] interaction, and visit-by-baseline \[BASVAL \* VISIT\] interaction. An unstructured covariance matrix is used. 
The **MIXED** procedure is used.\n\n```sas\nproc mixed data=dat method=reml;\n\t class THERAPY(ref=\"PLACEBO\") VISIT(ref=\"4\") PATIENT GENDER(ref=\"F\");\n\t model CHANGE = THERAPY GENDER VISIT BASVAL THERAPY*VISIT BASVAL*VISIT /s ddfm=satterthwaite;\n\t repeated VISIT / type=UN subject=PATIENT r;\n\t lsmeans THERAPY*VISIT / diff=control(\"PLACEBO\" \"7\") cl;\nrun;\n```\n\nThe parameter estimates of the fixed effects are:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/rbmi/SAS_CompleteCase_fixedEstimates.PNG){fig-align='center' width=80%}\n:::\n:::\n\n\nThe estimated unstructured covariance matrix parameters are:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/rbmi/SAS_CompleteCase_covarianceEstimates.PNG){fig-align='center' width=80%}\n:::\n:::\n\n\nThe treatment difference at visit 7 is of interest, and is estimated to be -2.829 (se=1.117) with 95% CI of \\[-5.033 to -0.624\\] (p=0.0122).\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/rbmi/SAS_CompleteCase_contrastEstimates.PNG){fig-align='center' width=90%}\n:::\n:::\n\n\n## Five macros: MAR approach\n\nAs described above, the so-called `five macros` will be used for the SAS implementation. The `five macros` are available at [LSHTM DIA Missing Data](https://www.lshtm.ac.uk/research/centres-projects-groups/missing-data#dia-missing-data) under Reference-based MI via Multivariate Normal RM (the \"five macros and MIWithD\"). For the details, see the use guide available in the download of the `five macros`.\n\nApplying the `five macros` for reference-based multiple imputation entails the sequential run of the following:\n\n- Part1A declares the parameter estimation model and checks consistency with the dataset. It builds a master dataset which holds details of the current job (run of the macros in sequence). 
It also builds indexes for the classification variables, which may be either numeric or character.\n\n- Part1B fits the parameter estimation model using the MCMC procedure and draws a pseudo-independent sample from the joint posterior distribution for the linear predictor parameters and the covariance parameters.\n\n- Part2A calculates the predicted mean under MAR, and under MNAR for each subject based on their withdrawal pattern once for each draw of the linear predictor parameter estimates. The choice of MNAR is controlled by the method used, which may vary from subject to subject.\n\n- Part2B imputes the intermediate missing values using MAR and the trailing missing values using MNAR, by deriving the conditional distribution for the missing values conditional on the observed values and covariates, using the appropriate sampled covariance parameter estimates.\n\n- Part3 carries out a univariate ANOVA analysis at selected time points usually based on the same covariates as the parameter estimation model. It then combines the least-squares means and their differences using the MIANALYZE procedure to provide final results. It is this macro which handles the Delta methods.\n\nMost of the computation time is spent in the Part1B macro where the MCMC procedure is used to generate a sample from the posterior distribution of the full set of model parameters. 
These are stored away and can be used repeatedly by calling the later macros over and over again using different imputation methods.\n\nTo perform reference-based multiple imputation using MAR approach the following code is used\n\n```sas\n%part1A(jobname = HAMD, \n Data=dat,\n Subject=PATIENT,\n RESPONSE = CHANGE,\n Time = VISIT,\n Treat = THERAPY,\n Covbytime = BASVAL,\n Catcov = GENDER);\n\n%part1B(jobname = HAMD,\n Ndraws = 500,\n thin = 10,\n seed = 12345);\n\n%part2A(jobname = HAMD_MAR,\n inname = HAMD,\n method = MAR);\n\n%part2B(jobname = HAMD_MAR,\n seed = 12345);\n\n%part3(Jobname = HAMD_MAR,\n anref = PLACEBO,\n Label = MAR);\n```\n\nTo print the results of the contrast at week 7\n\n```sas\nproc print data=HAMD_MAR_OUT;\n\twhere VISIT = \"7\";\n\tvar VISIT THERAPY _THERAPY Diff SE_Diff df Probt LCL_Diff UCL_Diff;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/rbmi/SAS_MAR_contrast.PNG){fig-align='center' width=90%}\n:::\n:::\n\n\n## Five macros: MNAR CR approach\n\nTo perform reference-based multiple imputation using Copy Reference (CR) approach the following changes are needed in part2A of the 5 macros\n\n```sas\n%part2A(jobname = HAMD_CR,\n inname = HAMD,\n method = CR,\n ref = PLACEBO);\n```\n\nThe results for M=500 imputations are\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/rbmi/SAS_MNAR_CR_contrast.PNG){fig-align='center' width=90%}\n:::\n:::\n\n\n## Five macros: MNAR J2R approach\n\nTo perform reference-based multiple imputation using Jump to Reference (J2R) approach the following changes are needed in part2A of the 5 macros\n\n```sas\n%part2A(jobname = HAMD_J2R,\n inname = HAMD,\n method = J2R,\n ref = PLACEBO);\n```\n\nThe results for M=500 imputations are\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/rbmi/SAS_MNAR_J2R_contrast.PNG){fig-align='center' width=90%}\n:::\n:::\n\n\n## Five macros: MNAR CIR approach\n\nTo perform 
reference-based multiple imputation using Copy Increments in Reference (CIR) approach the following changes are needed in part2A of the 5 macros\n\n```sas\n%part2A(jobname = HAMD_CIR,\n inname = HAMD,\n method = CIR,\n ref = PLACEBO);\n```\n\nThe results for M=500 imputations are\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/rbmi/SAS_MNAR_CIR_contrast.PNG){fig-align='center' width=90%}\n:::\n:::\n\n\n## Summary of results\n\nIn the table we present the results of the different imputation strategies (and with varying number, *M*, of multiple imputation draws). Note that some results can be slightly different from the results above due to a possible different seed. The table shows the contrast at Visit 7 between DRUG and PLACEBO \[DRUG - PLACEBO\]:\n\n| Method | Estimate | SE | 95% CI | p-value |\n|------------------------|----------|-------|------------------|---------|\n| Complete Case | -2.829 | 1.117 | -5.035 to -0.623 | 0.0123 |\n| MI - MAR (M=500) | -2.810 | 1.122 | -5.029 to -0.592 | 0.0134 |\n| MI - MAR (M=2000) | -2.816 | 1.128 | -5.047 to -0.586 | 0.0137 |\n| MI - MAR (M=5000) | -2.825 | 1.123 | -5.045 to -0.605 | 0.0130 |\n| MI - MNAR CR (M=500) | -2.384 | 1.120 | -4.598 to -0.170 | 0.0350 |\n| MI - MNAR CR (M=2000) | -2.390 | 1.118 | -4.599 to -0.180 | 0.0342 |\n| MI - MNAR CR (M=5000) | -2.400 | 1.115 | -4.604 to -0.196 | 0.0330 |\n| MI - MNAR J2R (M=500) | -2.122 | 1.141 | -4.377 to 0.133 | 0.0650 |\n| MI - MNAR J2R (M=2000) | -2.135 | 1.140 | -4.388 to 0.117 | 0.0630 |\n| MI - MNAR J2R (M=5000) | -2.144 | 1.136 | -4.389 to 0.101 | 0.0611 |\n| MI - MNAR CIR (M=500) | -2.461 | 1.120 | -4.674 to -0.248 | 0.0296 |\n| MI - MNAR CIR (M=2000) | -2.469 | 1.118 | -4.679 to -0.260 | 0.0287 |\n| MI - MNAR CIR (M=5000) | -2.481 | 1.115 | -4.684 to -0.278 | 0.0276 |\n\n## Discussion\n\nA note on computational time. 
The total running time (including data loading, setting up data sets, MCMC run, imputing data and analysis MI data) for M=500 was about 23 seconds on a personal laptop. It increased to about 44 seconds for M=2000. Computational time was similar across different imputation strategies.\n\nWith a small number of `Ndraws` in part1B a warning could pop-up \"There is still significant autocorrelation after 5 lags, and the effective sample size for the parameter might not be estimated accurately.\". Increasing the number of `Ndraws` will mostly solve this warning. For example, for this data example, this message is received when setting `Ndraws` below 100.\n\n## Appendix 1: mmrm as analysis model\n\nPart 3 of the 5 macros carries out a univariate ANOVA analysis at selected time points usually based on the same covariates as the parameter estimation model. It then combines the least-squares means and their differences using Rubin’s formula in a calculation similar to that in the MIANALYZE procedure to provide final results.\n\nSince all imputed datasets are readily available (after part2B), another possibility is to analyse each imputed dataset using the analysis model of your choice, and combining the results using `PROC MIANALYZE`. 
For example, suppose an MMRM should be fit on each imputed dataset:\n\n```sas\ndata HAMD_CR_DATAFULL;\n\tset HAMD_CR_DATAFULL;\n\t_Imputation_ = draw;\nrun;\t\n\nproc mixed data=HAMD_CR_DATAFULL ;\n\tby _Imputation_;\n class THERAPY(ref=\"PLACEBO\") VISIT(ref=\"4\") PATIENT GENDER(ref=\"F\");\n model CHANGE = THERAPY GENDER VISIT BASVAL THERAPY*VISIT BASVAL*VISIT /s ddfm=satterthwaite;\n repeated VISIT / type=UN subject=PATIENT r;\n lsmeans THERAPY*VISIT / diff=control(\"PLACEBO\" \"7\") cl;\n ods output Diffs=diff01;\nrun;\t\n\nproc mianalyze parms=diff01(where=(VISIT=\"7\"));\n\tclass THERAPY VISIT;\n\tmodeleffects THERAPY*VISIT;\n\tods output ParameterEstimates=res01;\nrun;\n```\n\nThe results for M=500 imputations are\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/rbmi/SAS_MNAR_mmrm.PNG){fig-align='center' width=90%}\n:::\n:::\n\n\n## Reference\n\n[Carpenter JR, Roger JH & Kenward MG (2013)](https://doi.org/10.1080/10543406.2013.834911). Analysis of Longitudinal Trials with Protocol Deviation: A Framework for Relevant, Accessible Assumptions, and Inference via MI. *Journal of Biopharmaceutical Statistics* 23: 1352-1371.\n\n[Five macros: Drug Information Association (DIA) Missing Data Working Group (2012)](https://www.lshtm.ac.uk/research/centres-projects-groups/missing-data#dia-missing-data). Reference-based MI via Multivariate Normal RM (the “five macros and MIWithD”). London School of Hygiene and Tropical Medicine DIA Missing Data.\n\n[PROC MIANALYZE, SAS Institute Inc. (2017)](https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.3/statug/statug_mianalyze_toc.htm). SAS/STAT® 14.3 User’s Guide. Cary, NC: SAS Institute Inc.\n\n[Roger J (2022, Dec 8)](https://baselbiometrics.github.io/home/docs/talks/20221208/5_JamesRoger%2020121118.pdf). Other statistical software for continuous longitudinal endpoints: SAS macros for multiple imputation. *Addressing intercurrent events: Treatment policy and hypothetical strategies*. 
Joint EFSPI and BBS virtual event.\n\n[Rubin DB (1976)](https://doi.org/10.1093/biomet/63.3.581). Inference and Missing Data. *Biometrika* 63: 581–592.\n\n[Rubin DB (1987)](https://onlinelibrary.wiley.com/doi/book/10.1002/9780470316696). *Multiple Imputation for Nonresponse in Surveys*. New York: John Wiley & Sons.", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/SAS/recurrent_events/execute-results/html.json b/_freeze/SAS/recurrent_events/execute-results/html.json index 1471fcc93..6c23fe3ad 100644 --- a/_freeze/SAS/recurrent_events/execute-results/html.json +++ b/_freeze/SAS/recurrent_events/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "d498a3abfb5c0c4dfd626e89f6ac9451", + "hash": "72bb1c5054b51b5de840f018ab2c12e4", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"SAS Recurrent Events\"\nexecute: \n eval: false\n---\n\n\n\n\n\n# Recurrent event models\n\n## Modelling recurrent events\n\n### Methodology introduction\n\nTraditionally, survival analysis focuses on the time to a *single* first event. While there are many applications for such time-to-event analysis in clinical trials, this approach falls short when events of interest can occur multiple times within the same subject. Recurrent event models extend the traditional Cox proportional hazards framework to account for *multiple* events per subject ([Ozga et al. 2018](https://pubmed.ncbi.nlm.nih.gov/29301487/), [Amorim et al. 2015](https://pubmed.ncbi.nlm.nih.gov/25501468/)).\n\nIn this tutorial, we will demonstrate how to implement different recurrent event models in SAS, specifically the Andersen-Gill, proportional means/rates (Lin-Wei-Yang-Ying,) Prentice-Williams-Peterson, and Wei-Lin-Weissfeld models. The SAS code follows the layout of [Amor 2023](https://www.lexjansen.com/phuse/2023/as/PAP_AS05.pdf), with additional insights taken from [Lu et al. 
2018](https://www.lexjansen.com/pharmasug/2018/EP/PharmaSUG-2018-EP02.pdf).\n\nRecurrent event models can roughly be divided in three categories: counting process models, conditional models and marginal models. In the section below, we will explore the difference between each of these approaches. In addition, important aspects of data structure will be discussed by means of two fictional subjects, one with 4 events and 0 censored observations (events at time 6, 9, 56 and 88), and another with 2 events and 1 censored observation (events at time 42, 57, and censored at time 91).\n\nDefine the following:\n\n::: callout-note\n$\\lambda_i(t)$: hazard function for the $i$th subject at time $t$\n\n$\\lambda_{ij}(t)$: hazard function for the $j$th event of the $i$th subject at time $t$\n\n$\\lambda_0(t)$: common baseline hazard for all events\n\n$\\lambda_{0j}(t)$: event-specific baseline hazard for the $j$th event at time $t$\n\n$\\beta$: common parameter vector\n\n$\\beta_j$: event-specific parameter vector for the $j$th event\n\n$X_{ij}$: covariate vector for the $j$th event of the $i$th subject\n:::\n\n### Counting process models\n\n#### Andersen-Gill model ([Andersen & Gill 1982](https://projecteuclid.org/journals/annals-of-statistics/volume-10/issue-4/Coxs-Regression-Model-for-Counting-Processes--A-Large-Sample/10.1214/aos/1176345976.full))\n\n$$\n\\lambda_i(t) = \\lambda_0(t) \\exp \\left( \\beta X_{ij}(t) \\right) \\\n$$\n\n- Counting process approach: treats each subject as a multiple events counting process\n\n- Common baseline hazard $\\lambda_0(t)$\n\n- Common regression coefficients $\\beta$\n\n- Unrestricted risk set: a subject contributes to the risk set for an event as long as the subject is under observation, i.e. it can be at risk for a subsequent event even though the previous event did not yet occur\n\n- Order of events is not important\n\nAn essential assumption of the Andersen-Gill model is that of **independent events** within subjects. 
This, however, is often not realistic in clinical trial data. For example, let's say that we are modelling myocardial infarction (MI). If a patient has already experienced one MI, their risk of subsequent events may increase due to underlying cardiovascular damage or presence of other risk factors. Thus, the more events a patient has, the more likely they are to experience future events, indicating dependence rather than independence. To accurately model this within-subject correlation, extensions like time-varying covariates, a robust sandwich covariance estimator or frailty terms may be needed. In this tutorial, we will discuss the sandwich correction.\n\n**Lin-Wei-Yang-Ying (LWYY) model or proportional means/rates model ([Lin, Wei, Yang & Ying 2000](https://www.jstor.org/stable/2680616))**\n\nLin, Wei, Yang & Ying introduced an improved version of the Andersen-Gill model in 2000 (often referred to as proportional means/rates model), featuring a robust sandwich estimator that explicitly accounts for individual subject clusters. These robust standard errors yield wider confidence intervals and provide asymptotically valid inference even when the independence assumption does not hold ([Lee et al. 2025](https://pubmed.ncbi.nlm.nih.gov/40490702/)). 
The original and improved Andersen-Gill model often appear interchangeable in the literature, and while they produce identical estimates, their robust standard errors can differ substantially, which may impact the conclusions drawn from statistical inference.\n\nFor both versions of the Andersen-Gill model, the data must be structured as follows:\n\n| Subject | Time interval | Event | Stratum |\n|---------|---------------|-------|---------|\n| 1 | (0, 6\\] | 1 | 1 |\n| 1 | (6, 9\\] | 1 | 1 |\n| 1 | (9, 56\\] | 1 | 1 |\n| 1 | (56, 88\\] | 1 | 1 |\n| 2 | (0, 42\\] | 1 | 1 |\n| 2 | (42, 87\\] | 1 | 1 |\n| 2 | (87, 91\\] | 0 | 1 |\n\nThis can be visually represented:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/AG_lineplot.png){fig-align='center' width=80%}\n:::\n:::\n\n\nIn both versions of the Andersen-Gill model, each new time interval starts where the previous one ends.\n\n### Conditional models\n\n#### Prentice-Williams-Peterson model ([Prentice, Williams & Peterson 1981](https://academic.oup.com/biomet/article-abstract/68/2/373/260402?redirectedFrom=fulltext))\n\n- Conditional approach: incorporates conditional strata to account for ordering/dependence of events\n\n- Stratified baseline hazard $\\lambda_{0j}(t)$\n\n- Stratified regression coefficients $\\beta_j$: can be pooled ($\\beta$) or kept as event-specific ($\\beta_j$) in the output\n\n- Restricted risk set: contributions to the risk set for a subsequent event are restricted to only consider subjects that already experienced the previous event\n\n- Order of events is important\n\nThe Prentice-Williams-Peterson model can incorporate both overall and event-specific effects $\\beta_j$ for each covariate. An often made assumption is to set $\\beta_1 = \\beta_2 = ... = \\beta_k = \\beta$ to estimate a common parameter $\\beta$.\n\nDepending on the outcome of interest, Prentice, Williams and Peterson suggested two distinct models:\n\n1. 
**Total time model**\n\n$$\n\\lambda_{ij}(t) = \\lambda_{0j}(t) \\exp \\left( \\beta_j X_{ij}(t) \\right) \\\n$$\n\nThe total time variant of the Prentice-Williams-Peterson model uses the same time intervals as the counting process approach (Andersen-Gill model), which is useful for modelling the full time course ($t$) of the recurrent event process, i.e. the hazard of *any* recurrence.\n\nFor the total time model, the data must be structured as follows:\n\n| Subject | Time interval | Event | Stratum |\n|---------|---------------|-------|---------|\n| 1 | (0, 6\\] | 1 | 1 |\n| 1 | (6, 9\\] | 1 | 2 |\n| 1 | (9, 56\\] | 1 | 3 |\n| 1 | (56, 88\\] | 1 | 4 |\n| 2 | (0, 42\\] | 1 | 1 |\n| 2 | (42, 87\\] | 1 | 2 |\n| 2 | (87, 91\\] | 0 | 3 |\n\nThis can be visually represented:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/PWPtt_lineplot.png){fig-align='center' width=80%}\n:::\n:::\n\n\nAgain, in the total time model, each new time intervals starts where the previous one ends.\n\n2. **Gap time model**\n\n$$\n\\lambda_{ij}(t) = \\lambda_{0j}(t - t_{j-1}) \\exp \\left( \\beta_j X_{ij}(t) \\right) \\\n$$\n\nThe gap time variant of the Prentice-Williams-Peterson model uses time intervals that start at zero and end at the length of time until the next event, which is useful for modelling the time between each of the recurring events ($t - t_{j-1}$), i.e. 
the hazard of recurrence *after the previous event*.\n\nFor the gap time model, the data must be structured as follows:\n\n| Subject | Time interval | Event | Stratum |\n|---------|---------------|-------|---------|\n| 1 | (0, 6\\] | 1 | 1 |\n| 1 | (0, 3\\] | 1 | 2 |\n| 1 | (0, 47\\] | 1 | 3 |\n| 1 | (0, 32\\] | 1 | 4 |\n| 2 | (0, 42\\] | 1 | 1 |\n| 2 | (0, 45\\] | 1 | 2 |\n| 2 | (0, 3\\] | 0 | 3 |\n\nThis can be visually represented:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/PWPgt_lineplot.png){fig-align='center' width=80%}\n:::\n:::\n\n\nIn the gap time model, each time interval starts at zero and has a length equal to the gap time between two neighboring events.\n\n### Marginal models\n\n#### Wei-Lin-Weissfeld model ([Wei, Lin & Weissfeld 1989](https://www.jstor.org/stable/2290084))\n\n$$\n\\lambda_{ij}(t) = \\lambda_{0j}(t) \\exp \\left( \\beta_j X_{ij}(t) \\right) \\\n$$\n\n- Marginal approach: treats each (recurrent) event as having a separate, marginal process\n- Stratified baseline hazard $\\lambda_{0j}(t)$\n- Semi-restricted risk set: all subjects contribute follow-up times to all *potential* events, i.e. each subject is at risk for all *potential* events, regardless of how many events that subject actually experiences\n- Order of events is not important\n\nAlthough the Wei-Lin-Weissfeld model has it roots in competing risks analysis, it conveniently lends itself to model recurrent events as well. Like the Andersen-Gill model, the Wei-Lin-Weissfeld model also assumes **independence** of events, which is often not feasible in practice. In addition, it is assumed there is no specific order among the events or that the events are different types of events, and not necessarily *recurrent* events.\n\nLike the Prentice-Williams-Peterson models, the Wei-Lin-Weissfeld model can incorporate both overall and event-specific effects $\\beta_j$ for each covariate. 
An often made assumption is to set $\\beta_1 = \\beta_2 = ... = \\beta_k = \\beta$ to estimate a common parameter $\\beta$. Another approach is to combine event-specific effects $\\beta_j$ to get an estimator of the average treatment effect, as described in [Wei, Lin & Weissfeld 1989](https://www.jstor.org/stable/2290084) (this is not discussed further here).\n\nFor Wei-Lin-Weissfeld models, the data must be structured as follows:\n\n| Subject | Time interval | Event | Stratum |\n|---------|---------------|-------|---------|\n| 1 | (0, 6\\] | 1 | 1 |\n| 1 | (0, 9\\] | 1 | 2 |\n| 1 | (0, 56\\] | 1 | 3 |\n| 1 | (0, 88\\] | 1 | 4 |\n| 2 | (0, 42\\] | 1 | 1 |\n| 2 | (0, 87\\] | 1 | 2 |\n| 2 | (0, 91\\] | 0 | 3 |\n| 2 | (0, 91\\] | 0 | 4 |\n\nThis can be visually represented:\n\n\n::: {.cell layout-align=\"center\"}\n\n:::\n\n\nIn the Wei-Lin-Weissfeld model, each time intervals starts at zero and ends at its respective event time.\n\n### Overview of all models\n\nIn summary, the selection of the model to use would depend on the type of events, the importance of the order of the events and the time intervals to be analyzed. 
We made an effort to summarize the similarities and differences between the models in the table below.\n\n| | **AG** | **PWPtt** | **PWPgt** | **WLW** |\n|---------------|---------------|---------------|---------------|---------------|\n| **Approach** | counting process | conditional | conditional | marginal |\n| **Baseline hazard** | common | stratified | stratified | stratified |\n| **Regression coefficients** | common | stratified possible | stratified possible | stratified possible |\n| **Risk set** | unrestricted | restricted | restricted | semi-restricted |\n| **Time interval** | total time | total time | gap time | total time |\n| **Order of events** | not important | important | important | not important |\n| **Hazard ratio (HR)** | risk of any recurrence | risk of any recurrence | risk of recurrence after previous event | risk of event of any type, not necessarily recurrent event |\n\nNote that, because the ordering of events is not important in the Andersen-Gill and Wei-Lin-Weissfeld model, these models come with the assumption of **independence** of events. In contrast, the Prentice-Williams-Peterson models overcome the need for this assumption by capturing the dependence structure between recurrent events in conditional strata. Consequently, events are assumed to be *conditionally* independent in the Prentice-Williams-Peterson models.\n\nA nice visual representation of the stratification and time interval structure of each model is given below. 
The correct data structure is pivotal when modelling recurrent events and depends on the methodology you want to use, as illustrated in the figure.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/combined_lineplot.png){fig-align='center' width=80%}\n:::\n:::\n\n\n## Modelling recurrent events using the `survival` package\n\n### Data\n\nFor this tutorial we will use the [`bladder` data](https://rdrr.io/cran/survival/man/bladder.html) from the [`survival` R package](https://cran.r-project.org/web/packages/survival/survival.pdf), which captures recurrences of bladder cancer from a clinical trial for an oncolytic called thiotepa. The `bladder` data is regularly used by many statisticians to demonstrate methodology for recurrent event modelling. Somewhat confusingly, there are three versions of this data available:\n\n- `bladder1`: original data from the study on all subjects (294 records)\n\n- `bladder2`: data in Andersen-Gill format on subset of subjects with nonzero follow-up time (178 records)\n\n- `bladder`: data in Wei-Lin-Weissfeld format on subset of subjects with nonzero follow-up time (340 records)\n\nFor this tutorial, we will use `bladder2` to illustrate Andersen-Gill and Prentice-Williams-Peterson models, and `bladder` to illustrate the Wei-Lin-Weissfeld model.\n\nThe variables included in both datasets are:\n\n- **id**: patient id\n\n- **rx**: treatment group (1 = placebo, 2 = thiotepa)\n\n- **number**: initial number of tumors (8 = 8 or more)\n\n- **size**: size in cm of largest initial tumor\n\n- **start**: start of time interval; this variable is **not** present in `bladder`\n\n- **stop**: (recurrent) event or censoring time\n\n- **event**: event indicator (1 = event, 0 = censored)\n\n- **enum**: order of recurrence\n\nImportantly, both datasets collect the data in a **counting process** structure. 
This means that there is one record for each subject and time interval, where a time interval is defined as the time to its respective event (**event** = 1), or the time to follow-up if the event did not occur (**event** = 0).\n\nLet's look more closely at the `bladder2` and `bladder` data:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbladder2 <- survival::bladder2\ngt(head(bladder2, 6))\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n
idrxnumbersizestartstopeventenum
11130101
21210401
31110701
415101001
51410611
514161002
\n
\n```\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbladder2 %>%\n group_by(enum) %>% summarise(n = n()) %>% gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n\n \n\n \n\n \n\n \n \n
enumn
185
246
327
420
\n
\n```\n\n:::\n:::\n\n\nIn `bladder2`, in the Andersen-Gill format, each subject has a variable amount of records, depending on the amount of events that subject experienced.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbladder <- survival::bladder\ngt(head(bladder, 20))\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n \n
idrxnumbersizestopeventenum
1113101
1113102
1113103
1113104
2121401
2121402
2121403
2121404
3111701
3111702
3111703
3111704
41511001
41511002
41511003
41511004
5141611
51411002
51411003
51411004
\n
\n```\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbladder %>%\n group_by(enum) %>% summarise(n = n()) %>% gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n\n \n\n \n\n \n\n \n \n
enumn
185
285
385
485
\n
\n```\n\n:::\n:::\n\n\nIn `bladder`, in the Wei-Lin-Weissfeld format, each subject has four records, regardless of how many events that subject actually experienced. In addition, there is no `start` variable, as all time intervals start at zero.\n\n[**Note:**]{.underline} The variables **id**, **start** and **stop** were renamed to **subjid**, **tstart** and **tstop** to avoid using SAS key words as variable names.\n\n### Analysis\n\nIn SAS, any survival analysis based on the Cox proportional hazard model can be conducted using the `phreg` procedure. Hence, conveniently, when modelling time-to-event data with recurrent events, the same procedure can be used. The caveat here is that an adequate data structure is required, which must be in correspondence with the model you want to use.\n\nIn this section of the tutorial, we will explain how the arguments of the `phreg` procedure and data structure must be defined to fit every type of recurrent event model correctly.\n\n#### Andersen-Gill model\n\n1. 
**Improved Andersen-Gill model (LWYY model or proportional means/rates model)**\n\nFor the improved version of the Andersen-Gill model you must include:\n\n- `proc phreg data=bladder2 covs(aggregate)`\n- `model (tstart, tstop) * event(0) = 'predictors';`\n- `id subjid;`\n\nAnd the data structure must be:\n\n| Subjid | Time interval | Tstart | Tstop | Event |\n|--------|---------------|--------|-------|-------|\n| 1 | (0, 1\\] | 0 | 1 | 0 |\n| 2 | (0, 4\\] | 0 | 4 | 0 |\n| 3 | (0, 7\\] | 0 | 7 | 0 |\n| 4 | (0, 10\\] | 0 | 10 | 0 |\n| 5 | (0, 6\\] | 0 | 6 | 1 |\n| 5 | (6, 10\\] | 6 | 10 | 0 |\n\nWe will use the `bladder2` data for this.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc phreg data=bladder2 covs(aggregate);\n\tclass rx (ref='1');\n\tmodel (tstart, tstop) * event(0) = rx size number /rl;\n\tid subjid;\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n\n:::\n\n\nBy including the `covs(aggregate)` option and setting `id subjid;`, SAS will compute a robust sandwich covariance and display robust standard error estimates in the output. Under the hood, the robust standard errors will consider all **subjid** clusters separately and ultimately sum up the score residuals for each distinct cluster.\n\n2. **Original Andersen-Gill model**\n\nThe original Andersen-Gill model of 1982 can be fitted by changing `covs(aggregate)` to `covs` in the procedure, while excluding `id subjid;`.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc phreg data=bladder2 covs;\n\tclass rx (ref='1');\n\tmodel (tstart, tstop) * event(0) = rx size number /rl;\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/SAS_AG_original.png){fig-align='center' width=80%}\n:::\n:::\n\n\nAlthough the original Andersen-Gill model does not consider separate **subjid** clusters, it still computes robust standard errors using the sandwich estimator. 
The resulting robust standard errors differ from those provided for the improved Andersen-Gill model, while the estimated coefficients remain perfectly unchanged.\n\n#### Prentice-Williams-Peterson model\n\n**Total time model**\n\nFor the Prentice-Williams-Peterson total time model you must include:\n\n- `proc phreg data=bladder2 covs(aggregate);`\n- `model (tstart, tstop) * event(0) = 'predictors';`\n- `id subjid;`\n- `strata enum;`\n\nAnd the data structure must be:\n\n| Subjid | Time interval | Tstart | Tstop | Event | Enum |\n|--------|---------------|--------|-------|-------|------|\n| 1 | (0, 1\\] | 0 | 1 | 0 | 1 |\n| 2 | (0, 4\\] | 0 | 4 | 0 | 1 |\n| 3 | (0, 7\\] | 0 | 7 | 0 | 1 |\n| 4 | (0, 10\\] | 0 | 10 | 0 | 1 |\n| 5 | (0, 6\\] | 0 | 6 | 1 | 1 |\n| 5 | (6, 10\\] | 6 | 10 | 0 | 2 |\n\nWe will use the `bladder2` data for this.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc phreg data=bladder2 covs(aggregate);\n\tclass rx (ref='1');\n\tmodel (tstart, tstop) * event(0) = rx size number /rl; \n\tid subjid;\n\tstrata enum;\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/SAS_PWPtt.png){fig-align='center' width=80%}\n:::\n:::\n\n\nThe conditional strata of the Prentice-Williams-Peterson model are set by `strata enum;` in the formula, where `enum` captures the ordering of recurrent events.\n\n**Gap time model**\n\nFor the Prentice-Williams-Peterson gap time model you must include:\n\n- `proc phreg data=bladder2 covs(aggregate);`\n- `model gtime * event(0) = 'predictors';`\n- `id subjid;`\n- `strata enum;`\n\nAnd the data structure must be:\n\n| Subjid | Time interval | Tstart | Tstop | Event | Enum |\n|--------|---------------|--------|-------|-------|------|\n| 1 | (0, 1\\] | 0 | 1 | 0 | 1 |\n| 2 | (0, 4\\] | 0 | 4 | 0 | 1 |\n| 3 | (0, 7\\] | 0 | 7 | 0 | 1 |\n| 4 | (0, 10\\] | 0 | 10 | 0 | 1 |\n| 5 | (0, 6\\] | 0 | 6 | 1 | 1 |\n| 5 | (0, 4\\] | 0 | 4 | 0 | 2 |\n\nThis data structure can be 
achieved in `bladder2` by adding a `gtime` variable.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\ndata bladder2;\n\tset bladder2;\n\tgtime = tstop - tstart;\nrun;\n```\n:::\n\n\nWe artificially set start = 0 for each gap time interval by including `gtime` instead of `(start, stop)` in the `model` statement.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc phreg data=bladder2 covs(aggregate);\n\tclass rx (ref='1');\n\tmodel gtime * event(0) = rx size number/rl; \n\tid subjid;\n\tstrata enum;\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/SAS_PWPgt.png){fig-align='center' width=80%}\n:::\n:::\n\n\n#### Wei-Lin-Weissfeld model\n\nFor the Wei-Lin-Weissfeld model you must include:\n\n- `proc phreg data=bladder covs(aggregate);`\n- `model tstop * event(0) = 'predictors';`\n- `id subjid;`\n- `strata enum;`\n\nAnd the data structure must be:\n\n| Subjid | Time interval | Tstart | Tstop | Event | Enum |\n|--------|---------------|--------|-------|-------|------|\n| 1 | (0, 1\\] | 0 | 1 | 0 | 1 |\n| 1 | (0, 1\\] | 0 | 1 | 0 | 2 |\n| 1 | (0, 1\\] | 0 | 1 | 0 | 3 |\n| 1 | (0, 1\\] | 0 | 1 | 0 | 4 |\n| 2 | (0, 4\\] | 0 | 4 | 0 | 1 |\n| 2 | (0, 4\\] | 0 | 4 | 0 | 2 |\n| 2 | (0, 4\\] | 0 | 4 | 0 | 3 |\n| 2 | (0, 4\\] | 0 | 4 | 0 | 4 |\n| 3 | (0, 7\\] | 0 | 7 | 0 | 1 |\n| 3 | (0, 7\\] | 0 | 7 | 0 | 2 |\n| 3 | (0, 7\\] | 0 | 7 | 0 | 3 |\n| 3 | (0, 7\\] | 0 | 7 | 0 | 4 |\n| 4 | (0, 10\\] | 0 | 10 | 0 | 1 |\n| 4 | (0, 10\\] | 0 | 10 | 0 | 2 |\n| 4 | (0, 10\\] | 0 | 10 | 0 | 3 |\n| 4 | (0, 10\\] | 0 | 10 | 0 | 4 |\n| 5 | (0, 6\\] | 0 | 6 | 1 | 1 |\n| 5 | (0, 10\\] | 0 | 10 | 0 | 2 |\n| 5 | (0, 10\\] | 0 | 10 | 0 | 3 |\n| 5 | (0, 10\\] | 0 | 10 | 0 | 4 |\n\nWe will use the `bladder` data for this.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc phreg data=bladder covs(aggregate);\n\tclass rx (ref='1');\n\tmodel tstop * event(0) = rx size number /rl;\n\tid subjid;\n strata enum;\nrun;\n```\n:::\n\n\n\n::: {.cell 
layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/SAS_WLW.png){fig-align='center' width=80%}\n:::\n:::\n\n\nImportantly, the strata of the Wei-Lin-Weissfeld model as set by `strata enum;` are substantially different from the conditional strata of the Prentice-Williams-Peterson model. The `enum` variable is now no longer assumed to be an ordinal variable.\n\n#### Important notes\n\n[**Note:**]{.underline} The `rl` option ensures the 95% confidence interval for the hazard ratio is displayed.\n\n[**Note:**]{.underline} If you want to display non-robust, model-based standard errors (like the ones given by default in R), you can do this by adding `covm` to the procedure statement.\n\n[**Note:**]{.underline} It may be useful to look at the **Summary of the Number of Event and Censored Values** to check whether the data stratification was rightly specified for your model. Examples for the Prentice-Williams-Peterson and Wei-Lin-Weissfeld models are given below.\n\nSummary for Prentice-Williams-Peterson models:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/SAS_PWP_structure.png){fig-align='center' width=60%}\n:::\n:::\n\n\nSummary for Wei-Lin-Weissfeld model:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/SAS_WLW_structure.png){fig-align='center' width=60%}\n:::\n:::\n\n\n[**Note:**]{.underline} R uses `ties = \"efron\"` by default, while SAS uses `ties = breslow` by default. If this argument remains unchanged in both software, it can cause differences in outcome. For more information, be sure to check the [CAMIS webpage](https://psiaims.github.io/CAMIS/Comp/r-sas_survival.html) on the comparison of Cox proportional hazards models in R and SAS.\n\n### Interpretation\n\nIn terms of interpretation, hazard ratios ($\\exp(\\beta_j)$) are often used when making inferences based on Cox proportional hazards models. 
Now, as you may remember from the overview presented earlier, it is important to recognize that each of the recurrent event models comes with a slightly different interpretation of the hazard ratio, as defined by the assumptions around the model.\n\n| | **AG** | **PWPtt** | **PWPgt** | **WLW** |\n|---------------|---------------|---------------|---------------|---------------|\n| **Hazard ratio (HR)** | risk of any recurrence | risk of any recurrence | risk of recurrence after previous event | risk of event of any type, not necessarily recurrent event |\n\nThis means that, for the `bladder` data, we can draw slightly different conclusions on the hazard ratio of the group treated with thiotepa (**rx** = 2) versus the placebo group (**rx** = 1).\n\n| Model | HR: rx2 vs rx1 | 95% CI | P-value |\n|-------------|----------------|----------------|---------|\n| AG | 0.631 | 0.381 to 1.047 | 0.0747 |\n| Original AG | 0.631 | 0.403 to 0.989 | 0.0447 |\n| PWPtt | 0.716 | 0.486 to 1.053 | 0.0898 |\n| PWPgt | 0.764 | 0.508 to 1.148 | 0.1952 |\n| WLW | 0.560 | 0.309 to 1.015 | 0.0560 |\n\nThese conclusions are:\n\n- **Andersen-Gill**: the risk of having *any new tumor recurrence* in the treatment group is 0.631 (0.381 - 1.047) times that of the placebo group\n\n- **Prentice-Williams-Peterson: total time**: the risk of having *any new tumor recurrence* in the treatment group is 0.716 (0.486 - 1.053) times that of the placebo group\n\n- **Prentice-Williams-Peterson: gap time**: the risk of having *a new tumor recurrence after a previous event* in the treatment group is 0.764 (0.508 - 1.148) times that of the placebo group\n\n- **Wei-Lin-Weissfeld**: the risk of having *any type of event* in the treatment group is 0.560 (0.309 - 1.015) times that of the placebo group\n\n[**Note:**]{.underline} The improved Andersen-Gill model (LWYY model or proportional means/rates model) is preferred over the original Andersen-Gill model.\n\n### Event-specific estimates\n\nFor the 
Prentice-Williams-Peterson and Wei-Lin-Weissfeld models we can incorporate both overall ($\\beta$) and event-specific ($\\beta_j$) effects for each covariate. To arrive at pooled model parameters these models assume that $\\beta_1 = \\beta_2 = ... = \\beta_k = \\beta$. Until now, we have only considered pooled model parameters, but given the underlying stratification of these two models in particular, it may be valuable to look into the event-specific estimates as well ([Amorim & Cai 2015](https://pubmed.ncbi.nlm.nih.gov/25501468/)).\n\nTo get event-specific estimates for the treatment effect (**rx**), we first need to introduce four new **rx** variables to the `bladder2` and `bladder` datasets, one for each stratum.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\ndata bladder2;\n\tset bladder2;\n\trx_enum1 = rx*(enum=1);\n\trx_enum2 = rx*(enum=2);\n\trx_enum3 = rx*(enum=3);\n\trx_enum4 = rx*(enum=4);\nrun;\n```\n:::\n\n\n\n::: {.cell}\n\n```{.sas .cell-code}\ndata bladder;\n\tset bladder;\n\trx_enum1 = rx*(enum=1);\n\trx_enum2 = rx*(enum=2);\n\trx_enum3 = rx*(enum=3);\n\trx_enum4 = rx*(enum=4);\nrun;\n```\n:::\n\n\nWith these four interaction variables, we need to specify `rx_enum1-rx_enum4` in the formula and set `class enum / param=glm;` to output the event-specific estimates.\n\n#### Prentice-Williams-Peterson model\n\n**Total time model**\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc phreg data=bladder2 covs(aggregate);\n\tclass enum / param=glm;\n\tmodel (tstart, tstop) * event(0) = rx_enum1-rx_enum4 size number /rl; \n\tid subjid;\n\tstrata enum;\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/SAS_PWPtt_stratified.png){fig-align='center' width=80%}\n:::\n:::\n\n\n**Gap time model**\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc phreg data=bladder2 covs(aggregate);\n\tclass enum / param=glm;\n\tmodel gtime * event(0) = rx_enum1-rx_enum4 size number/rl; \n\tid subjid;\n\tstrata 
enum;\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/SAS_PWPgt_stratified.png){fig-align='center' width=80%}\n:::\n:::\n\n\n#### Wei-Lin-Weissfeld model\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc phreg data=bladder covs(aggregate);\n\tclass enum / param=glm;\n\tmodel tstop * event(0) = rx_enum1-rx_enum4 size number /rl;\n\tid subjid;\n strata enum;\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/SAS_WLW_stratified.png){fig-align='center' width=80%}\n:::\n:::\n\n\n## References\n\n[Amor 2023](https://www.lexjansen.com/phuse/2023/as/PAP_AS05.pdf). Eat, Sleep, R, Repeat.\n\n[Amorim & Cai 2015](https://pubmed.ncbi.nlm.nih.gov/25501468/). Modelling recurrent events: a tutorial for analysis in epidemiology. *International Journal of Epidemiology*. 2015 Feb;44(1):324-33.\n\n[Andersen & Gill 1982](https://www.jstor.org/stable/2240714). Cox’s Regression Model for Counting Processes: A Large Sample Study. *The Annals of Statistics*. 10(4):1100–1120.\n\n[bladder data](https://rdrr.io/cran/survival/man/bladder.html)\n\n[Lu & Shen 2018](https://www.lexjansen.com/pharmasug/2018/EP/PharmaSUG-2018-EP02.pdf). Application of Survival Analysis in Multiple Events Using SAS. *PharmaSUG 2018*.\n\n[Ozga et al. 2018](https://pubmed.ncbi.nlm.nih.gov/29301487/). A systematic comparison of recurrent event models for application to composite endpoints. *BMC Medical Research Methodology*. 2018 Jan 4;18(1):2.\n\n[Prentice, Williams & Peterson 1981](https://www.jstor.org/stable/2335582). On the Regression Analysis of Multivariate Failure Time Data. *Biometrika*. 68(2):373–379.\n\n[survival package](https://cran.r-project.org/web/packages/survival/vignettes/survival.pdf)\n\n[Wei, Lin & Weissfeld 1989](https://www.jstor.org/stable/2290084). Regression Analysis of Multivariate Incomplete Failure Time Data by Modeling Marginal Distributions. 
*Journal of the American Statistical Association*. 84(408):1065–1073.", + "markdown": "---\ntitle: \"SAS Recurrent Events\"\n---\n\n\n\n\n\n# Recurrent event models\n\n## Modelling recurrent events\n\n### Methodology introduction\n\nTraditionally, survival analysis focuses on the time to a *single* first event. While there are many applications for such time-to-event analysis in clinical trials, this approach falls short when events of interest can occur multiple times within the same subject. Recurrent event models extend the traditional Cox proportional hazards framework to account for *multiple* events per subject ([Ozga et al. 2018](https://pubmed.ncbi.nlm.nih.gov/29301487/), [Amorim et al. 2015](https://pubmed.ncbi.nlm.nih.gov/25501468/)).\n\nIn this tutorial, we will demonstrate how to implement different recurrent event models in SAS, specifically the Andersen-Gill, proportional means/rates (Lin-Wei-Yang-Ying), Prentice-Williams-Peterson, and Wei-Lin-Weissfeld models. The SAS code follows the layout of [Amor 2023](https://www.lexjansen.com/phuse/2023/as/PAP_AS05.pdf), with additional insights taken from [Lu et al. 2018](https://www.lexjansen.com/pharmasug/2018/EP/PharmaSUG-2018-EP02.pdf).\n\nRecurrent event models can roughly be divided into three categories: counting process models, conditional models and marginal models. In the section below, we will explore the difference between each of these approaches. 
In addition, important aspects of data structure will be discussed by means of two fictional subjects, one with 4 events and 0 censored observations (events at time 6, 9, 56 and 88), and another with 2 events and 1 censored observation (events at time 42, 87, and censored at time 91).\n\nDefine the following:\n\n::: callout-note\n$\\lambda_i(t)$: hazard function for the $i$th subject at time $t$\n\n$\\lambda_{ij}(t)$: hazard function for the $j$th event of the $i$th subject at time $t$\n\n$\\lambda_0(t)$: common baseline hazard for all events\n\n$\\lambda_{0j}(t)$: event-specific baseline hazard for the $j$th event at time $t$\n\n$\\beta$: common parameter vector\n\n$\\beta_j$: event-specific parameter vector for the $j$th event\n\n$X_{ij}$: covariate vector for the $j$th event of the $i$th subject\n:::\n\n### Counting process models\n\n#### Andersen-Gill model ([Andersen & Gill 1982](https://projecteuclid.org/journals/annals-of-statistics/volume-10/issue-4/Coxs-Regression-Model-for-Counting-Processes--A-Large-Sample/10.1214/aos/1176345976.full))\n\n$$\n\\lambda_i(t) = \\lambda_0(t) \\exp \\left( \\beta X_{ij}(t) \\right) \\\n$$\n\n- Counting process approach: treats each subject as a multiple events counting process\n\n- Common baseline hazard $\\lambda_0(t)$\n\n- Common regression coefficients $\\beta$\n\n- Unrestricted risk set: a subject contributes to the risk set for an event as long as the subject is under observation, i.e. it can be at risk for a subsequent event even though the previous event did not yet occur\n\n- Order of events is not important\n\nAn essential assumption of the Andersen-Gill model is that of **independent events** within subjects. This, however, is often not realistic in clinical trial data. For example, let's say that we are modelling myocardial infarction (MI). If a patient has already experienced one MI, their risk of subsequent events may increase due to underlying cardiovascular damage or presence of other risk factors. 
Thus, the more events a patient has, the more likely they are to experience future events, indicating dependence rather than independence. To accurately model this within-subject correlation, extensions like time-varying covariates, a robust sandwich covariance estimator or frailty terms may be needed. In this tutorial, we will discuss the sandwich correction.\n\n**Lin-Wei-Yang-Ying (LWYY) model or proportional means/rates model ([Lin, Wei, Yang & Ying 2000](https://www.jstor.org/stable/2680616))**\n\nLin, Wei, Yang & Ying introduced an improved version of the Andersen-Gill model in 2000 (often referred to as proportional means/rates model), featuring a robust sandwich estimator that explicitly accounts for individual subject clusters. These robust standard errors yield wider confidence intervals and provide asymptotically valid inference even when the independence assumption does not hold ([Lee et al. 2025](https://pubmed.ncbi.nlm.nih.gov/40490702/)). The original and improved Andersen-Gill model often appear interchangeable in the literature, and while they produce identical estimates, their robust standard errors can differ substantially, which may impact the conclusions drawn from statistical inference.\n\nFor both versions of the Andersen-Gill model, the data must be structured as follows:\n\n| Subject | Time interval | Event | Stratum |\n|---------|---------------|-------|---------|\n| 1 | (0, 6\\] | 1 | 1 |\n| 1 | (6, 9\\] | 1 | 1 |\n| 1 | (9, 56\\] | 1 | 1 |\n| 1 | (56, 88\\] | 1 | 1 |\n| 2 | (0, 42\\] | 1 | 1 |\n| 2 | (42, 87\\] | 1 | 1 |\n| 2 | (87, 91\\] | 0 | 1 |\n\nThis can be visually represented:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/AG_lineplot.png){fig-align='center' width=80%}\n:::\n:::\n\n\nIn both versions of the Andersen-Gill model, each new time interval starts where the previous one ends.\n\n### Conditional models\n\n#### Prentice-Williams-Peterson model ([Prentice, Williams & 
Peterson 1981](https://academic.oup.com/biomet/article-abstract/68/2/373/260402?redirectedFrom=fulltext))\n\n- Conditional approach: incorporates conditional strata to account for ordering/dependence of events\n\n- Stratified baseline hazard $\\lambda_{0j}(t)$\n\n- Stratified regression coefficients $\\beta_j$: can be pooled ($\\beta$) or kept as event-specific ($\\beta_j$) in the output\n\n- Restricted risk set: contributions to the risk set for a subsequent event are restricted to only consider subjects that already experienced the previous event\n\n- Order of events is important\n\nThe Prentice-Williams-Peterson model can incorporate both overall and event-specific effects $\\beta_j$ for each covariate. An often made assumption is to set $\\beta_1 = \\beta_2 = ... = \\beta_k = \\beta$ to estimate a common parameter $\\beta$.\n\nDepending on the outcome of interest, Prentice, Williams and Peterson suggested two distinct models:\n\n1. **Total time model**\n\n$$\n\\lambda_{ij}(t) = \\lambda_{0j}(t) \\exp \\left( \\beta_j X_{ij}(t) \\right) \\\n$$\n\nThe total time variant of the Prentice-Williams-Peterson model uses the same time intervals as the counting process approach (Andersen-Gill model), which is useful for modelling the full time course ($t$) of the recurrent event process, i.e. the hazard of *any* recurrence.\n\nFor the total time model, the data must be structured as follows:\n\n| Subject | Time interval | Event | Stratum |\n|---------|---------------|-------|---------|\n| 1 | (0, 6\\] | 1 | 1 |\n| 1 | (6, 9\\] | 1 | 2 |\n| 1 | (9, 56\\] | 1 | 3 |\n| 1 | (56, 88\\] | 1 | 4 |\n| 2 | (0, 42\\] | 1 | 1 |\n| 2 | (42, 87\\] | 1 | 2 |\n| 2 | (87, 91\\] | 0 | 3 |\n\nThis can be visually represented:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/PWPtt_lineplot.png){fig-align='center' width=80%}\n:::\n:::\n\n\nAgain, in the total time model, each new time intervals starts where the previous one ends.\n\n2. 
**Gap time model**\n\n$$\n\\lambda_{ij}(t) = \\lambda_{0j}(t - t_{j-1}) \\exp \\left( \\beta_j X_{ij}(t) \\right) \\\n$$\n\nThe gap time variant of the Prentice-Williams-Peterson model uses time intervals that start at zero and end at the length of time until the next event, which is useful for modelling the time between each of the recurring events ($t - t_{j-1}$), i.e. the hazard of recurrence *after the previous event*.\n\nFor the gap time model, the data must be structured as follows:\n\n| Subject | Time interval | Event | Stratum |\n|---------|---------------|-------|---------|\n| 1 | (0, 6\\] | 1 | 1 |\n| 1 | (0, 3\\] | 1 | 2 |\n| 1 | (0, 47\\] | 1 | 3 |\n| 1 | (0, 32\\] | 1 | 4 |\n| 2 | (0, 42\\] | 1 | 1 |\n| 2 | (0, 45\\] | 1 | 2 |\n| 2 | (0, 3\\] | 0 | 3 |\n\nThis can be visually represented:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/PWPgt_lineplot.png){fig-align='center' width=80%}\n:::\n:::\n\n\nIn the gap time model, each time interval starts at zero and has a length equal to the gap time between two neighboring events.\n\n### Marginal models\n\n#### Wei-Lin-Weissfeld model ([Wei, Lin & Weissfeld 1989](https://www.jstor.org/stable/2290084))\n\n$$\n\\lambda_{ij}(t) = \\lambda_{0j}(t) \\exp \\left( \\beta_j X_{ij}(t) \\right) \\\n$$\n\n- Marginal approach: treats each (recurrent) event as having a separate, marginal process\n- Stratified baseline hazard $\\lambda_{0j}(t)$\n- Semi-restricted risk set: all subjects contribute follow-up times to all *potential* events, i.e. each subject is at risk for all *potential* events, regardless of how many events that subject actually experiences\n- Order of events is not important\n\nAlthough the Wei-Lin-Weissfeld model has it roots in competing risks analysis, it conveniently lends itself to model recurrent events as well. 
Like the Andersen-Gill model, the Wei-Lin-Weissfeld model also assumes **independence** of events, which is often not feasible in practice. In addition, it is assumed there is no specific order among the events or that the events are different types of events, and not necessarily *recurrent* events.\n\nLike the Prentice-Williams-Peterson models, the Wei-Lin-Weissfeld model can incorporate both overall and event-specific effects $\\beta_j$ for each covariate. An often made assumption is to set $\\beta_1 = \\beta_2 = ... = \\beta_k = \\beta$ to estimate a common parameter $\\beta$. Another approach is to combine event-specific effects $\\beta_j$ to get an estimator of the average treatment effect, as described in [Wei, Lin & Weissfeld 1989](https://www.jstor.org/stable/2290084) (this is not discussed further here).\n\nFor Wei-Lin-Weissfeld models, the data must be structured as follows:\n\n| Subject | Time interval | Event | Stratum |\n|---------|---------------|-------|---------|\n| 1 | (0, 6\\] | 1 | 1 |\n| 1 | (0, 9\\] | 1 | 2 |\n| 1 | (0, 56\\] | 1 | 3 |\n| 1 | (0, 88\\] | 1 | 4 |\n| 2 | (0, 42\\] | 1 | 1 |\n| 2 | (0, 87\\] | 1 | 2 |\n| 2 | (0, 91\\] | 0 | 3 |\n| 2 | (0, 91\\] | 0 | 4 |\n\nThis can be visually represented:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/WLW_lineplot.png){fig-align='center' width=80%}\n:::\n:::\n\n\nIn the Wei-Lin-Weissfeld model, each time intervals starts at zero and ends at its respective event time.\n\n### Overview of all models\n\nIn summary, the selection of the model to use would depend on the type of events, the importance of the order of the events and the time intervals to be analyzed. 
We made an effort to summarize the similarities and differences between the models in the table below.\n\n| | **AG** | **PWPtt** | **PWPgt** | **WLW** |\n|---------------|---------------|---------------|---------------|---------------|\n| **Approach** | counting process | conditional | conditional | marginal |\n| **Baseline hazard** | common | stratified | stratified | stratified |\n| **Regression coefficients** | common | stratified possible | stratified possible | stratified possible |\n| **Risk set** | unrestricted | restricted | restricted | semi-restricted |\n| **Time interval** | total time | total time | gap time | total time |\n| **Order of events** | not important | important | important | not important |\n| **Hazard ratio (HR)** | risk of any recurrence | risk of any recurrence | risk of recurrence after previous event | risk of event of any type, not necessarily recurrent event |\n\nNote that, because the ordering of events is not important in the Andersen-Gill and Wei-Lin-Weissfeld model, these models come with the assumption of **independence** of events. In contrast, the Prentice-Williams-Peterson models overcome the need for this assumption by capturing the dependence structure between recurrent events in conditional strata. Consequently, events are assumed to be *conditionally* independent in the Prentice-Williams-Peterson models.\n\nA nice visual representation of the stratification and time interval structure of each model is given below. 
The correct data structure is pivotal when modelling recurrent events and depends on the methodology you want to use, as illustrated in the figure.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/combined_lineplot.png){fig-align='center' width=80%}\n:::\n:::\n\n\n## Modelling recurrent events using the `survival` package\n\n### Data\n\nFor this tutorial we will use the [`bladder` data](https://rdrr.io/cran/survival/man/bladder.html) from the [`survival` R package](https://cran.r-project.org/web/packages/survival/survival.pdf), which captures recurrences of bladder cancer from a clinical trial for an oncolytic called thiotepa. The `bladder` data is regularly used by many statisticians to demonstrate methodology for recurrent event modelling. Somewhat confusingly, there are three versions of this data available:\n\n- `bladder1`: original data from the study on all subjects (294 records)\n\n- `bladder2`: data in Andersen-Gill format on subset of subjects with nonzero follow-up time (178 records)\n\n- `bladder`: data in Wei-Lin-Weissfeld format on subset of subjects with nonzero follow-up time (340 records)\n\nFor this tutorial, we will use `bladder2` to illustrate Andersen-Gill and Prentice-Williams-Peterson models, and `bladder` to illustrate the Wei-Lin-Weissfeld model.\n\nThe variables included in both datasets are:\n\n- **id**: patient id\n\n- **rx**: treatment group (1 = placebo, 2 = thiotepa)\n\n- **number**: initial number of tumors (8 = 8 or more)\n\n- **size**: size in cm of largest initial tumor\n\n- **start**: start of time interval; this variable is **not** present in `bladder`\n\n- **stop**: (recurrent) event or censoring time\n\n- **event**: event indicator (1 = event, 0 = censored)\n\n- **enum**: order of recurrence\n\nImportantly, both datasets collect the data in a **counting process** structure. 
This means that there is one record for each subject and time interval, where a time interval is defined as the time to its respective event (**event** = 1), or the time to follow-up if the event did not occur (**event** = 0).\n\nLet's look more closely at the `bladder2` and `bladder` data:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbladder2 <- survival::bladder2\ngt(head(bladder2, 6))\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n
idrxnumbersizestartstopeventenum
11130101
21210401
31110701
415101001
51410611
514161002
\n
\n```\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbladder2 %>%\n group_by(enum) %>% summarise(n = n()) %>% gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n\n \n\n \n\n \n\n \n \n
enumn
185
246
327
420
\n
\n```\n\n:::\n:::\n\n\nIn `bladder2`, in the Andersen-Gill format, each subject has a variable amount of records, depending on the amount of events that subject experienced.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbladder <- survival::bladder\ngt(head(bladder, 20))\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n \n
idrxnumbersizestopeventenum
1113101
1113102
1113103
1113104
2121401
2121402
2121403
2121404
3111701
3111702
3111703
3111704
41511001
41511002
41511003
41511004
5141611
51411002
51411003
51411004
\n
\n```\n\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbladder %>%\n group_by(enum) %>% summarise(n = n()) %>% gt()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n\n \n\n \n\n \n\n \n \n
enumn
185
285
385
485
\n
\n```\n\n:::\n:::\n\n\nIn `bladder`, in the Wei-Lin-Weissfeld format, each subject has four records, regardless of how many events that subject actually experienced. In addition, there is no `start` variable, as all time intervals start at zero.\n\n[**Note:**]{.underline} The variables **id**, **start** and **stop** were renamed to **subjid**, **tstart** and **tstop** to avoid using SAS key words as variable names.\n\n### Analysis\n\nIn SAS, any survival analysis based on the Cox proportional hazard model can be conducted using the `phreg` procedure. Hence, conveniently, when modelling time-to-event data with recurrent events, the same procedure can be used. The caveat here is that an adequate data structure is required, which must be in correspondence with the model you want to use.\n\nIn this section of the tutorial, we will explain how the arguments of the `phreg` procedure and data structure must be defined to fit every type of recurrent event model correctly.\n\n#### Andersen-Gill model\n\n1. 
**Improved Andersen-Gill model (LWYY model or proportional means/rates model)**\n\nFor the improved version of the Andersen-Gill model you must include:\n\n- `proc phreg data=bladder2 covs(aggregate)`\n- `model (tstart, tstop) * event(0) = 'predictors';`\n- `id subjid;`\n\nAnd the data structure must be:\n\n| Subjid | Time interval | Tstart | Tstop | Event |\n|--------|---------------|--------|-------|-------|\n| 1 | (0, 1\\] | 0 | 1 | 0 |\n| 2 | (0, 4\\] | 0 | 4 | 0 |\n| 3 | (0, 7\\] | 0 | 7 | 0 |\n| 4 | (0, 10\\] | 0 | 10 | 0 |\n| 5 | (0, 6\\] | 0 | 6 | 1 |\n| 5 | (6, 10\\] | 6 | 10 | 0 |\n\nWe will use the `bladder2` data for this.\n\n```sas\nproc phreg data=bladder2 covs(aggregate);\n\tclass rx (ref='1');\n\tmodel (tstart, tstop) * event(0) = rx size number /rl;\n\tid subjid;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/SAS_AG.png){fig-align='center' width=80%}\n:::\n:::\n\n\nBy including the `covs(aggregate)` option and setting `id subjid;`, SAS will compute a robust sandwich covariance and display robust standard error estimates in the output. Under the hood, the robust standard errors will consider all **subjid** clusters separately and ultimately sum up the score residuals for each distinct cluster.\n\n2. **Original Andersen-Gill model**\n\nThe original Andersen-Gill model of 1982 can be fitted by changing `covs(aggregate)` to `covs` in the procedure, while excluding `id subjid;`.\n\n```sas\nproc phreg data=bladder2 covs;\n\tclass rx (ref='1');\n\tmodel (tstart, tstop) * event(0) = rx size number /rl;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/SAS_AG_original.png){fig-align='center' width=80%}\n:::\n:::\n\n\nAlthough the original Andersen-Gill model does not consider separate **subjid** clusters, it still computes robust standard errors using the sandwich estimator. 
The resulting robust standard errors differ from those provided for the improved Andersen-Gill model, while the estimated coefficients remain perfectly unchanged.\n\n#### Prentice-Williams-Peterson model\n\n**Total time model**\n\nFor the Prentice-Williams-Peterson total time model you must include:\n\n- `proc phreg data=bladder2 covs(aggregate);`\n- `model (tstart, tstop) * event(0) = 'predictors';`\n- `id subjid;`\n- `strata enum;`\n\nAnd the data structure must be:\n\n| Subjid | Time interval | Tstart | Tstop | Event | Enum |\n|--------|---------------|--------|-------|-------|------|\n| 1 | (0, 1\\] | 0 | 1 | 0 | 1 |\n| 2 | (0, 4\\] | 0 | 4 | 0 | 1 |\n| 3 | (0, 7\\] | 0 | 7 | 0 | 1 |\n| 4 | (0, 10\\] | 0 | 10 | 0 | 1 |\n| 5 | (0, 6\\] | 0 | 6 | 1 | 1 |\n| 5 | (6, 10\\] | 6 | 10 | 0 | 2 |\n\nWe will use the `bladder2` data for this.\n\n```sas\nproc phreg data=bladder2 covs(aggregate);\n\tclass rx (ref='1');\n\tmodel (tstart, tstop) * event(0) = rx size number /rl; \n\tid subjid;\n\tstrata enum;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/SAS_PWPtt.png){fig-align='center' width=80%}\n:::\n:::\n\n\nThe conditional strata of the Prentice-Williams-Peterson model are set by `strata enum;` in the formula, where `enum` captures the ordering of recurrent events.\n\n**Gap time model**\n\nFor the Prentice-Williams-Peterson gap time model you must include:\n\n- `proc phreg data=bladder2 covs(aggregate);`\n- `model gtime * event(0) = 'predictors';`\n- `id subjid;`\n- `strata enum;`\n\nAnd the data structure must be:\n\n| Subjid | Time interval | Tstart | Tstop | Event | Enum |\n|--------|---------------|--------|-------|-------|------|\n| 1 | (0, 1\\] | 0 | 1 | 0 | 1 |\n| 2 | (0, 4\\] | 0 | 4 | 0 | 1 |\n| 3 | (0, 7\\] | 0 | 7 | 0 | 1 |\n| 4 | (0, 10\\] | 0 | 10 | 0 | 1 |\n| 5 | (0, 6\\] | 0 | 6 | 1 | 1 |\n| 5 | (0, 4\\] | 0 | 4 | 0 | 2 |\n\nThis data structure can be achieved in `bladder2` by adding a 
`gtime` variable.\n\n```sas\ndata bladder2;\n\tset bladder2;\n\tgtime = tstop - tstart;\nrun;\n```\n\nWe artificially set start = 0 for each gap time interval by including `gtime` instead of `(start, stop)` in the `model` statement.\n\n```sas\nproc phreg data=bladder2 covs(aggregate);\n\tclass rx (ref='1');\n\tmodel gtime * event(0) = rx size number/rl; \n\tid subjid;\n\tstrata enum;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/SAS_PWPgt.png){fig-align='center' width=80%}\n:::\n:::\n\n\n#### Wei-Lin-Weissfeld model\n\nFor the Wei-Lin-Weissfeld model you must include:\n\n- `proc phreg data=bladder covs(aggregate);`\n- `model tstop * event(0) = 'predictors';`\n- `id subjid;`\n- `strata enum;`\n\nAnd the data structure must be:\n\n| Subjid | Time interval | Tstart | Tstop | Event | Enum |\n|--------|---------------|--------|-------|-------|------|\n| 1 | (0, 1\\] | 0 | 1 | 0 | 1 |\n| 1 | (0, 1\\] | 0 | 1 | 0 | 2 |\n| 1 | (0, 1\\] | 0 | 1 | 0 | 3 |\n| 1 | (0, 1\\] | 0 | 1 | 0 | 4 |\n| 2 | (0, 4\\] | 0 | 4 | 0 | 1 |\n| 2 | (0, 4\\] | 0 | 4 | 0 | 2 |\n| 2 | (0, 4\\] | 0 | 4 | 0 | 3 |\n| 2 | (0, 4\\] | 0 | 4 | 0 | 4 |\n| 3 | (0, 7\\] | 0 | 7 | 0 | 1 |\n| 3 | (0, 7\\] | 0 | 7 | 0 | 2 |\n| 3 | (0, 7\\] | 0 | 7 | 0 | 3 |\n| 3 | (0, 7\\] | 0 | 7 | 0 | 4 |\n| 4 | (0, 10\\] | 0 | 10 | 0 | 1 |\n| 4 | (0, 10\\] | 0 | 10 | 0 | 2 |\n| 4 | (0, 10\\] | 0 | 10 | 0 | 3 |\n| 4 | (0, 10\\] | 0 | 10 | 0 | 4 |\n| 5 | (0, 6\\] | 0 | 6 | 1 | 1 |\n| 5 | (0, 10\\] | 0 | 10 | 0 | 2 |\n| 5 | (0, 10\\] | 0 | 10 | 0 | 3 |\n| 5 | (0, 10\\] | 0 | 10 | 0 | 4 |\n\nWe will use the `bladder` data for this.\n\n```sas\nproc phreg data=bladder covs(aggregate);\n\tclass rx (ref='1');\n\tmodel tstop * event(0) = rx size number /rl;\n\tid subjid;\n strata enum;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/SAS_WLW.png){fig-align='center' width=80%}\n:::\n:::\n\n\nImportantly, 
the strata of the Wei-Lin-Weissfeld model as set by `strata enum;` are substantially different from the conditional strata of the Prentice-Williams-Peterson model. The `enum` variable is now no longer assumed to be an ordinal variable.\n\n#### Important notes\n\n[**Note:**]{.underline} The `rl` option ensures the 95% confidence interval for the hazard ratio is displayed.\n\n[**Note:**]{.underline} If you want to display non-robust, model-based standard errors (like the ones given by default in R), you can do this by adding `covm` to the procedure statement.\n\n[**Note:**]{.underline} It may be useful to look at the **Summary of the Number of Event and Censored Values** to check whether the data stratification was rightly specified for your model. Examples for the Prentice-Williams-Peterson and Wei-Lin-Weissfeld models are given below.\n\nSummary for Prentice-Williams-Peterson models:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/SAS_PWP_structure.png){fig-align='center' width=60%}\n:::\n:::\n\n\nSummary for Wei-Lin-Weissfeld model:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/SAS_WLW_structure.png){fig-align='center' width=60%}\n:::\n:::\n\n\n[**Note:**]{.underline} R uses `ties = \"efron\"` by default, while SAS uses `ties = breslow` by default. If this argument remains unchanged in both software, it can cause differences in outcome. For more information, be sure to check the [CAMIS webpage](https://psiaims.github.io/CAMIS/Comp/r-sas_survival.html) on the comparison of Cox proportional hazards models in R and SAS.\n\n### Interpretation\n\nIn terms of interpretation, hazard ratios ($\\exp(\\beta_j)$) are often used when making inferences based on Cox proportional hazards models. 
Now, as you may remember from the overview presented earlier, it is important to recognize that each of the recurrent event models comes with a slightly different interpretation of the hazard ratio, as defined by the assumptions around the model.\n\n| | **AG** | **PWPtt** | **PWPgt** | **WLW** |\n|---------------|---------------|---------------|---------------|---------------|\n| **Hazard ratio (HR)** | risk of any recurrence | risk of any recurrence | risk of recurrence after previous event | risk of event of any type, not necessarily recurrent event |\n\nThis means that, for the `bladder` data, we can draw slightly different conclusions on the hazard ratio of the group treated with thiotepa (**rx** = 2) versus the placebo group (**rx** = 1).\n\n| Model | HR: rx2 vs rx1 | 95% CI | P-value |\n|-------------|----------------|----------------|---------|\n| AG | 0.631 | 0.381 to 1.047 | 0.0747 |\n| Original AG | 0.631 | 0.403 to 0.989 | 0.0447 |\n| PWPtt | 0.716 | 0.486 to 1.053 | 0.0898 |\n| PWPgt | 0.764 | 0.508 to 1.148 | 0.1952 |\n| WLW | 0.560 | 0.309 to 1.015 | 0.0560 |\n\nThese conclusions are:\n\n- **Andersen-Gill**: the risk of having *any new tumor recurrence* in the treatment group is 0.631 (0.381 - 1.047) times that of the placebo group\n\n- **Prentice-Williams-Peterson: total time**: the risk of having *any new tumor recurrence* in the treatment group is 0.716 (0.486 - 1.053) times that of the placebo group\n\n- **Prentice-Williams-Peterson: gap time**: the risk of having *a new tumor recurrence after a previous event* in the treatment group is 0.764 (0.508 - 1.148) times that of the placebo group\n\n- **Wei-Lin-Weissfeld**: the risk of having *any type of event* in the treatment group is 0.560 (0.309 - 1.015) times that of the placebo group\n\n[**Note:**]{.underline} The improved Andersen-Gill model (LWYY model or proportional means/rates model) is preferred over the original Andersen-Gill model.\n\n### Event-specific estimates\n\nFor the 
Prentice-Williams-Peterson and Wei-Lin-Weissfeld models we can incorporate both overall ($\\beta$) and event-specific ($\\beta_j$) effects for each covariate. To arrive at pooled model parameters these models assume that $\\beta_1 = \\beta_2 = ... = \\beta_k = \\beta$. Until now, we have only considered pooled model parameters, but given the underlying stratification of these two models in particular, it may be valuable to look into the event-specific estimates as well ([Amorim & Cai 2015](https://pubmed.ncbi.nlm.nih.gov/25501468/)).\n\nTo get event-specific estimates for the treatment effect (**rx**), we first need to introduce four new **rx** variables to the `bladder2` and `bladder` datasets, one for each stratum.\n\n```sas\ndata bladder2;\n\tset bladder2;\n\trx_enum1 = rx*(enum=1);\n\trx_enum2 = rx*(enum=2);\n\trx_enum3 = rx*(enum=3);\n\trx_enum4 = rx*(enum=4);\nrun;\n```\n\n```sas\ndata bladder;\n\tset bladder;\n\trx_enum1 = rx*(enum=1);\n\trx_enum2 = rx*(enum=2);\n\trx_enum3 = rx*(enum=3);\n\trx_enum4 = rx*(enum=4);\nrun;\n```\n\nWith these four interaction variables, we need to specify `rx_enum1-rx_enum4` in the formula and set `class enum / param=glm;` to output the event-specific estimates.\n\n#### Prentice-Williams-Peterson model\n\n**Total time model**\n\n```sas\nproc phreg data=bladder2 covs(aggregate);\n\tclass enum / param=glm;\n\tmodel (tstart, tstop) * event(0) = rx_enum1-rx_enum4 size number /rl; \n\tid subjid;\n\tstrata enum;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/SAS_PWPtt_stratified.png){fig-align='center' width=80%}\n:::\n:::\n\n\n**Gap time model**\n\n```sas\nproc phreg data=bladder2 covs(aggregate);\n\tclass enum / param=glm;\n\tmodel gtime * event(0) = rx_enum1-rx_enum4 size number/rl; \n\tid subjid;\n\tstrata enum;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/SAS_PWPgt_stratified.png){fig-align='center' 
width=80%}\n:::\n:::\n\n\n#### Wei-Lin-Weissfeld model\n\n```sas\nproc phreg data=bladder covs(aggregate);\n\tclass enum / param=glm;\n\tmodel tstop * event(0) = rx_enum1-rx_enum4 size number /rl;\n\tid subjid;\n strata enum;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/recurrent_events/SAS_WLW_stratified.png){fig-align='center' width=80%}\n:::\n:::\n\n\n## References\n\n[Amor 2023](https://www.lexjansen.com/phuse/2023/as/PAP_AS05.pdf). Eat, Sleep, R, Repeat.\n\n[Amorim & Cai 2015](https://pubmed.ncbi.nlm.nih.gov/25501468/). Modelling recurrent events: a tutorial for analysis in epidemiology. *International Journal of Epidemiology*. 2015 Feb;44(1):324-33.\n\n[Andersen & Gill 1982](https://www.jstor.org/stable/2240714). Cox’s Regression Model for Counting Processes: A Large Sample Study. *The Annals of Statistics*. 10(4):1100–1120.\n\n[bladder data](https://rdrr.io/cran/survival/man/bladder.html)\n\n[Lu & Shen 2018](https://www.lexjansen.com/pharmasug/2018/EP/PharmaSUG-2018-EP02.pdf). Application of Survival Analysis in Multiple Events Using SAS. *PharmaSUG 2018*.\n\n[Ozga et al. 2018](https://pubmed.ncbi.nlm.nih.gov/29301487/). A systematic comparison of recurrent event models for application to composite endpoints. *BMC Medical Research Methodoly*. 2018 Jan 4;18(1):2.\n\n[Prentice, Williams & Peterson 1981](https://www.jstor.org/stable/2335582). On the Regression Analysis of Multivariate Failure Time Data. *Biometrika*. 68(2):373–379.\n\n[survival package](https://cran.r-project.org/web/packages/survival/vignettes/survival.pdf)\n\n[Wei, Lin & Weissfeld 1989](https://www.jstor.org/stable/2290084). Regression Analysis of Multivariate Incomplete Failure Time Data by Modeling Marginal Distributions. *Journal of the American Statistical Association*. 
84(408):1065–1073.", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/SAS/rmst/execute-results/html.json b/_freeze/SAS/rmst/execute-results/html.json index cc8e6fe84..52a438970 100644 --- a/_freeze/SAS/rmst/execute-results/html.json +++ b/_freeze/SAS/rmst/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "de1644beb928be037656f04e29961733", + "hash": "45b6393a4386cf73ffb00264193eebe9", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Restricted Mean Survival Time (RMST) in SAS\"\ndate: last-modified\ndate-format: D MMMM, YYYY\nexecute: \n eval: false\n---\n\nUnder the situation where you time to event outcome has non-proportional hazards over time, the commonly used Cox Proportional Hazards regression analysis and the log-rank test can be invalid - especially when the survival curves are crossing. One alternative is to analyse the Restricted Mean Survival Time (RMST).\n\nThere are a few ways in SAS to estimate the RMST. A parametric approach is to use general estimating equations(GEE) modelling using linear or log-linear link functions and the IPCW or pseudo-value approach as described in Methods 1 and 2 below. Alternatively you can use a non-parametric approach using an Area Under the Curve (AUC) calculated for the Kaplan-Meier curves.\n\nFor treatment comparisons, the RMST can be compared across treatments and it was recommended by the FDA in draft guidance in 2020 for analysis of Acute Myeloid Leukemia (AML) [here](https://www.fda.gov/regulatory-information/search-fda-guidance-documents/acute-myeloid-leukemia-developing-drugs-and-biological-products-treatment). AML commonly has an initial higher rate of death following randomization, followed by a plateauing rate of death over time (Non-proportional hazards).\n\nThe main advantage of this method is its easy clinical interpretation (e.g. with an endpoint of time to death - we are measuring average time to death). 
The biggest disadvantage is you have to select a time at which to calculated the average over: this time is called `tau`. If data is not mature enough, you may get a unreliable result. In addition, one could accuse analysts of selecting `tau` such that you get the 'most significant' result in the direction you desire!\n\nReferences are found at the end of this page.\n\n### Common Mistakes doing RMST in SAS\n\n**Issue 1:** page 8615 of User's Guide for RMSTREG Procedure [here](https://support.sas.com/documentation/onlinedoc/stat/151/rmstreg.pdf) says it expects the event indicator (Status) to be 1=event (death time) and 0=censor. If you follow this guidance, then you must ensure that you use: `model time*status(0)` to ensure SAS knows that 0 is the censored observation.\n\nA common mistake is to get the (0) or (1) indicator the wrong way around. For example, it's common to use a `cnsr` variable which is set to 1=censored or 0=event. If this is used, then you must use\\\n`model time*cnsr(1)`! Always make sure you check the SAS output to verify your counts of Events and Censored observations is the correct way around.\n\nThroughout this page, we will use cnsr(1) approach.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/rmst/SASrmstreg2.png){fig-align='center' width=50%}\n:::\n:::\n\n\n**Issue 2:** page 8616 of User's Guide for RMSTREG Procedure [here](https://support.sas.com/documentation/onlinedoc/stat/151/rmstreg.pdf) tells us that if we omit the option `tau=xx` then SAS sets `tau` using the largest **event** time. However, what SAS actually does is use the largest `time` from either events or censored observations.\n\nThe selection of `tau` for RMST analysis is very important. It's the period of time the average (or AUC for proc lifetest) is calculated over. If events are no longer occurring on both treatment groups, then you may not be looking at the key time period of interest. 
Therefore, it is better practice, to calculate `tau` yourself and set this as an option in the SAS code, (commonly based on the minimum time of the last event observed in each treatment group).\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/rmst/SASrmstreg1.png){fig-align='center' width=50%}\n:::\n:::\n\n\n## Data used\n\nWe are using the lung_cancer.csv dataset found [here](CAMIS/data%20at%20main%20·%20PSIAIMS/CAMIS). If you tabulate the censoring flag variable `cnsr`\\\n(1: censored, 0=event), we have 165 events, and 63 censored values.\n\nWe only need the variables listed below\n\n- time - Time(days) to event\n\n- cnsr - 1=censored, 0=event\n\n- age - Age of subject\n\n- sex - 1=male, 2 = female\n\n- trt01pn - 1=Active, 2=Placebo\n\nFor example:\n\n| time | cnsr | trt01pn | age | sex |\n|------|------|---------|-----|-----|\n| 306 | 0 | 1 | 74 | 1 |\n| 455 | 0 | 1 | 68 | 1 |\n| 1010 | 1 | 1 | 56 | 1 |\n| 210 | 0 | 1 | 57 | 1 |\n\n## View your data - Kaplan-Meier Curves\n\nIt is good practice to first view the shape of your Kaplan-Meier curves. As you can see our treatment curves are crossing at approximately 300 days.\n\nIt is very important to pre-specify your approach for selection of `tau`. As you can see from the curves, if we compared the period 0 to 6 months, vs 0 to 18 months, we would get very different results for the treatment comparison.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc lifetest data=adcibc conftype=log;\n time time*cnsr(1);\n strata trt01pn;\nRun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/rmst/kmplot.png){fig-align='center' width=50%}\n:::\n:::\n\n\n## Setting tau\n\nThe code below calculates `tau` as the minimum time of the last event observed in each treatment group. This will be the period of time, our AUC will be calculated over. As cnsr=0 are events, we only select these observations. 
Below, the maximum event in treatment 1 = 883 days and in treatment 2 = 350 days. We therefore set `tau` = 350. This method avoids including in the AUC a period of time where events are no longer occurring in both treatments. You can see why setting `tau` is very important as we are likely to get very different AUCs calculating over the 350 day as opposed to the 883 day period!\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc sort data=lung_cancer (where=(cnsr=0)) out=tau1;\n by trt01pn time;\nrun;\n\ndata tau1 (keep=studyid trt01pn time);\n set tau1 ;\n by trt01pn time;\n if last.trt01pn then output;\nrun;\n\nproc sort data=tau1;\n by descending time;\nrun;\n\ndata tau2;\n set tau1 end=last;\n by descending time;\n if last then call symput(\"_tau\",put(time,best8.));\nrun;\n\n%put &_tau;\n350\n```\n:::\n\n\n## Method 1: Inverse Probability Censoring Weighting (IPCW) Estimation (proc rmstreg)\n\nThe Inverse probability censoring weighting estimation as per [Tian L, Zhao L, Wei LJ. Biostatistics 2014, 15, 222-233](https://pubmed.ncbi.nlm.nih.gov/24292992/) is found in SAS using `Proc rmstreg` and `method=ipcw`. This is using a generalized linear model (linear or log-linear options are available) to model right censored data over time. The estimation method uses : Generalized estimating equations (GEE).\n\nThis method uses Kaplan-Meier estimation to obtain weights and it has been shown that weighting in this way provides an unbiased estimate for an adjusted survival curve [Calkins 2018](https://pmc.ncbi.nlm.nih.gov/articles/PMC5845164/). See [Royston & Parmar (2013)](https://bmcmedresmethodol.biomedcentral.com/articles/10.1186/1471-2288-13-152) for more detail.\n\nTo calculate the stratified weights by treatment and use them in the iterative estimation process, you need to specify `strata=trt01pn`\n\nIn the output, it's important to check that your event/censoring flag is the right way around and tau is set as expected. 
(165 events and tau=335)\n\n### Linear link model - provides estimates of treatment differences\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc rmstreg data=adcibc tau=&_tau;\n class trt01pn sex;\n model time*cnsr(1) =trt01pn sex age /link=linear method=ipcw (strata=trt01pn);\n lsmeans trt01pn/pdiff=control('2') cl alpha=0.05;\n ods output lsmeans=lsm diffs= diff;\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/rmst/rmstreg_output1.png){fig-align='center' width=50%}\n:::\n:::\n\n\nThe above model results in a `restricted mean survival time` estimate of 257.16 days on treatment 1 vs 267.04 days on treatment 2. The difference (Active-Placebo) is -9.88 days (95% CI: -39.0 to 19.25, p=0.5061). Hence, there is no evidence of a difference between the treatments with respect to RMST when we look over the Kaplan-Meier plot from 0 to 350 days.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/rmst/rmstreg_output2.png){fig-align='center' width=50%}\n:::\n:::\n\n\n### Log link model - provides estimates of treatment ratios\n\nThe code is similar to above. We include the option `exp` on the lsmeans row, since this back transforms (exponentiates) the estimates of the RMST (and 95% CI) for each treatment and for the treatment difference back onto the days scale (rather than log(days) scale). NOTE: if you use the `ilink` option and link=log method, this does the same (given the link is log), however, does not back transform the treatment differences. It is currently believed that SAS is using the delta method from the treatment arm RMSTs, to estimate the standard error for the RMST ratio.\n\nSimilar to the linear model, we obtain results of a `restricted mean survival time` estimate of 255.21 days on treatment 1 vs 264.75 days on treatment 2. The difference (Active-Placebo) on the log scale is -0.03667 (95% CI: -0.1493 to 0.07596, p=0.5234) but this is hard to interpret. 
Hence, once back transformed, the treatment ratio (Active/Placebo) is 0.9640 (95% CI: 0.8613 to 1.0789, p=0.5234).\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc rmstreg data=adcibc tau=&_tau;\n class trt01pn sex;\n model time*cnsr(1) =trt01pn sex age /link=log method=ipcw (strata=trt01pn);\n lsmeans trt01pn/pdiff=control('2') cl alpha=0.05 exp;\n ods output lsmeans=lsm diffs= diff;\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/rmst/rmstreg_output3.png){fig-align='center' width=50%}\n:::\n:::\n\n\n## Method 2: Pseudo Value Estimation (proc rmstreg)\n\nThe pseudo-observations method [Andersen, Hansen and Klein 2004](https://pubmed.ncbi.nlm.nih.gov/15690989/), is available in SAS using the method=pv option. You use the link=linear or link=log options and output is similarly interpreted as described for Method 1 IPCW method.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc rmstreg data=adcibc tau=&_tau;\n class trt01pn sex ;\n model time*cnsr(1) =trt01pn sex age /link=linear method=pv;\n lsmeans trt01pn/pdiff=control('2') cl alpha=0.05;\n ods output lsmeans=lsm diffs= diff;\nrun;\n```\n:::\n\n\n## Method 3: RMST Area under the curve method (proc lifetest)\n\nA non-parametric method to calculate the RMST is available using the AUC Kaplan-Meier curves.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc lifetest data=adcibc plots=(rmst s) rmst (tau=&_tau);\n time time*cnsr(1) ;\n strata trt01pn / diff=control('2') ;\nrun;\n```\n:::\n\n\nAs shown below, SAS only outputs the estimates and SEs. However, a 95% CI (assuming a normal distribution) can be calculated in an additional datastep using estimate +/- 1.96 \\* SE.\n\nThe AUC method results in a `restricted mean survival time` estimate of 248.2156 days on treatment 1 vs 272.9520 days on treatment 2. The difference (Active-Placebo) is -24.7364. Calculating 95% CIs, for this results in 95% CI: -54.3971 to 4.9243, p=0.1021). 
Hence, there is no evidence of a difference between the treatments with respect to RMST when we look over the Kaplan-Meier plot from 0 to 350 days.\n\nVarious multiple testing p-value adjustments are also available using this method.\n\n\n::: {.cell layout-align=\"center\"}\n\n:::\n\n\n### References\n\n1. [On Biostatistics and Clinical trials Monday April 19 2021](https://onbiostatistics.blogspot.com/2021/04/restricted-mean-survival-time-rmst-for.html)\n2. [Royston & Parmar (2013)](https://bmcmedresmethodol.biomedcentral.com/articles/10.1186/1471-2288-13-152)\n3. [SAS User's Guide for RMSTREG Procedure](https://support.sas.com/documentation/onlinedoc/stat/151/rmstreg.pdf)\n4. [Analyzing RMST using SAS/STAT, Changbin Guo and Yu LIang, SAS Institute, Paper SAS3013-2019](https://support.sas.com/resources/papers/proceedings19/3013-20)\n5. [Huang & Kuan (2017)](https://onlinelibrary.wiley.com/doi/abs/10.1002/pst.1846)\n6. [Tian L, Zhao L, Wei LJ. Biostatistics 2014, 15, 222-233](https://pubmed.ncbi.nlm.nih.gov/24292992/)\n7. [FDA 2020 guidance](https://www.fda.gov/regulatory-information/search-fda-guidance-documents/acute-myeloid-leukemia-developing-drugs-and-biological-products-treatment) and [download](https://www.fda.gov/media/140821/download)\n8. [Andersen, Hansen and Klein 2004](https://pubmed.ncbi.nlm.nih.gov/15690989/)\n9. [Calkins KL, Canan CE, Moore RD, Lesko CR, Lau B. An application of restricted mean survival time in a competing risks setting: comparing time to ART initiation by injection drug use. BMC Med Res Methodol. 2018;18:27. 
doi: 10.1186/s12874-018-0484-z](https://pmc.ncbi.nlm.nih.gov/articles/PMC5845164/)\n\n### Version\n\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-23\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n R rmst [?] \n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n\n R ── Package was removed from disk.\n\n─ External software ──────────────────────────────────────────────────────────\n setting value\n SAS 9.04.01M7P08062020\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n", + "markdown": "---\ntitle: \"Restricted Mean Survival Time (RMST) in SAS\"\ndate: last-modified\ndate-format: D MMMM, YYYY\n---\n\nUnder the situation where you time to event outcome has non-proportional hazards over time, the commonly used Cox Proportional Hazards regression analysis and the log-rank test can be invalid - especially when the survival curves are crossing. One alternative is to analyse the Restricted Mean Survival Time (RMST).\n\nThere are a few ways in SAS to estimate the RMST. A parametric approach is to use general estimating equations(GEE) modelling using linear or log-linear link functions and the IPCW or pseudo-value approach as described in Methods 1 and 2 below. 
Alternatively you can use a non-parametric approach using an Area Under the Curve (AUC) calculated for the Kaplan-Meier curves.\n\nFor treatment comparisons, the RMST can be compared across treatments and it was recommended by the FDA in draft guidance in 2020 for analysis of Acute Myeloid Leukemia (AML) [here](https://www.fda.gov/regulatory-information/search-fda-guidance-documents/acute-myeloid-leukemia-developing-drugs-and-biological-products-treatment). AML commonly has an initial higher rate of death following randomization, followed by a plateauing rate of death over time (Non-proportional hazards).\n\nThe main advantage of this method is its easy clinical interpretation (e.g. with an endpoint of time to death - we are measuring average time to death). The biggest disadvantage is you have to select a time at which to calculated the average over: this time is called `tau`. If data is not mature enough, you may get a unreliable result. In addition, one could accuse analysts of selecting `tau` such that you get the 'most significant' result in the direction you desire!\n\nReferences are found at the end of this page.\n\n### Common Mistakes doing RMST in SAS\n\n**Issue 1:** page 8615 of User's Guide for RMSTREG Procedure [here](https://support.sas.com/documentation/onlinedoc/stat/151/rmstreg.pdf) says it expects the event indicator (Status) to be 1=event (death time) and 0=censor. If you follow this guidance, then you must ensure that you use: `model time*status(0)` to ensure SAS knows that 0 is the censored observation.\n\nA common mistake is to get the (0) or (1) indicator the wrong way around. For example, it's common to use a `cnsr` variable which is set to 1=censored or 0=event. If this is used, then you must use\\\n`model time*cnsr(1)`! 
Always make sure you check the SAS output to verify your counts of Events and Censored observations are the correct way around.
If you tabulate the censoring flag variable `cnsr`\\\n(1: censored, 0=event), we have 165 events, and 63 censored values.\n\nWe only need the variables listed below\n\n- time - Time(days) to event\n\n- cnsr - 1=censored, 0=event\n\n- age - Age of subject\n\n- sex - 1=male, 2 = female\n\n- trt01pn - 1=Active, 2=Placebo\n\nFor example:\n\n| time | cnsr | trt01pn | age | sex |\n|------|------|---------|-----|-----|\n| 306 | 0 | 1 | 74 | 1 |\n| 455 | 0 | 1 | 68 | 1 |\n| 1010 | 1 | 1 | 56 | 1 |\n| 210 | 0 | 1 | 57 | 1 |\n\n## View your data - Kaplan-Meier Curves\n\nIt is good practice to first view the shape of your Kaplan-Meier curves. As you can see our treatment curves are crossing at approximately 300 days.\n\nIt is very important to pre-specify your approach for selection of `tau`. As you can see from the curves, if we compared the period 0 to 6 months, vs 0 to 18 months, we would get very different results for the treatment comparison.\n\n```sas\nproc lifetest data=adcibc conftype=log;\n time time*cnsr(1);\n strata trt01pn;\nRun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/rmst/kmplot.png){fig-align='center' width=50%}\n:::\n:::\n\n\n## Setting tau\n\nThe code below calculates `tau` as the minimum time of the last event observed in each treatment group. This will be the period of time, our AUC will be calculated over. As cnsr=0 are events, we only select these observations. Below, the maximum event in treatment 1 = 883 days and in treatment 2 = 350 days. We therefore set `tau` = 350. This method avoids including in the AUC a period of time where events are no longer occurring in both treatments. 
You can see why setting `tau` is very important as we are likely to get very different AUCs calculating over the 350 day as opposed to the 883 day period!\n\n```sas\nproc sort data=lung_cancer (where=(cnsr=0)) out=tau1;\n by trt01pn time;\nrun;\n\ndata tau1 (keep=studyid trt01pn time);\n set tau1 ;\n by trt01pn time;\n if last.trt01pn then output;\nrun;\n\nproc sort data=tau1;\n by descending time;\nrun;\n\ndata tau2;\n set tau1 end=last;\n by descending time;\n if last then call symput(\"_tau\",put(time,best8.));\nrun;\n\n%put &_tau;\n350\n```\n\n## Method 1: Inverse Probability Censoring Weighting (IPCW) Estimation (proc rmstreg)\n\nThe Inverse probability censoring weighting estimation as per [Tian L, Zhao L, Wei LJ. Biostatistics 2014, 15, 222-233](https://pubmed.ncbi.nlm.nih.gov/24292992/) is found in SAS using `Proc rmstreg` and `method=ipcw`. This is using a generalized linear model (linear or log-linear options are available) to model right censored data over time. The estimation method uses : Generalized estimating equations (GEE).\n\nThis method uses Kaplan-Meier estimation to obtain weights and it has been shown that weighting in this way provides an unbiased estimate for an adjusted survival curve [Calkins 2018](https://pmc.ncbi.nlm.nih.gov/articles/PMC5845164/). See [Royston & Parmar (2013)](https://bmcmedresmethodol.biomedcentral.com/articles/10.1186/1471-2288-13-152) for more detail.\n\nTo calculate the stratified weights by treatment and use them in the iterative estimation process, you need to specify `strata=trt01pn`\n\nIn the output, its important to check that your event/censoring flag is the right was around and tau is set as expected. 
(165 events and tau=335)\n\n### Linear link model - provides estimates of treatment differences\n\n```sas\nproc rmstreg data=adcibc tau=&_tau;\n class trt01pn sex;\n model time*cnsr(1) =trt01pn sex age /link=linear method=ipcw (strata=trt01pn);\n lsmeans trt01pn/pdiff=control('2') cl alpha=0.05;\n ods output lsmeans=lsm diffs= diff;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/rmst/rmstreg_output1.png){fig-align='center' width=50%}\n:::\n:::\n\n\nThe above model results in a `restricted mean survival time` estimate of 257.16 days on treatment 1 vs 267.04 days on treatment 2. The difference (Active-Placebo) is -9.88 days (95% CI: -39.0 to 19.25, p=0.5061). Hence, there is no evidence of a difference between the treatments with respect to RMST when we look over the Kaplan-Meier plot from 0 to 350 days.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/rmst/rmstreg_output2.png){fig-align='center' width=50%}\n:::\n:::\n\n\n### Log link model - provides estimates of treatment ratios\n\nThe code is similar to above. We include the option `exp` on the lsmeans row, since this back transforms (exponentiates) the estimates of the RMST (and 95% CI) for each treatment and for the treatment difference back onto the days scale (rather than log(days) scale). NOTE: if you use the `ilink` option and link=log method, this does the same (given the link is log), however, does not back transform the treatment differences. It is currently believed that SAS is using the delta method from the treatment arm RMSTs, to estimate the standard error for the RMST ratio.\n\nSimilar to the linear model, we obtain results of a `restricted mean survival time` estimate of 255.21 days on treatment 1 vs 264.75 days on treatment 2. The difference (Active-Placebo) on the log scale is -0.03667 (95% CI: -0.1493 to 0.07596, p=0.5234) but this is hard to interpret. 
Hence, once back transformed, the treatment ratio (Active/Placebo) is 0.9640 (95% CI: 0.8613 to 1.0789, p=0.5234).\n\n```sas\nproc rmstreg data=adcibc tau=&_tau;\n class trt01pn sex;\n model time*cnsr(1) =trt01pn sex age /link=log method=ipcw (strata=trt01pn);\n lsmeans trt01pn/pdiff=control('2') cl alpha=0.05 exp;\n ods output lsmeans=lsm diffs= diff;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/rmst/rmstreg_output3.png){fig-align='center' width=50%}\n:::\n:::\n\n\n## Method 2: Pseudo Value Estimation (proc rmstreg)\n\nThe pseudo-observations method [Andersen, Hansen and Klein 2004](https://pubmed.ncbi.nlm.nih.gov/15690989/), is available in SAS using the method=pv option. You use the link=linear or link=log options and output is similarly interpreted as described for Method 1 IPCW method.\n\n```sas\nproc rmstreg data=adcibc tau=&_tau;\n class trt01pn sex ;\n model time*cnsr(1) =trt01pn sex age /link=linear method=pv;\n lsmeans trt01pn/pdiff=control('2') cl alpha=0.05;\n ods output lsmeans=lsm diffs= diff;\nrun;\n```\n\n## Method 3: RMST Area under the curve method (proc lifetest)\n\nA non-parametric method to calculate the RMST is available using the AUC Kaplan-Meier curves.\n\n```sas\nproc lifetest data=adcibc plots=(rmst s) rmst (tau=&_tau);\n time time*cnsr(1) ;\n strata trt01pn / diff=control('2') ;\nrun;\n```\n\nAs shown below, SAS only outputs the estimates and SEs. However, a 95% CI (assuming a normal distribution) can be calculated in an additional datastep using estimate +/- 1.96 \\* SE.\n\nThe AUC method results in a `restricted mean survival time` estimate of 248.2156 days on treatment 1 vs 272.9520 days on treatment 2. The difference (Active-Placebo) is -24.7364. Calculating 95% CIs, for this results in 95% CI: -54.3971 to 4.9243, p=0.1021). 
Hence, there is no evidence of a difference between the treatments with respect to RMST when we look over the Kaplan-Meier plot from 0 to 350 days.\n\nVarious multiple testing p-value adjustments are also available using this method.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/rmst/rmstreg_output4.png){fig-align='center' width=50%}\n:::\n:::\n\n\n### References\n\n1. [On Biostatistics and Clinical trials Monday April 19 2021](https://onbiostatistics.blogspot.com/2021/04/restricted-mean-survival-time-rmst-for.html)\n2. [Royston & Parmar (2013)](https://bmcmedresmethodol.biomedcentral.com/articles/10.1186/1471-2288-13-152)\n3. [SAS User's Guide for RMSTREG Procedure](https://support.sas.com/documentation/onlinedoc/stat/151/rmstreg.pdf)\n4. [Analyzing RMST using SAS/STAT, Changbin Guo and Yu LIang, SAS Institute, Paper SAS3013-2019](https://support.sas.com/resources/papers/proceedings19/3013-20)\n5. [Huang & Kuan (2017)](https://onlinelibrary.wiley.com/doi/abs/10.1002/pst.1846)\n6. [Tian L, Zhao L, Wei LJ. Biostatistics 2014, 15, 222-233](https://pubmed.ncbi.nlm.nih.gov/24292992/)\n7. [FDA 2020 guidance](https://www.fda.gov/regulatory-information/search-fda-guidance-documents/acute-myeloid-leukemia-developing-drugs-and-biological-products-treatment) and [download](https://www.fda.gov/media/140821/download)\n8. [Andersen, Hansen and Klein 2004](https://pubmed.ncbi.nlm.nih.gov/15690989/)\n9. [Calkins KL, Canan CE, Moore RD, Lesko CR, Lau B. An application of restricted mean survival time in a competing risks setting: comparing time to ART initiation by injection drug use. BMC Med Res Methodol. 2018;18:27. 
doi: 10.1186/s12874-018-0484-z](https://pmc.ncbi.nlm.nih.gov/articles/PMC5845164/)\n\n### Version\n\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os Ubuntu 24.04.3 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-17\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n R rmst [?] \n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.2/lib/R/library\n\n R ── Package was removed from disk.\n\n─ External software ──────────────────────────────────────────────────────────\n setting value\n SAS 9.04.01M7P08062020\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/SAS/rounding/execute-results/html.json b/_freeze/SAS/rounding/execute-results/html.json index 6454b6a1b..262b1d744 100644 --- a/_freeze/SAS/rounding/execute-results/html.json +++ b/_freeze/SAS/rounding/execute-results/html.json @@ -1,15 +1,15 @@ { - "hash": "98ddfd266bf38a4833012790f646b159", + "hash": "3da45b6f911cdc50253b0ec17f1c110f", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Rounding in SAS\"\nexecute: \n eval: false\n---\n\nSAS provides two distinct rounding functions that handle tie-breaking (values exactly halfway between two numbers) differently:\n\n- **`round`**: Uses \"round half away from zero\" - 12.5 becomes 13, -12.5 becomes -13\n- **`rounde`**: Uses \"round half to even\" (banker's rounding) - 12.5 becomes 12, 13.5 becomes 14\n\nThe key difference 
appears when rounding values that are exactly halfway between two possible results.\n\n\n\n\n\n\n\n```txt\n\ndata XXX;\n input my_number;\n datalines;\n 2.2\n 3.99\n 1.2345\n 7.876\n 13.8739\n ;\n\ndata xxx2;\n set xxx;\n do decimal_places = 1, 2, 3;\n round_result = round(my_number, 10**(-decimal_places));\n rounde_result = rounde(my_number, decimal_places);\n output;\n end;\nrun;\n\nproc print data=xxx2;\nrun;\n```\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n```{=html}\n
\n\n```\n\n:::\n:::\n\n\nThe `round` function can produce unexpected results due to floating-point precision limitations. This typically occurs with: - Very large numbers (beyond 15-16 significant digits) - Results of arithmetic operations that introduce small rounding errors - Numbers near the machine epsilon precision threshold\n\nThese precision issues are inherent to how computers store decimal numbers, not specific flaws in SAS.\n\n\n\n\n\n```txt\n\ndata floating_point_precision;\n /* Example 1: Large number precision loss */\n input_val = 32768.0156255;\n expected = 32768.015626;\n actual = round(input_val, 1e-6);\n difference = actual - expected;\n example = 'Large number';\n output;\n \n /* Example 2: Subtraction then rounding */\n input_val = 2048.1375 - 2048; /* = 0.1375 */\n expected = 0.138;\n actual = round(input_val, 1e-3);\n difference = actual - expected;\n example = 'After subtraction';\n output;\nrun;\n\nproc print data=floating_point_precision;\n format input_val expected actual difference 13.8;\n var example input_val expected actual difference;\nrun;\n```\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n```{=html}\n
\n\n```\n\n:::\n:::\n\n\nThe following analysis systematically identifies cases where `round` fails by testing combinations of large integers and decimal fractions. Numbers with trailing digits near machine precision limits are most susceptible to incorrect rounding.\n\n\n\n\n\n```txt\n\ndata dum1;\n int1=0; output;\n do i=1 to 25;\n int1=2**i; output;\n end;\n keep int1;\nrun;\n\ndata dum2;\n do round_digits=1 to 7;\n *x.xxx5 should be rounded up, or replace 5 to 4.99 which should be rounded down;\n dec1=2**(-round_digits)+10**(-round_digits-1)*5;\n output;\n end;\n keep dec1 round_digits;\nrun;\n\nproc sql;\n create table incorrect_round2(where=(rounded\n\n```\n\n:::\n:::\n\n\nThis example demonstrates rounding behavior with results from common arithmetic operations (addition, subtraction, multiplication, division) across different precision levels, showing how accumulated floating-point errors can affect rounding accuracy.\n\n\n\n\n\n```txt\n\ndata simple;\n input num1 num2;\n datalines;\n 3.14159 2.71828\n 1.99999 0.33333\n 5.55555 4.44444\n 7.87654 1.23456\n 0.12345 9.87654\n 6.66666 3.33333\n ;\nrun;\n\ndata results;\n set simple;\n operator='+'; num3=num1+num2; output;\n operator='-'; num3=num1-num2; output;\n operator='*'; num3=num1*num2; output;\n operator='/'; num3=num1/num2; output;\nrun;\n\ndata final;\n set results;\n rounded_001 = round(num3, 0.001); /* Round to nearest 0.001 */\n rounded_01 = round(num3, 0.01); /* Round to nearest 0.01 */\n rounded_1 = round(num3, 0.1); /* Round to nearest 0.1 */\n rounded_int = round(num3, 1); /* Round to nearest integer */\nrun;\n\nproc print data=final;\n format num1 num2 num3 rounded_001 rounded_01 rounded_1 rounded_int 12.6;\nrun;\n```\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n```{=html}\n
\n\n```\n\n:::\n:::\n\n\nDespite these floating-point precision issues with very large numbers, the `round` function remains reliable for most practical statistical applications. Users should be aware of potential precision limitations when working with numbers beyond 15-16 significant digits or results from complex arithmetic operations.\n\n**References**\n\n[SAS Documentation: ROUND Function](https://documentation.sas.com/doc/en/lefunctionsref/3.2/p0tj6cmga7p8qln1ejh6ebevm0c9.htm)\n\n[SAS Documentation: ROUNDE Function](https://documentation.sas.com/doc/en/lefunctionsref/3.2/n02h2rvq30k869n164ccmcwvhqyk.htm)\n\n[How to Round Numbers in SAS - SAS Example Code](https://sasexamplecode.com/how-to-round-numbers-in-sas/)", + "markdown": "---\ntitle: \"Rounding in SAS\"\n---\n\nSAS provides two distinct rounding functions that handle tie-breaking (values exactly halfway between two numbers) differently:\n\n- **`round`**: Uses \"round half away from zero\" - 12.5 becomes 13, -12.5 becomes -13\n- **`rounde`**: Uses \"round half to even\" (banker's rounding) - 12.5 becomes 12, 13.5 becomes 14\n\nThe key difference appears when rounding values that are exactly halfway between two possible results.\n\n\n\n\n\n\n\n```txt\n\ndata XXX;\n input my_number;\n datalines;\n 2.2\n 3.99\n 1.2345\n 7.876\n 13.8739\n ;\n\ndata xxx2;\n set xxx;\n do decimal_places = 1, 2, 3;\n round_result = round(my_number, 10**(-decimal_places));\n rounde_result = rounde(my_number, decimal_places);\n output;\n end;\nrun;\n\nproc print data=xxx2;\nrun;\n```\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n```{=html}\n
\n\n```\n\n:::\n:::\n\n\nThe `round` function can produce unexpected results due to floating-point precision limitations. This typically occurs with: - Very large numbers (beyond 15-16 significant digits) - Results of arithmetic operations that introduce small rounding errors - Numbers near the machine epsilon precision threshold\n\nThese precision issues are inherent to how computers store decimal numbers, not specific flaws in SAS.\n\n\n\n\n\n```txt\n\ndata floating_point_precision;\n /* Example 1: Large number precision loss */\n input_val = 32768.0156255;\n expected = 32768.015626;\n actual = round(input_val, 1e-6);\n difference = actual - expected;\n example = 'Large number';\n output;\n \n /* Example 2: Subtraction then rounding */\n input_val = 2048.1375 - 2048; /* = 0.1375 */\n expected = 0.138;\n actual = round(input_val, 1e-3);\n difference = actual - expected;\n example = 'After subtraction';\n output;\nrun;\n\nproc print data=floating_point_precision;\n format input_val expected actual difference 13.8;\n var example input_val expected actual difference;\nrun;\n```\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n```{=html}\n
\n\n```\n\n:::\n:::\n\n\nThe following analysis systematically identifies cases where `round` fails by testing combinations of large integers and decimal fractions. Numbers with trailing digits near machine precision limits are most susceptible to incorrect rounding.\n\n\n\n\n\n```txt\n\ndata dum1;\n int1=0; output;\n do i=1 to 25;\n int1=2**i; output;\n end;\n keep int1;\nrun;\n\ndata dum2;\n do round_digits=1 to 7;\n *x.xxx5 should be rounded up, or replace 5 to 4.99 which should be rounded down;\n dec1=2**(-round_digits)+10**(-round_digits-1)*5;\n output;\n end;\n keep dec1 round_digits;\nrun;\n\nproc sql;\n create table incorrect_round2(where=(rounded\n\n```\n\n:::\n:::\n\n\nThis example demonstrates rounding behavior with results from common arithmetic operations (addition, subtraction, multiplication, division) across different precision levels, showing how accumulated floating-point errors can affect rounding accuracy.\n\n\n\n\n\n```txt\n\ndata simple;\n input num1 num2;\n datalines;\n 3.14159 2.71828\n 1.99999 0.33333\n 5.55555 4.44444\n 7.87654 1.23456\n 0.12345 9.87654\n 6.66666 3.33333\n ;\nrun;\n\ndata results;\n set simple;\n operator='+'; num3=num1+num2; output;\n operator='-'; num3=num1-num2; output;\n operator='*'; num3=num1*num2; output;\n operator='/'; num3=num1/num2; output;\nrun;\n\ndata final;\n set results;\n rounded_001 = round(num3, 0.001); /* Round to nearest 0.001 */\n rounded_01 = round(num3, 0.01); /* Round to nearest 0.01 */\n rounded_1 = round(num3, 0.1); /* Round to nearest 0.1 */\n rounded_int = round(num3, 1); /* Round to nearest integer */\nrun;\n\nproc print data=final;\n format num1 num2 num3 rounded_001 rounded_01 rounded_1 rounded_int 12.6;\nrun;\n```\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n```{=html}\n
\n\n```\n\n:::\n:::\n\n\nDespite these floating-point precision issues with very large numbers, the `round` function remains reliable for most practical statistical applications. Users should be aware of potential precision limitations when working with numbers beyond 15-16 significant digits or results from complex arithmetic operations.\n\n**References**\n\n[SAS Documentation: ROUND Function](https://documentation.sas.com/doc/en/lefunctionsref/3.2/p0tj6cmga7p8qln1ejh6ebevm0c9.htm)\n\n[SAS Documentation: ROUNDE Function](https://documentation.sas.com/doc/en/lefunctionsref/3.2/n02h2rvq30k869n164ccmcwvhqyk.htm)\n\n[How to Round Numbers in SAS - SAS Example Code](https://sasexamplecode.com/how-to-round-numbers-in-sas/)", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" ], "includes": { "include-in-header": [ - "\n\n\n" + "\n" ] }, "engineDependencies": {}, diff --git a/_freeze/SAS/sample_s_StatXact_test_of_trends/execute-results/html.json b/_freeze/SAS/sample_s_StatXact_test_of_trends/execute-results/html.json index 89999527a..fd75118a8 100644 --- a/_freeze/SAS/sample_s_StatXact_test_of_trends/execute-results/html.json +++ b/_freeze/SAS/sample_s_StatXact_test_of_trends/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "7ffb4a4e31086830dd9ab569af56f880", + "hash": "c1df96e01820ba562955852390e37768", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Sample size for K ordered binomial populations - Cochran-Armitage trend test\"\nexecute: \n eval: false\n---\n\n\n\n### Cochran-Armitage trend test {.unnumbered}\n\nCochran-Armitage trend test (Cochran-Armitage Z-test) is used to check if there is a trend in proportions across levels of a categorical, ordered variable. It's mostly used to analyse data where one variable is a binomial and the other is an ordinal variable. 
Basically, the test checks if the proportions vary in a specific direction (increasing or decreasing) when the ordered variable changes.\n\nIn the examples below we will calculate the extract and asymptotic powers and sample size of the Cochran-Armitage trend test. Analysis is not available in SAS and need to be run in StatXact PROC (module to SAS from Cytel).\n\nThe below parameters are need for the calculations:\n\n- ω_i - dose level/exposure for he i-th group\n- n_i - number of subjects in i-th group\n- π_1 - baseline response probability\n- π_i - response (binomial) probability for i-th group\n- λ (lambda) - slope for the logit model, can be interpreted as change in the log-odds of response per unit increase in dose\n- α - significance level for one-sided trend test\n- β - required power\n\nIn StatXact we can directly specify all of the response probabilities, or we can specify the baseline probability and use the below logit model with prespecified slope lambda to derive the rest:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/samplesize/StatXact_logit.PNG){fig-align='center' width=75%}\n:::\n:::\n\n\n#### Example 1 - Power for dose finding pilot study {.unnumbered}\n\nLet's consider an example of a dose-finding phase I clinical trial of patients with advanced chronic disease. At the lowest dose level the response probability is known to be 0.001. The drug will be considered useful if the log odds of response increase by 0.5 per unit increase in dose (that defines the lambda). The study design assumes doubling the dose up to maximum of 16 units. Sample sizes of 10, 10, 10, 5 and 2, are proposed for the five dose levels, to restrict the total number of subjects at the two highest dose levels due to possible side effects. A one-sided Cochran-Armitage trend test at the 2.5% significance level will performed at the end of the study. 
What is the power?\n\nDesign parameter are as below:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/samplesize/StatXact3.PNG){fig-align='center' width=75%}\n:::\n:::\n\n\nSAS code:\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc sxpowerbin;\n tr/ex;\n palpha 0.025;\n k 5;\n H0 0.001;\n H1 logodds /val=0.5;\n scores 1 2 4 8 16;\n size1 10;\n size2 10;\n size3 10;\n size4 5;\n size5 2;\nrun;\n```\n:::\n\n\nOutput from StatXact and results:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/samplesize/StatXact1.PNG){fig-align='center' width=75%}\n:::\n:::\n\n\nThe exact power is 48%, whilst the asymptotic power is 84%. Here, using the asymptotic power would have led to a false sense of security concerning the adequacy of the proposed sample sizes for carrying out this pilot study.\n\n#### Example 2 - Power for cohort study of effects of low dose radiation {.unnumbered}\n\nLet's consider an example of a long-term follow-up study of subjects exposed to low-dose radiation in Japan (adapted from Landis, Heyman and Koch, 1978). The cohort was partitioned into four groups based on average radiation exposures of 0, 5, 30 and 75 rads. There were 2500, 3600, 1450 and 410 subjects, respectively, in the four dose groups. Subjects were classified as responders if they died from leukemia and non-responders if they died from other causes. We want detect a trend parameter of 0.049 on the logit scale, given a background response rate of 1 in 10,000. A one-sided Cochran-Armitage trend test at the 5% significance level was performed at the end of the study. 
What was the power?\n\nDesign parameter are as below:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/samplesize/StatXact5.PNG){fig-align='center' width=75%}\n:::\n:::\n\n\nSAS code:\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc sxpowerbin;\n tr/ex dist_file=tr;\n palpha 0.05;\n k 4;\n H0 0.0001;\n H1 logodds /val=0.049;\n scores 0 5 30 75;\n size1 2500;\n size2 3600;\n size3 1450;\n size4 410;\nrun;\n```\n:::\n\n\nOutput from StatXact and results:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/samplesize/StatXact4.PNG){fig-align='center' width=75%}\n:::\n:::\n\n\nAsymptotic power of the test is 77%, a considerable overestimate of the actual power, 60%.\n\n#### Example 3 - Sample size calculation for trend test {.unnumbered}\n\nLet's consider an example of the study where the design parameters are as below:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/samplesize/StatXact6.PNG){fig-align='center' width=75%}\n:::\n:::\n\n\nWhat is the required sample size to achieve the power of 80% with the significance level 5%?\n\nSAS code:\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc sxpowerbin ti =15;\n tr/ex;\n palpha 0.05;\n beta 0.8;\n k 5;\n H0 0.10;\n H1 user/val=0.13 0.16 0.19 0.22;\n scores 0 1 2 4 8;\nrun;\n```\n:::\n\n\n(option ti = 15 limits the maximum time of computation to 15 min)\n\nOutput from StatXact and results:\n\n\n::: {.cell layout-align=\"center\"}\n\n:::\n\n\nSample size of 108 is needed to obtain the required power.\n\n#### References {.unnumbered}\n\nAll of the examples are adapted from StatXact 12 PROCs Manual.", + "markdown": "---\ntitle: \"Sample size for K ordered binomial populations - Cochran-Armitage trend test\"\n---\n\n\n\n### Cochran-Armitage trend test {.unnumbered}\n\nCochran-Armitage trend test (Cochran-Armitage Z-test) is used to check if there is a trend in proportions across levels of a categorical, ordered variable. 
It's mostly used to analyse data where one variable is a binomial and the other is an ordinal variable. Basically, the test checks if the proportions vary in a specific direction (increasing or decreasing) when the ordered variable changes.\n\nIn the examples below we will calculate the extract and asymptotic powers and sample size of the Cochran-Armitage trend test. Analysis is not available in SAS and need to be run in StatXact PROC (module to SAS from Cytel).\n\nThe below parameters are need for the calculations:\n\n- ω_i - dose level/exposure for he i-th group\n- n_i - number of subjects in i-th group\n- π_1 - baseline response probability\n- π_i - response (binomial) probability for i-th group\n- λ (lambda) - slope for the logit model, can be interpreted as change in the log-odds of response per unit increase in dose\n- α - significance level for one-sided trend test\n- β - required power\n\nIn StatXact we can directly specify all of the response probabilities, or we can specify the baseline probability and use the below logit model with prespecified slope lambda to derive the rest:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/samplesize/StatXact_logit.PNG){fig-align='center' width=75%}\n:::\n:::\n\n\n#### Example 1 - Power for dose finding pilot study {.unnumbered}\n\nLet's consider an example of a dose-finding phase I clinical trial of patients with advanced chronic disease. At the lowest dose level the response probability is known to be 0.001. The drug will be considered useful if the log odds of response increase by 0.5 per unit increase in dose (that defines the lambda). The study design assumes doubling the dose up to maximum of 16 units. Sample sizes of 10, 10, 10, 5 and 2, are proposed for the five dose levels, to restrict the total number of subjects at the two highest dose levels due to possible side effects. 
A one-sided Cochran-Armitage trend test at the 2.5% significance level will performed at the end of the study. What is the power?\n\nDesign parameter are as below:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/samplesize/StatXact3.PNG){fig-align='center' width=75%}\n:::\n:::\n\n\nSAS code:\n\n```sas\nproc sxpowerbin;\n tr/ex;\n palpha 0.025;\n k 5;\n H0 0.001;\n H1 logodds /val=0.5;\n scores 1 2 4 8 16;\n size1 10;\n size2 10;\n size3 10;\n size4 5;\n size5 2;\nrun;\n```\n\nOutput from StatXact and results:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/samplesize/StatXact1.PNG){fig-align='center' width=75%}\n:::\n:::\n\n\nThe exact power is 48%, whilst the asymptotic power is 84%. Here, using the asymptotic power would have led to a false sense of security concerning the adequacy of the proposed sample sizes for carrying out this pilot study.\n\n#### Example 2 - Power for cohort study of effects of low dose radiation {.unnumbered}\n\nLet's consider an example of a long-term follow-up study of subjects exposed to low-dose radiation in Japan (adapted from Landis, Heyman and Koch, 1978). The cohort was partitioned into four groups based on average radiation exposures of 0, 5, 30 and 75 rads. There were 2500, 3600, 1450 and 410 subjects, respectively, in the four dose groups. Subjects were classified as responders if they died from leukemia and non-responders if they died from other causes. We want detect a trend parameter of 0.049 on the logit scale, given a background response rate of 1 in 10,000. A one-sided Cochran-Armitage trend test at the 5% significance level was performed at the end of the study. 
What was the power?\n\nDesign parameter are as below:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/samplesize/StatXact5.PNG){fig-align='center' width=75%}\n:::\n:::\n\n\nSAS code:\n\n```sas\nproc sxpowerbin;\n tr/ex dist_file=tr;\n palpha 0.05;\n k 4;\n H0 0.0001;\n H1 logodds /val=0.049;\n scores 0 5 30 75;\n size1 2500;\n size2 3600;\n size3 1450;\n size4 410;\nrun;\n```\n\nOutput from StatXact and results:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/samplesize/StatXact4.PNG){fig-align='center' width=75%}\n:::\n:::\n\n\nAsymptotic power of the test is 77%, a considerable overestimate of the actual power, 60%.\n\n#### Example 3 - Sample size calculation for trend test {.unnumbered}\n\nLet's consider an example of the study where the design parameters are as below:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/samplesize/StatXact6.PNG){fig-align='center' width=75%}\n:::\n:::\n\n\nWhat is the required sample size to achieve the power of 80% with the significance level 5%?\n\nSAS code:\n\n```sas\nproc sxpowerbin ti =15;\n tr/ex;\n palpha 0.05;\n beta 0.8;\n k 5;\n H0 0.10;\n H1 user/val=0.13 0.16 0.19 0.22;\n scores 0 1 2 4 8;\nrun;\n```\n\n(option ti = 15 limits the maximum time of computation to 15 min)\n\nOutput from StatXact and results:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/samplesize/StatXact7.PNG){fig-align='center' width=75%}\n:::\n:::\n\n\nSample size of 108 is needed to obtain the required power.\n\n#### References {.unnumbered}\n\nAll of the examples are adapted from StatXact 12 PROCs Manual.", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/SAS/sample_s_equivalence/execute-results/html.json b/_freeze/SAS/sample_s_equivalence/execute-results/html.json index 086617005..187fa50d6 100644 --- a/_freeze/SAS/sample_s_equivalence/execute-results/html.json +++ 
b/_freeze/SAS/sample_s_equivalence/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "6edc42328607e0564255fd4ea68f7977", + "hash": "d4d0eebad61eaaf79a6e73fbb68a1757", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Sample Size for Equivalence Trials in SAS\"\ndate: last-modified\ndate-format: D MMMM, YYYY\nexecute: \n eval: false\n---\n\n# Introduction\n\nPROC POWER^1^ can be used for sample size calculations for equivalence testing^2^. Note that equivalence testing is different to bioequivalence testing. For bioequivalence see [TOST page](Sample%20Size%20Calculation%20for%20Average%20Bioequivalence). SAS can calculate equivalence testing sample size for:\n\n- One sample test: Is a mean equivalent to a target value, within a set tolerance\n\n- Two independent sample test: Comparison of means for equivalence, when the true mean difference is zero, but within a set tolerance.\n\n- One sample test: Is a proportion equivalent to a target value, within a set tolerance\n\n- Paired-sample test: Comparison of means for equivalence, within a set tolerance\n\n**PROC POWER CANNOT CALCULATE SAMPLE SIZE FOR:**\n\n- Two independent sample test: Comparison of means for equivalence, when the true mean difference is non-zero, and within a set tolerance. (It will give you a result, but this doesn't align to other software/books calculations)\n\n- Two independent sample test: Comparison of proportions for equivalence, when the true mean difference is zero or non-zero, and within a set tolerance\n\n# **One Sample Equivalence Test of a Mean**\n\nFor a mean $\\mu_1$, we are testing if $\\mu_1$ is equivalent to some value $\\theta$ within a tolerance of $\\delta$, in other words: $H_0: |\\mu_1| \\leq \\theta+\\delta$ versus $H_1: |\\mu1|>\\theta+\\delta$\n\nA reformulation of a treatment pill, needs to have a weight equivalent to a target value of $\\theta$ =130 mg. 
Weight is assumed normally distributed and an acceptable weight is between 110 mg and 150 mg, hence $\\delta=20mg$. The standard deviation of the weight is 50 mg. What sample size is needed assuming an alpha level of 5% with 80% power to conclude the weight is within the margin $\\delta$ (the tablet weight is equivalent to 130 milligram). The below shows a sample size of 55 pills is required.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nPROC POWER ;\n onesamplemeans test=equiv \n lower = 110 \n upper = 150 \n mean = 130 \n stddev = 50 \n ntotal = . \n power = 0.8 \n alpha = 0.05; \nRUN;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/sample_s_equivalence/SAS1mean.png){fig-align='center' width=50%}\n:::\n:::\n\n\n# **Two Sample Equivalence Test of Means**\n\n## **Comparing two independent sample means for parallel design (unpaired samples) where true difference between treatments is believed to be zero and within a tolerance**\n\nIn the most common scenario, SDs are assumed known and the same in both treatment groups.\n\nFor a mean in group 1 of $\\mu_1$, and a mean in group 2 of $\\mu_2$, we are testing if the absolute difference between the treatments, $|\\mu_2-\\mu_1|$ is equivalent to some value $\\theta$, within a tolerance of $\\delta$. In this example $\\theta =0$, in other words:\n\n$H_0:|\\mu_2-\\mu_1| \\le \\theta+\\delta$ versus $H_1: |\\mu_2-\\mu_1|\\gt \\theta+\\delta$ (where $\\theta=0$ ) or\n\n$H_0:|\\mu_2-\\mu_1| \\le \\delta$ versus $H_1: |\\mu_2-\\mu_1|\\gt \\delta$\n\n### Example where $\\theta = 0$\n\nIt is anticipated that patients will have the same mean diastolic BP of 96 mmHg on both the new drug and the active comparator, hence $\\theta=0$. It is also anticipated that the SD ($\\sigma$) of the diastolic BP is approximately 8 mmHg. The decision is made by clinical to accept equivalence if the difference found between the treatments is less than 5 mmHg, hence $\\delta$=5. 
How many patients are required for an 80% power and an overall significance level of 5%?\n\nA total sample size of 90 is recommended, which equates to a sample size of 45 patients per treatment group. Notice how SAS asks for the `lower` and `upper` bounds, these are derived by using the meandiff $\\theta$+/- the acceptable equivalence limit $\\delta$ (which is stated as 5 mmHg above).\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nPROC POWER ;\n twosamplemeans test=equiv_diff \n lower = -5 \n upper = 5 \n meandiff=0 \n stddev = 8 \n ntotal = . \n power = 0.8 \n alpha = 0.05; \nRUN;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/sample_s_equivalence/SAS2mean_independant.png){fig-align='center' width=50%}\n:::\n:::\n\n\n## **Comparing two independent sample means for parallel design (unpaired samples) where true difference between treatments is believed NOT to be zero and within a tolerance**\n\nIn the most common scenario, SDs are assumed known and the same in both treatment groups.\n\nFor a mean in group 1 of $\\mu_1$, and a mean in group 2 of $\\mu_2$, we are testing if the absolute difference between the treatments, $|\\mu_2-\\mu_1|$ is equivalent to some value $\\theta$ within a tolerance of $\\delta$, in other words:\\\n$H_0:|\\mu_2-\\mu_1| \\le \\theta+\\delta$ versus $H_1: |\\mu_2-\\mu_1|\\gt \\theta+\\delta$.\n\n### Example where $\\theta \\neq 0$\n\nIn this example, you would expect that you could apply similar code to above, but with `lower` & `upper` not equidistant from the mean difference, however, doing so does not give the right answer. If you consult SAS literature, it states that the below code is doing Two One-Sided Tests (TOST) Bioequivalence testing. 
To date, we have not been able to program equivalence with $\\theta \\neq 0$ in SAS and get a sample size that matches the literature.\n\nInstead of the hypothesis: $H_0:|\\mu_2-\\mu_1| \\le \\theta+\\delta$ versus $H_1: |\\mu_2-\\mu_1|\\gt \\theta+\\delta$.\n\nThe sample size calculation in this example is aligned to TOST hypotheses:\n\n$H_01: \\mu_2 - \\mu_1 \\ge -\\theta$ vs $H_11:\\mu_2-\\mu_1 \\lt -\\theta$\n\nAND $H_02: \\mu_2 - \\mu_1 \\le \\theta$ vs $H_12:\\mu_2-\\mu_1 \\gt \\theta$\n\nSee [TOST page](Sample%20Size%20Calculation%20for%20Average%20Bioequivalence) for more detail.\n\nA client is interested in conducting a clinical trial to compare two cholesterol lowering agents for treatment of hypercholesterolemic patients through a parallel design. The primary efficacy parameter is a low-density lipidprotein cholesterol (LDL-C). For establishing equivalence, suppose the true mean difference is 0.01 (1%) and the equivalence limit is 0.05 (5%). Assuming SD = 0.1 (10%), how many patients are required for an 80% power and an overall significance level of 5%?\n\nBelow shows a sample size of 140 patients in Total (70 per treatment group).\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nPROC POWER ;\n twosamplemeans test=equiv_diff \n lower = -0.04 \n upper = 0.06 \n meandiff = 0.01 \n stddev = 0.1 \n ntotal = . \n power = 0.8 \n alpha = 0.05; \nRUN;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/sample_s_equivalence/SAS2mean_tost1.png){fig-align='center' width=50%}\n:::\n:::\n\n\n# **Comparing means for crossover design (paired)**\n\nHere we assume there is no carry-over effect and that the variance is known.\n\n## **Estimating the within patient variance and correlation.**\n\nIt is important to differentiate here between the within patient SD and the SD of the difference. 
We may need to recalculate one to the other, depending on the case.\n\nWith no carry-over, then an approximation could be: Variance of the difference = 2x Within Patient Variance. The variance within a patient can be estimated from the within subject residual mean square after fitting the model including visit, period and treatment. For example, using Proc mixed `repeated visit / r sub=usubjid`, and `ods select r`, gives you blocks of the estimate R matrix (covariances between residuals).\n\nSAS cannot do sample size in this scenario without also having the within subject correlation. It's common in this scenario to set this equivalent to 0.5. More investigation is required to determine why this is also need to be specified as in this scenario, the correlation should not be required.\n\n### Example\n\nLet's consider a standard standard two-sequence, two period crossover design for trials to establish therapeutic equivalence between a test drug and a standard therapy. The sponsor is interested in having an 80% power for establishing equivalence. Based on the results from previous trials, it is estimated that the variance (of the difference) is 0.2 (20%). Suppose that the true mean difference is -0.1 (-10%) and the equivalence limit is 0.25 (25%). What is the required sample size, assuming significance level of 5%?\n\nThe below shows a sample size of 8 patients is required.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nPROC POWER;\n pairedmeans test=equiv_diff \n lower = -35 \n upper = 15 \n meandiff=-10 \n stddev = 20 \n npairs = . \n corr=0.5\n power = 0.8 \n alpha = 0.05; \nRUN;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/sample_s_equivalence/SAS2crossovermeans.png){fig-align='center' width=50%}\n:::\n:::\n\n\n### References\n\n1. [PROC POWER SAS online help](https://support.sas.com/documentation/cdl/en/statug/63347/HTML/default/viewer.htm#statug_power_sect010.html)\n2. 
[Sample Size Calculation Using SAS® for equivalence](https://support.sas.com/kb/50/700.html#:~:text=Power%20analysis%20and%20sample%20size%20determination%20for%20equivalence,be%20conducted%20using%20PROC%20FREQ%20or%20PROC%20TTEST.)\n\n### Version\n\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-23\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n R sample_s_equivalence [?] \n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n\n R ── Package was removed from disk.\n\n─ External software ──────────────────────────────────────────────────────────\n setting value\n SAS 9.04.01M7P08062020\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n", + "markdown": "---\ntitle: \"Sample Size for Equivalence Trials in SAS\"\ndate: last-modified\ndate-format: D MMMM, YYYY\n---\n\n# Introduction\n\nPROC POWER^1^ can be used for sample size calculations for equivalence testing^2^. Note that equivalence testing is different to bioequivalence testing. For bioequivalence see [TOST page](Sample%20Size%20Calculation%20for%20Average%20Bioequivalence). 
SAS can calculate equivalence testing sample size for:\n\n- One sample test: Is a mean equivalent to a target value, within a set tolerance\n\n- Two independent sample test: Comparison of means for equivalence, when the true mean difference is zero, but within a set tolerance.\n\n- One sample test: Is a proportion equivalent to a target value, within a set tolerance\n\n- Paired-sample test: Comparison of means for equivalence, within a set tolerance\n\n**PROC POWER CANNOT CALCULATE SAMPLE SIZE FOR:**\n\n- Two independent sample test: Comparison of means for equivalence, when the true mean difference is non-zero, and within a set tolerance. (It will give you a result, but this doesn't align to other software/books calculations)\n\n- Two independent sample test: Comparison of proportions for equivalence, when the true mean difference is zero or non-zero, and within a set tolerance\n\n# **One Sample Equivalence Test of a Mean**\n\nFor a mean $\\mu_1$, we are testing if $\\mu_1$ is equivalent to some value $\\theta$ within a tolerance of $\\delta$, in other words: $H_0: |\\mu_1| \\leq \\theta+\\delta$ versus $H_1: |\\mu1|>\\theta+\\delta$\n\nA reformulation of a treatment pill, needs to have a weight equivalent to a target value of $\\theta$ =130 mg. Weight is assumed normally distributed and an acceptable weight is between 110 mg and 150 mg, hence $\\delta=20mg$. The standard deviation of the weight is 50 mg. What sample size is needed assuming an alpha level of 5% with 80% power to conclude the weight is within the margin $\\delta$ (the tablet weight is equivalent to 130 milligram). The below shows a sample size of 55 pills is required.\n\n```sas\nPROC POWER ;\n onesamplemeans test=equiv \n lower = 110 \n upper = 150 \n mean = 130 \n stddev = 50 \n ntotal = . 
\n power = 0.8 \n alpha = 0.05; \nRUN;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/sample_s_equivalence/SAS1mean.png){fig-align='center' width=50%}\n:::\n:::\n\n\n# **Two Sample Equivalence Test of Means**\n\n## **Comparing two independent sample means for parallel design (unpaired samples) where true difference between treatments is believed to be zero and within a tolerance**\n\nIn the most common scenario, SDs are assumed known and the same in both treatment groups.\n\nFor a mean in group 1 of $\\mu_1$, and a mean in group 2 of $\\mu_2$, we are testing if the absolute difference between the treatments, $|\\mu_2-\\mu_1|$ is equivalent to some value $\\theta$, within a tolerance of $\\delta$. In this example $\\theta =0$, in other words:\n\n$H_0:|\\mu_2-\\mu_1| \\le \\theta+\\delta$ versus $H_1: |\\mu_2-\\mu_1|\\gt \\theta+\\delta$ (where $\\theta=0$ ) or\n\n$H_0:|\\mu_2-\\mu_1| \\le \\delta$ versus $H_1: |\\mu_2-\\mu_1|\\gt \\delta$\n\n### Example where $\\theta = 0$\n\nIt is anticipated that patients will have the same mean diastolic BP of 96 mmHg on both the new drug and the active comparator, hence $\\theta=0$. It is also anticipated that the SD ($\\sigma$) of the diastolic BP is approximately 8 mmHg. The decision is made by clinical to accept equivalence if the difference found between the treatments is less than 5 mmHg, hence $\\delta$=5. How many patients are required for an 80% power and an overall significance level of 5%?\n\nA total sample size of 90 is recommended, which equates to a sample size of 45 patients per treatment group. Notice how SAS asks for the `lower` and `upper` bounds, these are derived by using the meandiff $\\theta$+/- the acceptable equivalence limit $\\delta$ (which is stated as 5 mmHg above).\n\n```sas\nPROC POWER ;\n twosamplemeans test=equiv_diff \n lower = -5 \n upper = 5 \n meandiff=0 \n stddev = 8 \n ntotal = . 
\n power = 0.8 \n alpha = 0.05; \nRUN;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/sample_s_equivalence/SAS2mean_independant.png){fig-align='center' width=50%}\n:::\n:::\n\n\n## **Comparing two independent sample means for parallel design (unpaired samples) where true difference between treatments is believed NOT to be zero and within a tolerance**\n\nIn the most common scenario, SDs are assumed known and the same in both treatment groups.\n\nFor a mean in group 1 of $\\mu_1$, and a mean in group 2 of $\\mu_2$, we are testing if the absolute difference between the treatments, $|\\mu_2-\\mu_1|$ is equivalent to some value $\\theta$ within a tolerance of $\\delta$, in other words:\\\n$H_0:|\\mu_2-\\mu_1| \\le \\theta+\\delta$ versus $H_1: |\\mu_2-\\mu_1|\\gt \\theta+\\delta$.\n\n### Example where $\\theta \\neq 0$\n\nIn this example, you would expect that you could apply similar code to above, but with `lower` & `upper` not equidistant from the mean difference, however, doing so does not give the right answer. If you consult SAS literature, it states that the below code is doing Two One-Sided Tests (TOST) Bioequivalence testing. To date, we have not been able to program equivalence with $\\theta \\neq 0$ in SAS and get a sample size that matches the literature.\n\nInstead of the hypothesis: $H_0:|\\mu_2-\\mu_1| \\le \\theta+\\delta$ versus $H_1: |\\mu_2-\\mu_1|\\gt \\theta+\\delta$.\n\nThe sample size calculation in this example is aligned to TOST hypotheses:\n\n$H_01: \\mu_2 - \\mu_1 \\ge -\\theta$ vs $H_11:\\mu_2-\\mu_1 \\lt -\\theta$\n\nAND $H_02: \\mu_2 - \\mu_1 \\le \\theta$ vs $H_12:\\mu_2-\\mu_1 \\gt \\theta$\n\nSee [TOST page](Sample%20Size%20Calculation%20for%20Average%20Bioequivalence) for more detail.\n\nA client is interested in conducting a clinical trial to compare two cholesterol lowering agents for treatment of hypercholesterolemic patients through a parallel design. 
The primary efficacy parameter is a low-density lipidprotein cholesterol (LDL-C). For establishing equivalence, suppose the true mean difference is 0.01 (1%) and the equivalence limit is 0.05 (5%). Assuming SD = 0.1 (10%), how many patients are required for an 80% power and an overall significance level of 5%?\n\nBelow shows a sample size of 140 patients in Total (70 per treatment group).\n\n```sas\nPROC POWER ;\n twosamplemeans test=equiv_diff \n lower = -0.04 \n upper = 0.06 \n meandiff = 0.01 \n stddev = 0.1 \n ntotal = . \n power = 0.8 \n alpha = 0.05; \nRUN;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/sample_s_equivalence/SAS2mean_tost1.png){fig-align='center' width=50%}\n:::\n:::\n\n\n# **Comparing means for crossover design (paired)**\n\nHere we assume there is no carry-over effect and that the variance is known.\n\n## **Estimating the within patient variance and correlation.**\n\nIt is important to differentiate here between the within patient SD and the SD of the difference. We may need to recalculate one to the other, depending on the case.\n\nWith no carry-over, then an approximation could be: Variance of the difference = 2x Within Patient Variance. The variance within a patient can be estimated from the within subject residual mean square after fitting the model including visit, period and treatment. For example, using Proc mixed `repeated visit / r sub=usubjid`, and `ods select r`, gives you blocks of the estimate R matrix (covariances between residuals).\n\nSAS cannot do sample size in this scenario without also having the within subject correlation. It's common in this scenario to set this equivalent to 0.5. 
More investigation is required to determine why this is also need to be specified as in this scenario, the correlation should not be required.\n\n### Example\n\nLet's consider a standard standard two-sequence, two period crossover design for trials to establish therapeutic equivalence between a test drug and a standard therapy. The sponsor is interested in having an 80% power for establishing equivalence. Based on the results from previous trials, it is estimated that the variance (of the difference) is 0.2 (20%). Suppose that the true mean difference is -0.1 (-10%) and the equivalence limit is 0.25 (25%). What is the required sample size, assuming significance level of 5%?\n\nThe below shows a sample size of 8 patients is required.\n\n```sas\nPROC POWER;\n pairedmeans test=equiv_diff \n lower = -35 \n upper = 15 \n meandiff=-10 \n stddev = 20 \n npairs = . \n corr=0.5\n power = 0.8 \n alpha = 0.05; \nRUN;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/sample_s_equivalence/SAS2crossovermeans.png){fig-align='center' width=50%}\n:::\n:::\n\n\n### References\n\n1. [PROC POWER SAS online help](https://support.sas.com/documentation/cdl/en/statug/63347/HTML/default/viewer.htm#statug_power_sect010.html)\n2. 
[Sample Size Calculation Using SAS® for equivalence](https://support.sas.com/kb/50/700.html#:~:text=Power%20analysis%20and%20sample%20size%20determination%20for%20equivalence,be%20conducted%20using%20PROC%20FREQ%20or%20PROC%20TTEST.)\n\n### Version\n\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os Ubuntu 24.04.3 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-17\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n R sample_s_equivalence [?] \n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.2/lib/R/library\n\n R ── Package was removed from disk.\n\n─ External software ──────────────────────────────────────────────────────────\n setting value\n SAS 9.04.01M7P08062020\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/SAS/sample_s_noninferiority/execute-results/html.json b/_freeze/SAS/sample_s_noninferiority/execute-results/html.json index 9a758d910..ab69db207 100644 --- a/_freeze/SAS/sample_s_noninferiority/execute-results/html.json +++ b/_freeze/SAS/sample_s_noninferiority/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "b824a5317ad6734636613f305267f100", + "hash": "c530949a46002eddcd75bcb853c63a1c", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Sample Size for Non-Inferiority Trials in SAS\"\ndate: last-modified\ndate-format: D MMMM, YYYY\nexecute: \n eval: false\n---\n\n# Introduction\n\nPROC POWER^1^ can be 
used for sample size calculations for non-inferiority testing. See ^2^ for explanation of non-inferiority and how to perform Sample size in SAS (including comparing proportions). Below we give 2 sample size examples for the following types of studies:\n\n- two-sample comparison of means for Non-inferiority (i.e. testing if one treatment mean is non-inferior to the another treatment mean).\n\n- Paired-sample comparison of means (i.e. 2 treatment means recorded on 1 group of patients are equivalent within a set tolerance)\n\n# **Two Sample Non-inferiority test: Comparing means for parallel design (unpaired)**\n\nThis example is a sample size calculation for the following hypotheses: $H_0:\\mu2-\\mu1\\le -\\theta$ versus $H_1: \\mu2-\\mu1\\gt -\\theta$.\n\nA client is interested in conducting a clinical trial to compare two cholesterol lowering agents for treatment of hypercholesterolemic patients through a parallel design. The primary efficacy parameter is a low-density lipidprotein cholesterol (LDL-C). We will consider the situation where the intended trial is for testing noninferiority. For establishing it, suppose the true mean difference is 0 and the noninferiority margin is chosen to be -0.05 (-5%). Assuming SD = 0.1, how many patients are required for an 80% power and an overall significance level of 5%?\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nPROC POWER; \n twosamplemeans \n test=equiv_diff \n lower = 91\n upper = 101 \n meandiff=96 \n stddev = 8\n ntotal = . \n power = 0.8\n alpha = 0.05;\nRUN;\n```\n:::\n\n\nAs shown below, a total sample size of 102 is recommended, which equates to 51 in each group.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/sample_s_noninferiority/2_sample_parallelgroup_means.png){fig-align='center' width=50%}\n:::\n:::\n\n\n# **Comparing means for crossover design (paired)**\n\nHere we assume there is no carry-over effect and that the variance is known. For more information see^3^. 
There is no obvious way in SAS to do cross over non-inferiority, however, given the one sided test, you can half the alpha using the equiv_diff option to give you the non-inferiority sample size.\n\n## **Estimating the within patient variance and correlation.**\n\nLet's consider a standard two-sequence, two period crossover design. Suppose that the sponsor is interested in showing non-inferiority of the test drug against the reference with the non-inferiority margin -20%. Assume power of 80%. Based on the results from previous trials, it is estimated that the variance (of the difference) is 0.2 (20%). Suppose that the true mean difference is -0.1 (-10%). What is the required sample size, assuming significance level of 5%?\n\nAlpha = 0.025 is used below, instead of 0.05 because you are doing non-inferiority (a one sided test). Note that this is still the sample size for alpha=0.05. The below shows a sample size of 13 patients is required.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\npairedmeans \ntest=equiv_diff\n lower = -0.3\n upper = 0.1\n meandiff = -0.1\n stddev = 0.2\n corr = 0.5\n alpha = 0.025\n npairs = .\n power = 0.8;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/sample_s_noninferiority/2_sample_crossover_means.png){fig-align='center' width=50%}\n:::\n:::\n\n\n### References\n\n1. [PROC POWER SAS online help](https://support.sas.com/documentation/cdl/en/statug/63347/HTML/default/viewer.htm#statug_power_sect010.html)\n2. [Sample Size Calculation Using SAS® for non-inferiority](https://support.sas.com/kb/48/616.html)\n3. 
[Sample Size for Cross over non-inferiority](https://support.sas.com/documentation/cdl/en/statug/63347/HTML/default/viewer.htm#statug_power_a0000001004.htm)\n\n### Version\n\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-23\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n R sample_s_noninferiority [?] \n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n\n R ── Package was removed from disk.\n\n─ External software ──────────────────────────────────────────────────────────\n setting value\n SAS 9.04.01M7P08062020\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n", + "markdown": "---\ntitle: \"Sample Size for Non-Inferiority Trials in SAS\"\ndate: last-modified\ndate-format: D MMMM, YYYY\n---\n\n# Introduction\n\nPROC POWER^1^ can be used for sample size calculations for non-inferiority testing. See ^2^ for explanation of non-inferiority and how to perform Sample size in SAS (including comparing proportions). Below we give 2 sample size examples for the following types of studies:\n\n- two-sample comparison of means for Non-inferiority (i.e. testing if one treatment mean is non-inferior to the another treatment mean).\n\n- Paired-sample comparison of means (i.e. 
2 treatment means recorded on 1 group of patients are equivalent within a set tolerance)\n\n# **Two Sample Non-inferiority test: Comparing means for parallel design (unpaired)**\n\nThis example is a sample size calculation for the following hypotheses: $H_0:\\mu2-\\mu1\\le -\\theta$ versus $H_1: \\mu2-\\mu1\\gt -\\theta$.\n\nA client is interested in conducting a clinical trial to compare two cholesterol lowering agents for treatment of hypercholesterolemic patients through a parallel design. The primary efficacy parameter is a low-density lipidprotein cholesterol (LDL-C). We will consider the situation where the intended trial is for testing noninferiority. For establishing it, suppose the true mean difference is 0 and the noninferiority margin is chosen to be -0.05 (-5%). Assuming SD = 0.1, how many patients are required for an 80% power and an overall significance level of 5%?\n\n```sas\nPROC POWER; \n twosamplemeans \n test=equiv_diff \n lower = 91\n upper = 101 \n meandiff=96 \n stddev = 8\n ntotal = . \n power = 0.8\n alpha = 0.05;\nRUN;\n```\n\nAs shown below, a total sample size of 102 is recommended, which equates to 51 in each group.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/sample_s_noninferiority/2_sample_parallelgroup_means.png){fig-align='center' width=50%}\n:::\n:::\n\n\n# **Comparing means for crossover design (paired)**\n\nHere we assume there is no carry-over effect and that the variance is known. For more information see^3^. There is no obvious way in SAS to do cross over non-inferiority, however, given the one sided test, you can half the alpha using the equiv_diff option to give you the non-inferiority sample size.\n\n## **Estimating the within patient variance and correlation.**\n\nLet's consider a standard two-sequence, two period crossover design. Suppose that the sponsor is interested in showing non-inferiority of the test drug against the reference with the non-inferiority margin -20%. 
Assume power of 80%. Based on the results from previous trials, it is estimated that the variance (of the difference) is 0.2 (20%). Suppose that the true mean difference is -0.1 (-10%). What is the required sample size, assuming significance level of 5%?\n\nAlpha = 0.025 is used below, instead of 0.05 because you are doing non-inferiority (a one sided test). Note that this is still the sample size for alpha=0.05. The below shows a sample size of 13 patients is required.\n\n```sas\npairedmeans \ntest=equiv_diff\n lower = -0.3\n upper = 0.1\n meandiff = -0.1\n stddev = 0.2\n corr = 0.5\n alpha = 0.025\n npairs = .\n power = 0.8;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/sample_s_noninferiority/2_sample_crossover_means.png){fig-align='center' width=50%}\n:::\n:::\n\n\n### References\n\n1. [PROC POWER SAS online help](https://support.sas.com/documentation/cdl/en/statug/63347/HTML/default/viewer.htm#statug_power_sect010.html)\n2. [Sample Size Calculation Using SAS® for non-inferiority](https://support.sas.com/kb/48/616.html)\n3. [Sample Size for Cross over non-inferiority](https://support.sas.com/documentation/cdl/en/statug/63347/HTML/default/viewer.htm#statug_power_a0000001004.htm)\n\n### Version\n\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os Ubuntu 24.04.3 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-17\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n R sample_s_noninferiority [?] 
\n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.2/lib/R/library\n\n R ── Package was removed from disk.\n\n─ External software ──────────────────────────────────────────────────────────\n setting value\n SAS 9.04.01M7P08062020\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/SAS/sample_s_superiority/execute-results/html.json b/_freeze/SAS/sample_s_superiority/execute-results/html.json index 702c02538..3f31d9c24 100644 --- a/_freeze/SAS/sample_s_superiority/execute-results/html.json +++ b/_freeze/SAS/sample_s_superiority/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "bc90bbeec237e92def1eaa7bc2bff65f", + "hash": "df1f4e75eec35f27d1ea4367110b22f9", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Sample Size for Superiority Trials in SAS\"\noutput: html_document\ndate: last-modified\ndate-format: D MMMM, YYYY\nexecute: \n eval: false\n---\n\nSAS has 2 procedures for doing Sample size. A basic summary is provided here based on Jenny Cody's paper^1^ , but see the paper itself for more details. 
There are also many available options to best to consult SAS online support for [PROC POWER](PROC%20POWER:%20Simple%20AB/BA%20Crossover%20Designs%20::%20SAS/STAT(R)%209.3%20User's%20Guide)^2^ and [PROC GLMPOWER](https://support.sas.com/documentation/cdl/en/statug/63347/HTML/default/viewer.htm#statug_glmpower_a0000000154.htm)^3^.\n\nPROC POWER is used for sample size calculations for tests such as:\n\n- t tests, equivalence tests, and confidence intervals for means,\n\n- tests, equivalence tests, and confidence intervals for binomial proportions,\n\n- multiple regression,\n\n- tests of correlation and partial correlation,\n\n- one-way analysis of variance,\n\n- rank tests for comparing two survival curves,\n\n- logistic regression with binary response,\n\n- Wilcoxon-Mann-Whitney (rank-sum) test (SAS, 2010).\n\nPROC GLMPOWER is used for sample size calculations for more complex linear models, and cover Type III tests and contrasts of fixed effects in univariate linear models with or without covariates. (SAS, 2011).\n\n### **Comparing means for parallel design (unpaired)**\n\nIn the most common scenario SDs are assumed known and the same in both treatment groups. Otherwise Student t distribution is used instead of the normal distribution. SAS follows the Satterthwaite method.\n\n#### Example: Sample size for comparison of 2 independant treatment group means with same known SDs\n\nA client is interested in conducting a clinical trial to compare two cholesterol lowering agents for treatment of hypercholesterolemic patients. The primary efficacy parameter is a low-density lipidprotein cholesterol (LDL-C). Suppose that a difference of 8% in the percent change of LDL-C is considered a clinically meaningful difference and that the standard deviation is assumed to be 15%. What sample size is required for a two-sided false positive rate of 5% and a power of 80%?\n\nThe code below estimates the sample size in SAS. 
NOTE: you can either specify the MEANDIFF=8 or if you know the separate group means X and Y, you can use GROUPMEANS =X\\|Y code instead. SAS also assume a default alpha level of 0.05, a 1:1 balanced randomization and a Normal distribution.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nPROC POWER ;\n TWOSAMPLEMEANS TEST=DIFF\n MEANDIFF=8\n STDDEV=15\n NTOTAL=.\n POWER=0.8\n ;\nRUN;\n```\n:::\n\n\nAs shown below, a total sample size of 114 is recommended, which equates to 57 in each group.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/sample_s_superiority/SAS2sampmeans.png){fig-align='center' width=50%}\n:::\n:::\n\n\n### **Comparing means for crossover design (paired)**\n\nIt is important to differentiate here between the within patient SD and the SD of the difference. We may need to recalculate one to the other, depending on the case.\n\nVariance of the difference = 2x Variance within patient. Vardiff=2∗Varpatient\n\n#### Example\n\nWe wish to run an AB/BA single dose crossover to compare two brochodilators. The primary outcome is peak expiratory flow, and a clinically relevant difference of 30 l/min is sought with 80% power, the significance level is 5% and the best estimate of the within patient standard deviation is 32 l/min. What size of trial do we require? (After recalculating: 32∗2=45 and assuming no period effect and assuming between each pair of measurements on the same subject that we have a 0.5 correlation)\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nPROC POWER ;\n PAIREDMEANS TEST=DIFF\n NPAIRS=.\n corr=0.5\n MEANDIFF=30\n STDDEV=45\n POWER=0.8\n ;\nRUN;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/sample_s_superiority/SAScrossovermeans.png){fig-align='center' width=50%}\n:::\n:::\n\n\n### References\n\n1. [Sample Size Calculation Using SAS®, R, and nQuery Software, 2020 Jenna Cody, Paper 4675-2020](https://support.sas.com/resources/papers/proceedings20/4675-2020.pdf)\n2. 
PROC POWER SAS online help https://support.sas.com/documentation/cdl/en/statug/63347/HTML/default/viewer.htm#statug_power_sect010.htm\n3. PROC GLMPOWER SAS online helphttps://support.sas.com/documentation/cdl/en/statug/63347/HTML/default/viewer.htm#statug_glmpower_a0000000154.htm\n\n### Version\n\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-23\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n R sample_s_superiority [?] \n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n\n R ── Package was removed from disk.\n\n─ External software ──────────────────────────────────────────────────────────\n setting value\n SAS 9.04.01M7P08062020\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n", + "markdown": "---\ntitle: \"Sample Size for Superiority Trials in SAS\"\noutput: html_document\ndate: last-modified\ndate-format: D MMMM, YYYY\n---\n\nSAS has 2 procedures for doing Sample size. A basic summary is provided here based on Jenny Cody's paper^1^ , but see the paper itself for more details. 
There are also many available options to best to consult SAS online support for [PROC POWER](PROC%20POWER:%20Simple%20AB/BA%20Crossover%20Designs%20::%20SAS/STAT(R)%209.3%20User's%20Guide)^2^ and [PROC GLMPOWER](https://support.sas.com/documentation/cdl/en/statug/63347/HTML/default/viewer.htm#statug_glmpower_a0000000154.htm)^3^.\n\nPROC POWER is used for sample size calculations for tests such as:\n\n- t tests, equivalence tests, and confidence intervals for means,\n\n- tests, equivalence tests, and confidence intervals for binomial proportions,\n\n- multiple regression,\n\n- tests of correlation and partial correlation,\n\n- one-way analysis of variance,\n\n- rank tests for comparing two survival curves,\n\n- logistic regression with binary response,\n\n- Wilcoxon-Mann-Whitney (rank-sum) test (SAS, 2010).\n\nPROC GLMPOWER is used for sample size calculations for more complex linear models, and cover Type III tests and contrasts of fixed effects in univariate linear models with or without covariates. (SAS, 2011).\n\n### **Comparing means for parallel design (unpaired)**\n\nIn the most common scenario SDs are assumed known and the same in both treatment groups. Otherwise Student t distribution is used instead of the normal distribution. SAS follows the Satterthwaite method.\n\n#### Example: Sample size for comparison of 2 independant treatment group means with same known SDs\n\nA client is interested in conducting a clinical trial to compare two cholesterol lowering agents for treatment of hypercholesterolemic patients. The primary efficacy parameter is a low-density lipidprotein cholesterol (LDL-C). Suppose that a difference of 8% in the percent change of LDL-C is considered a clinically meaningful difference and that the standard deviation is assumed to be 15%. What sample size is required for a two-sided false positive rate of 5% and a power of 80%?\n\nThe code below estimates the sample size in SAS. 
NOTE: you can either specify the MEANDIFF=8 or if you know the separate group means X and Y, you can use GROUPMEANS =X\\|Y code instead. SAS also assume a default alpha level of 0.05, a 1:1 balanced randomization and a Normal distribution.\n\n```sas\nPROC POWER ;\n TWOSAMPLEMEANS TEST=DIFF\n MEANDIFF=8\n STDDEV=15\n NTOTAL=.\n POWER=0.8\n ;\nRUN;\n```\n\nAs shown below, a total sample size of 114 is recommended, which equates to 57 in each group.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/sample_s_superiority/SAS2sampmeans.png){fig-align='center' width=50%}\n:::\n:::\n\n\n### **Comparing means for crossover design (paired)**\n\nIt is important to differentiate here between the within patient SD and the SD of the difference. We may need to recalculate one to the other, depending on the case.\n\nVariance of the difference = 2x Variance within patient. Vardiff=2∗Varpatient\n\n#### Example\n\nWe wish to run an AB/BA single dose crossover to compare two brochodilators. The primary outcome is peak expiratory flow, and a clinically relevant difference of 30 l/min is sought with 80% power, the significance level is 5% and the best estimate of the within patient standard deviation is 32 l/min. What size of trial do we require? (After recalculating: 32∗2=45 and assuming no period effect and assuming between each pair of measurements on the same subject that we have a 0.5 correlation)\n\n```sas\nPROC POWER ;\n PAIREDMEANS TEST=DIFF\n NPAIRS=.\n corr=0.5\n MEANDIFF=30\n STDDEV=45\n POWER=0.8\n ;\nRUN;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/sample_s_superiority/SAScrossovermeans.png){fig-align='center' width=50%}\n:::\n:::\n\n\n### References\n\n1. [Sample Size Calculation Using SAS®, R, and nQuery Software, 2020 Jenna Cody, Paper 4675-2020](https://support.sas.com/resources/papers/proceedings20/4675-2020.pdf)\n2. 
PROC POWER SAS online help https://support.sas.com/documentation/cdl/en/statug/63347/HTML/default/viewer.htm#statug_power_sect010.htm\n3. PROC GLMPOWER SAS online helphttps://support.sas.com/documentation/cdl/en/statug/63347/HTML/default/viewer.htm#statug_glmpower_a0000000154.htm\n\n### Version\n\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os Ubuntu 24.04.3 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-17\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n R sample_s_superiority [?] \n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.2/lib/R/library\n\n R ── Package was removed from disk.\n\n─ External software ──────────────────────────────────────────────────────────\n setting value\n SAS 9.04.01M7P08062020\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/SAS/summary-stats/execute-results/html.json b/_freeze/SAS/summary-stats/execute-results/html.json deleted file mode 100644 index 249ec2331..000000000 --- a/_freeze/SAS/summary-stats/execute-results/html.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "hash": "2e2319c71fad1011bb633012493af66a", - "result": { - "engine": "jupyter", - "markdown": "---\ntitle: \"Calculating Quantiles (percentiles) in SAS\"\nexecute: \n eval: false\n---\n\nPercentiles can be calculated in SAS using the UNIVARIATE procedure. 
The procedure has the option `PCTLDEF` which allows for five different percentile definitions to be used. The default is `PCTLDEF=5`, which uses the empirical distribution function to find percentiles.\n\nThis is how the 25th and 40th percentiles of `aval` in the dataset `adlb` could be calculated, using the default option for `PCTLDEF`. For quantiles, Q1= 25%, Q2=50%, Q3 = 75%, Q4=100%.\n\n```{sas}\nproc univariate data=adlb;\n var aval;\n output out=stats pctlpts=25 40 pctlpre=p;\nrun;\n```\n\nThe `pctlpre=p` option tells SAS the prefix to use in the output dataset for the percentile results. In the above example, SAS will create a dataset called `stats`, containing variables `p25` and `p40`.\n\n", - "supporting": [ - "summary-stats_files" - ], - "filters": [], - "includes": {} - } -} \ No newline at end of file diff --git a/_freeze/SAS/summary_skew_kurt/execute-results/html.json b/_freeze/SAS/summary_skew_kurt/execute-results/html.json index 177fa299d..cf1326bcd 100644 --- a/_freeze/SAS/summary_skew_kurt/execute-results/html.json +++ b/_freeze/SAS/summary_skew_kurt/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "3896aff5cee9f84129f55ce655123003", + "hash": "742512a5ea81de634b3552995f429813", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Skewness/Kurtosis\"\noutput: html_document\nexecute: \n eval: false\n---\n\n\n\n# **Skewness and Kurtosis SAS**\n\nIn SAS, Skewness and Kurtosis are usually calculated using `PROC MEANS`. The procedures can produce both statistics in the same call. 
The procedure provides options for different methodologies.\n\n### Data Used\n\nThe following data was used in this example.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\ndata dat;\n input team $ points assists;\n datalines;\n A 10 2\n A 17 5\n A 17 6\n A 18 3\n A 15 0\n B 10 2\n B 14 5\n B 13 4\n B 29 0\n B 25 2\n C 12 1\n C 30 1\n C 34 3\n C 12 4\n C 11 7\n ;\nrun;\n```\n:::\n\n\n## Procedures Examination {#sas}\n\nBy default, SAS `PROC MEANS` uses VARDEF option \"DF\". The other options are \"N\", \"WEIGHT\", and \"WDF. Note that the WEIGHT and WDF options produce no results, as weighted calculations are not supported in PROC MEANS for Skewness and Kurtosis.\n\nThe following shows the SAS documentation for the two measures.\n\n### Skewness\n\nThe [SAS documentation for Skewness](https://documentation.sas.com/doc/en/vdmmlcdc/8.1/casfedsql/p04x27b92gon3gn10e5y5ybxbvmi.htm) is provided here for convenience:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/summarystats/sas_skewness.png){fig-align='center' width=100%}\n:::\n:::\n\n\n### Kurtosis\n\nThe SAS documentation for Kurtosis is as follows:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/summarystats/sas_kurtosis.png){fig-align='center' width=100%}\n:::\n:::\n\n\n### VARDEF = DF {#df}\n\nSkewness and Kurtosis are commonly calculated in SAS as follows:\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc means data=dat SKEWNESS KURTOSIS;\n var points;\nrun;\n```\n:::\n\n\nOutput:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/summarystats/sas_skewness_kurtosis1.png){fig-align='center' width=30%}\n:::\n:::\n\n\nThe above results correspond to the Type 2 methodology in R.\n\n### VARDEF = N {#n}\n\nThe N option produces the following results\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc means data=dat SKEWNESS KURTOSIS vardef = N;\n var points;\nrun;\n```\n:::\n\n\nOutput:\n\n\n::: {.cell 
layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/summarystats/sas_skewness_kurtosis2.png){fig-align='center' width=30%}\n:::\n:::\n\n\nThe above results correspond to the Type 1 methodology in R.\n\n## Summary {#summary}\n\nSAS options provide for Type 1 and Type 2 Skewness and Kurtosis. Skewness Type 3 and Kurtosis Type 3 are not supported. Also Pearson's Kurtosis is not supported.", + "markdown": "---\ntitle: \"Skewness/Kurtosis\"\noutput: html_document\n---\n\n\n\n# **Skewness and Kurtosis SAS**\n\nIn SAS, Skewness and Kurtosis are usually calculated using `PROC MEANS`. The procedures can produce both statistics in the same call. The procedure provides options for different methodologies.\n\n### Data Used\n\nThe following data was used in this example.\n\n```sas\ndata dat;\n input team $ points assists;\n datalines;\n A 10 2\n A 17 5\n A 17 6\n A 18 3\n A 15 0\n B 10 2\n B 14 5\n B 13 4\n B 29 0\n B 25 2\n C 12 1\n C 30 1\n C 34 3\n C 12 4\n C 11 7\n ;\nrun;\n```\n\n## Procedures Examination {#sas}\n\nBy default, SAS `PROC MEANS` uses VARDEF option \"DF\". The other options are \"N\", \"WEIGHT\", and \"WDF. 
Note that the WEIGHT and WDF options produce no results, as weighted calculations are not supported in PROC MEANS for Skewness and Kurtosis.\n\nThe following shows the SAS documentation for the two measures.\n\n### Skewness\n\nThe [SAS documentation for Skewness](https://documentation.sas.com/doc/en/vdmmlcdc/8.1/casfedsql/p04x27b92gon3gn10e5y5ybxbvmi.htm) is provided here for convenience:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/summarystats/sas_skewness.png){fig-align='center' width=100%}\n:::\n:::\n\n\n### Kurtosis\n\nThe SAS documentation for Kurtosis is as follows:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/summarystats/sas_kurtosis.png){fig-align='center' width=100%}\n:::\n:::\n\n\n### VARDEF = DF {#df}\n\nSkewness and Kurtosis are commonly calculated in SAS as follows:\n\n```sas\nproc means data=dat SKEWNESS KURTOSIS;\n var points;\nrun;\n```\n\nOutput:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/summarystats/sas_skewness_kurtosis1.png){fig-align='center' width=30%}\n:::\n:::\n\n\nThe above results correspond to the Type 2 methodology in R.\n\n### VARDEF = N {#n}\n\nThe N option produces the following results\n\n```sas\nproc means data=dat SKEWNESS KURTOSIS vardef = N;\n var points;\nrun;\n```\n\nOutput:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/summarystats/sas_skewness_kurtosis2.png){fig-align='center' width=30%}\n:::\n:::\n\n\nThe above results correspond to the Type 1 methodology in R.\n\n## Summary {#summary}\n\nSAS options provide for Type 1 and Type 2 Skewness and Kurtosis. Skewness Type 3 and Kurtosis Type 3 are not supported. 
Also Pearson's Kurtosis is not supported.", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/SAS/survey-stats-summary/execute-results/html.json b/_freeze/SAS/survey-stats-summary/execute-results/html.json index 1e412aea8..ca2694860 100644 --- a/_freeze/SAS/survey-stats-summary/execute-results/html.json +++ b/_freeze/SAS/survey-stats-summary/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "399a160edd735648802bd52e1a2e14c8", + "hash": "e1a1dd4239b9d976e85ce4cd30f8f1cd", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Survey Summary Statistics using SAS\"\nbibliography: ../Comp/survey-stats-summary.bib\nexecute: \n eval: false\n---\n\nWhen conducting large-scale trials on samples of the population, it can be necessary to use a more complex sampling design than a simple random sample.\n\n- **Weighting** – If smaller populations are sampled more heavily to increase precision, then it is necessary to weight these observations in the analysis.\n\n- **Finite population correction** – Larger samples of populations result in lower variability in comparison to smaller samples.\n\n- **Stratification** – Dividing a population into sub-groups and sampling from each group. This protects from obtaining a very poor sample (e.g. under or over-represented groups), can give samples of a known precision, and gives more precise estimates for population means and totals.\n\n- **Clustering** – Dividing a population into sub-groups, and only sampling certain groups. This gives a lower precision, however can be much more convenient and cheaper - for example if surveying school children you may only sample a subset of schools to avoid travelling to a school to interview a single child.\n\nAll of these designs need to be taken into account when calculating statistics, and when producing models. Only summary statistics are discussed in this document, and variances are calculated using the default Taylor series linearisation methods. 
For a more detailed introduction to survey statistics in SAS, see [@Lohr_2022] or [@SAS_2018].\n\nFor survey summary statistics in SAS, we can use the `SURVEYMEANS` and `SURVEYFREQ` procedures.\n\n# Simple Survey Designs\n\nWe will use the [API]((https://r-survey.r-forge.r-project.org/survey/html/api.html)) dataset [@API_2000], which contains a number of datasets based on different samples from a dataset of academic performance. Initially we will just cover the methodology with a simple random sample and a finite population correction to demonstrate functionality.\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n \n
cdsstypenamesnamesnumdnamednumcnamecnumflagpcttestapi00api99targetgrowthsch.widecomp.impbothawardsmealsellyr.rndmobilityacs.k3acs.46acs.corepct.respnot.hsghsgsome.colcol.gradgrad.schavg.edfullemerenrollapi.stupwfpc
15739081534155HMcFarland HighMcFarland High1039McFarland Unified432Kern14NA984624481814NoYesNoNo4431NA6NANA2482443412731.91713547742930.976194
19642126066716EStowers (Cecil Stowers (Cecil B.) Elementary1124ABC Unified1Los Angeles18NA100878831NA47YesYesYesYes825NA151930NA974102343213.66901047842030.976194
30664493030640HBrea-Olinda HigBrea-Olinda High2868Brea-Olinda Unified79Orange29NA987347423-8NoNoNoNo1010NA7NANA2895592141243.7183181410128730.976194
19644516012744EAlameda ElementAlameda Elementary1273Downey Unified187Los Angeles18NA997726577115YesYesYesYes7025NA2323NANA100374014811.96851834229130.976194
40688096043293ESunnyside ElemeSunnyside Elementary4926San Luis Coastal Unified640San Luis Obispo39NA99739719420YesYesYesYes4312NA122029NA918212734103.17100021718930.976194
19734456014278ELos Molinos EleLos Molinos Elementary2463Hacienda la Puente Unif284Los Angeles18NA93835822NA13YesYesYesNo1619NA131929NA71182038343.96752025821130.976194
\n
\n```\n\n:::\n:::\n\n\n## Mean\n\nIf we want to calculate a mean of a variable in a dataset which has been obtained from a **s**imple **r**andom **s**ample such as `apisrs`, in SAS we can do the following (*nb. here `total=6194` is obtained from the constant `fpc` column, and provides the finite population correction*):\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc surveymeans data=apisrs total=6194 mean;\n var growth;\nrun;\n```\n:::\n\n\n``` default\n The SURVEYMEANS Procedure\n\n Data Summary\n\n Number of Observations 200\n\n\n Statistics\n\n Std Error\n Variable N Mean of Mean 95% CL for Mean\n ---------------------------------------------------------------------------------\n growth 200 31.900000 2.090493 27.7776382 36.0223618\n ---------------------------------------------------------------------------------\n```\n\n## Total\n\nTo calculate population totals, we can request the `sum`. However SAS requires the user to specify the weights, otherwise the totals will be incorrect. These weights in this case are equivalent to the total population size divided by the sample size:\n\n\n::: {.cell}\n\n```{.sas .cell-code}\ndata apisrs;\n set apisrs nobs=n;\n weight = fpc / n;\nrun;\n\nproc surveymeans data=apisrs total=6194 sum;\n var growth;\n weight weight;\nrun;\n```\n:::\n\n\n``` default\n The SURVEYMEANS Procedure\n\n Data Summary\n\n Number of Observations 200\n Sum of Weights 6194\n\n\n Statistics\n\n Std Error\nVariable Sum of Sum\n----------------------------------------\ngrowth 197589 12949\n----------------------------------------\n```\n\n## Ratios\n\nTo perform ratio analysis for means or proportions of analysis variables in SAS, we can use the following:\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc surveymeans data=apisrs total=6194;\n ratio api00 / api99;\nrun;\n```\n:::\n\n\n``` default\n The SURVEYMEANS Procedure\n\n Data Summary\n\n Number of Observations 200\n\n\n Statistics\n\n Std Error\n Variable N Mean of Mean 95% CL for Mean\n 
---------------------------------------------------------------------------------\n api00 200 656.585000 9.249722 638.344950 674.825050\n api99 200 624.685000 9.500304 605.950813 643.419187\n ---------------------------------------------------------------------------------\n\n\n Ratio Analysis\n\n Std\nNumerator Denominator N Ratio Error 95% CL for Ratio\n----------------------------------------------------------------------------------------------\napi00 api99 200 1.051066 0.003604 1.04395882 1.05817265\n----------------------------------------------------------------------------------------------\n```\n\n## Proportions\n\nTo calculate a proportion in SAS, we use the `PROC SURVEYFREQ`, in the simplest case below:\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc surveyfreq data=apisrs total=6194;\n table 'sch.wide'n / cl;\nrun;\n```\n:::\n\n\n``` default\n The SURVEYFREQ Procedure\n\n Data Summary\n\n Number of Observations 200\n\n\n Table of sch.wide\n\n Std Err of 95% Confidence Limits\n sch.wide Frequency Percent Percent for Percent\n -------------------------------------------------------------------------\n No 37 18.5000 2.7078 13.1604 23.8396\n Yes 163 81.5000 2.7078 76.1604 86.8396\n\n Total 200 100.0000 \n```\n\n## Quantiles\n\nTo calculate quantiles in SAS, we can use the `quantile` option to request specific quantiles, or can use keywords to request common quantiles (e.g. quartiles or the median). This will use Woodruff's method for confidence intervals, and a custom quantile method [@SAS_2018, pp. 
9834].\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc surveymeans data=apisrs total=6194 quantile=(0.025 0.5 0.975);\n var growth;\nrun;\n```\n:::\n\n\n``` default\n The SURVEYMEANS Procedure\n\n Data Summary\n\n Number of Observations 200\n\n\n\n\n Quantiles\n\n Std\n Variable Percentile Estimate Error 95% Confidence Limits\n ---------------------------------------------------------------------------------\n growth 2.5 -16.500000 1.755916 -19.962591 -13.037409\n 50 Median 26.500000 1.924351 22.705263 30.294737\n 97.5 99.000000 16.133827 67.184794 130.815206\n ---------------------------------------------------------------------------------\n```\n\n# Summary Statistics on Complex Survey Designs\n\nMuch of the previous examples and notes still stand for more complex survey designs, here we will demonstrate using a dataset from NHANES [@NHANES_2010], which uses both stratification and clustering:\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n \n
SDMVPSUSDMVSTRAWTMEC2YRHI_CHOLraceagecatRIAGENDR
18381528.7702(19,39]1
18414509.2803(0,19]1
28612041.6403(0,19]1
27521000.3403(59,Inf]2
18822633.5801(19,39]1
28574112.4912(39,59]2
\n
\n```\n\n:::\n:::\n\n\nTo produce means and standard quartiles for this sample, taking account of sample design, we can use the following:\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc surveymeans data=nhanes mean quartiles;\n cluster SDMVPSU;\n strata SDMVSTRA;\n weight WTMEC2YR;\n var HI_CHOL;\nrun;\n```\n:::\n\n\n``` default\n The SURVEYMEANS Procedure\n\n Data Summary\n\n Number of Strata 15\n Number of Clusters 31\n Number of Observations 8591\n Sum of Weights 276536446\n\n\n Statistics\n\n Std Error\n Variable Mean of Mean\n ----------------------------------------\n HI_CHOL 0.112143 0.005446\n ----------------------------------------\n\n\n Quantiles\n\n Std\n Variable Percentile Estimate Error 95% Confidence Limits\n ---------------------------------------------------------------------------------\n HI_CHOL 25 Q1 0 0.024281 -0.0514730 0.05147298\n 50 Median 0 0.024281 -0.0514730 0.05147298\n 75 Q3 0 0.024281 -0.0514730 0.05147298\n ---------------------------------------------------------------------------------\n```\n\nTo produce an analysis of separate subpopulations in SAS we can use the `DOMAIN` statement (note: do not use the `BY` statement as it will not give statistically valid analysis), here we also request the design effect:\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc surveymeans data=nhanes mean deff;\n cluster SDMVPSU;\n strata SDMVSTRA;\n weight WTMEC2YR;\n var HI_CHOL;\n domain race;\nrun;\n```\n:::\n\n\n``` default\n The SURVEYMEANS Procedure\n\n Data Summary\n\n Number of Strata 15\n Number of Clusters 31\n Number of Observations 8591\n Sum of Weights 276536446\n\n\n Statistics\n\n Std Error Design\nVariable Mean of Mean Effect\n--------------------------------------------------------\nHI_CHOL 0.112143 0.005446 2.336725\n--------------------------------------------------------\n\n Statistics for race Domains\n\n Std Error Design\nrace Variable Mean of Mean Effect\n------------------------------------------------------------------------\n 
1 HI_CHOL 0.101492 0.006246 1.082734\n 2 HI_CHOL 0.121649 0.006604 1.407822\n 3 HI_CHOL 0.078640 0.010385 2.091156\n 4 HI_CHOL 0.099679 0.024666 3.098290\n------------------------------------------------------------------------\n```\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-23\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P survey * 4.4-8 2025-08-28 [?] RSPM\n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n\n * ── Packages attached to the search path.\n P ── Loaded and on-disk path mismatch.\n\n─ External software ──────────────────────────────────────────────────────────\n setting value\n SAS 9.04.01M7P080520\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n:::", + "markdown": "---\ntitle: \"Survey Summary Statistics using SAS\"\nbibliography: ../Comp/survey-stats-summary.bib\n---\n\nWhen conducting large-scale trials on samples of the population, it can be necessary to use a more complex sampling design than a simple random sample.\n\n- **Weighting** – If smaller populations are sampled more heavily to increase precision, then it is necessary to weight these observations in the analysis.\n\n- **Finite population correction** – Larger samples of 
populations result in lower variability in comparison to smaller samples.\n\n- **Stratification** – Dividing a population into sub-groups and sampling from each group. This protects from obtaining a very poor sample (e.g. under or over-represented groups), can give samples of a known precision, and gives more precise estimates for population means and totals.\n\n- **Clustering** – Dividing a population into sub-groups, and only sampling certain groups. This gives a lower precision, however can be much more convenient and cheaper - for example if surveying school children you may only sample a subset of schools to avoid travelling to a school to interview a single child.\n\nAll of these designs need to be taken into account when calculating statistics, and when producing models. Only summary statistics are discussed in this document, and variances are calculated using the default Taylor series linearisation methods. For a more detailed introduction to survey statistics in SAS, see [@Lohr_2022] or [@SAS_2018].\n\nFor survey summary statistics in SAS, we can use the `SURVEYMEANS` and `SURVEYFREQ` procedures.\n\n# Simple Survey Designs\n\nWe will use the [API]((https://r-survey.r-forge.r-project.org/survey/html/api.html)) dataset [@API_2000], which contains a number of datasets based on different samples from a dataset of academic performance. Initially we will just cover the methodology with a simple random sample and a finite population correction to demonstrate functionality.\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n \n
cdsstypenamesnamesnumdnamednumcnamecnumflagpcttestapi00api99targetgrowthsch.widecomp.impbothawardsmealsellyr.rndmobilityacs.k3acs.46acs.corepct.respnot.hsghsgsome.colcol.gradgrad.schavg.edfullemerenrollapi.stupwfpc
15739081534155HMcFarland HighMcFarland High1039McFarland Unified432Kern14NA984624481814NoYesNoNo4431NA6NANA2482443412731.91713547742930.976194
19642126066716EStowers (Cecil Stowers (Cecil B.) Elementary1124ABC Unified1Los Angeles18NA100878831NA47YesYesYesYes825NA151930NA974102343213.66901047842030.976194
30664493030640HBrea-Olinda HigBrea-Olinda High2868Brea-Olinda Unified79Orange29NA987347423-8NoNoNoNo1010NA7NANA2895592141243.7183181410128730.976194
19644516012744EAlameda ElementAlameda Elementary1273Downey Unified187Los Angeles18NA997726577115YesYesYesYes7025NA2323NANA100374014811.96851834229130.976194
40688096043293ESunnyside ElemeSunnyside Elementary4926San Luis Coastal Unified640San Luis Obispo39NA99739719420YesYesYesYes4312NA122029NA918212734103.17100021718930.976194
19734456014278ELos Molinos EleLos Molinos Elementary2463Hacienda la Puente Unif284Los Angeles18NA93835822NA13YesYesYesNo1619NA131929NA71182038343.96752025821130.976194
\n
\n```\n\n:::\n:::\n\n\n## Mean\n\nIf we want to calculate a mean of a variable in a dataset which has been obtained from a **s**imple **r**andom **s**ample such as `apisrs`, in SAS we can do the following (*nb. here `total=6194` is obtained from the constant `fpc` column, and provides the finite population correction*):\n\n```sas\nproc surveymeans data=apisrs total=6194 mean;\n var growth;\nrun;\n```\n\n``` default\n The SURVEYMEANS Procedure\n\n Data Summary\n\n Number of Observations 200\n\n\n Statistics\n\n Std Error\n Variable N Mean of Mean 95% CL for Mean\n ---------------------------------------------------------------------------------\n growth 200 31.900000 2.090493 27.7776382 36.0223618\n ---------------------------------------------------------------------------------\n```\n\n## Total\n\nTo calculate population totals, we can request the `sum`. However SAS requires the user to specify the weights, otherwise the totals will be incorrect. These weights in this case are equivalent to the total population size divided by the sample size:\n\n```sas\ndata apisrs;\n set apisrs nobs=n;\n weight = fpc / n;\nrun;\n\nproc surveymeans data=apisrs total=6194 sum;\n var growth;\n weight weight;\nrun;\n```\n\n``` default\n The SURVEYMEANS Procedure\n\n Data Summary\n\n Number of Observations 200\n Sum of Weights 6194\n\n\n Statistics\n\n Std Error\nVariable Sum of Sum\n----------------------------------------\ngrowth 197589 12949\n----------------------------------------\n```\n\n## Ratios\n\nTo perform ratio analysis for means or proportions of analysis variables in SAS, we can use the following:\n\n```sas\nproc surveymeans data=apisrs total=6194;\n ratio api00 / api99;\nrun;\n```\n\n``` default\n The SURVEYMEANS Procedure\n\n Data Summary\n\n Number of Observations 200\n\n\n Statistics\n\n Std Error\n Variable N Mean of Mean 95% CL for Mean\n ---------------------------------------------------------------------------------\n api00 200 656.585000 9.249722 638.344950 
674.825050\n api99 200 624.685000 9.500304 605.950813 643.419187\n ---------------------------------------------------------------------------------\n\n\n Ratio Analysis\n\n Std\nNumerator Denominator N Ratio Error 95% CL for Ratio\n----------------------------------------------------------------------------------------------\napi00 api99 200 1.051066 0.003604 1.04395882 1.05817265\n----------------------------------------------------------------------------------------------\n```\n\n## Proportions\n\nTo calculate a proportion in SAS, we use the `PROC SURVEYFREQ`, in the simplest case below:\n\n```sas\nproc surveyfreq data=apisrs total=6194;\n table 'sch.wide'n / cl;\nrun;\n```\n\n``` default\n The SURVEYFREQ Procedure\n\n Data Summary\n\n Number of Observations 200\n\n\n Table of sch.wide\n\n Std Err of 95% Confidence Limits\n sch.wide Frequency Percent Percent for Percent\n -------------------------------------------------------------------------\n No 37 18.5000 2.7078 13.1604 23.8396\n Yes 163 81.5000 2.7078 76.1604 86.8396\n\n Total 200 100.0000 \n```\n\n## Quantiles\n\nTo calculate quantiles in SAS, we can use the `quantile` option to request specific quantiles, or can use keywords to request common quantiles (e.g. quartiles or the median). This will use Woodruff's method for confidence intervals, and a custom quantile method [@SAS_2018, pp. 
9834].\n\n```sas\nproc surveymeans data=apisrs total=6194 quantile=(0.025 0.5 0.975);\n var growth;\nrun;\n```\n\n``` default\n The SURVEYMEANS Procedure\n\n Data Summary\n\n Number of Observations 200\n\n\n\n\n Quantiles\n\n Std\n Variable Percentile Estimate Error 95% Confidence Limits\n ---------------------------------------------------------------------------------\n growth 2.5 -16.500000 1.755916 -19.962591 -13.037409\n 50 Median 26.500000 1.924351 22.705263 30.294737\n 97.5 99.000000 16.133827 67.184794 130.815206\n ---------------------------------------------------------------------------------\n```\n\n# Summary Statistics on Complex Survey Designs\n\nMuch of the previous examples and notes still stand for more complex survey designs, here we will demonstrate using a dataset from NHANES [@NHANES_2010], which uses both stratification and clustering:\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n \n
SDMVPSUSDMVSTRAWTMEC2YRHI_CHOLraceagecatRIAGENDR
18381528.7702(19,39]1
18414509.2803(0,19]1
28612041.6403(0,19]1
27521000.3403(59,Inf]2
18822633.5801(19,39]1
28574112.4912(39,59]2
\n
\n```\n\n:::\n:::\n\n\nTo produce means and standard quartiles for this sample, taking account of sample design, we can use the following:\n\n```sas\nproc surveymeans data=nhanes mean quartiles;\n cluster SDMVPSU;\n strata SDMVSTRA;\n weight WTMEC2YR;\n var HI_CHOL;\nrun;\n```\n\n``` default\n The SURVEYMEANS Procedure\n\n Data Summary\n\n Number of Strata 15\n Number of Clusters 31\n Number of Observations 8591\n Sum of Weights 276536446\n\n\n Statistics\n\n Std Error\n Variable Mean of Mean\n ----------------------------------------\n HI_CHOL 0.112143 0.005446\n ----------------------------------------\n\n\n Quantiles\n\n Std\n Variable Percentile Estimate Error 95% Confidence Limits\n ---------------------------------------------------------------------------------\n HI_CHOL 25 Q1 0 0.024281 -0.0514730 0.05147298\n 50 Median 0 0.024281 -0.0514730 0.05147298\n 75 Q3 0 0.024281 -0.0514730 0.05147298\n ---------------------------------------------------------------------------------\n```\n\nTo produce an analysis of separate subpopulations in SAS we can use the `DOMAIN` statement (note: do not use the `BY` statement as it will not give statistically valid analysis), here we also request the design effect:\n\n```sas\nproc surveymeans data=nhanes mean deff;\n cluster SDMVPSU;\n strata SDMVSTRA;\n weight WTMEC2YR;\n var HI_CHOL;\n domain race;\nrun;\n```\n\n``` default\n The SURVEYMEANS Procedure\n\n Data Summary\n\n Number of Strata 15\n Number of Clusters 31\n Number of Observations 8591\n Sum of Weights 276536446\n\n\n Statistics\n\n Std Error Design\nVariable Mean of Mean Effect\n--------------------------------------------------------\nHI_CHOL 0.112143 0.005446 2.336725\n--------------------------------------------------------\n\n Statistics for race Domains\n\n Std Error Design\nrace Variable Mean of Mean Effect\n------------------------------------------------------------------------\n 1 HI_CHOL 0.101492 0.006246 1.082734\n 2 HI_CHOL 0.121649 0.006604 
1.407822\n 3 HI_CHOL 0.078640 0.010385 2.091156\n 4 HI_CHOL 0.099679 0.024666 3.098290\n------------------------------------------------------------------------\n```\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os Ubuntu 24.04.3 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-17\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P survey * 4.4-8 2025-08-28 [?] RSPM (R 4.5.0)\n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.2/lib/R/library\n\n P ── Loaded and on-disk path mismatch.\n\n─ External software ──────────────────────────────────────────────────────────\n setting value\n SAS 9.04.01M7P080520\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n:::", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/SAS/survival/execute-results/html.json b/_freeze/SAS/survival/execute-results/html.json index 2229b97ca..5cd2abb86 100644 --- a/_freeze/SAS/survival/execute-results/html.json +++ b/_freeze/SAS/survival/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "f137f7486f89a239a31fc931c70e21b0", + "hash": "8e5f72b992752756d5fb57895e3a6504", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Survival Analysis Using SAS\"\nexecute: \n eval: false\n---\n\nThe most commonly used survival analysis methods in clinical trials include:\n\n- Kaplan-Meier (KM) estimators: non-parametric statistics utilized for 
estimating the survival function\n\n- Log-rank test: a non-parametric test for comparing the survival functions across two or more groups\n\n- Cox proportional hazards (PH) model: a semi-parametric model often used to assess the relationship between the survival time and explanatory variables\n\nAdditionally, other methods for analyzing time-to-event data are available, such as:\n\n- Parametric survival model\n\n- Accelerated failure time model\n\n- Competing risk model\n\n- Restricted mean survival time\n\n- Time-dependent Cox model\n\nWhile these models may be explored in a separate document, this particular document focuses solely on the three most prevalent methods: KM estimators, log-rank test and Cox PH model.\n\n# Analysis of Time-to-event Data\n\nBelow is a standard mock-up for survival analysis in clinical trials.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival/layout.png){fig-align='center' width=75%}\n:::\n:::\n\n\n## Example Data\n\nData source: https://stats.idre.ucla.edu/sas/seminars/sas-survival/\n\nThe data include 500 subjects from the Worcester Heart Attack Study. This study examined several factors, such as age, gender and BMI, that may influence survival time after heart attack. Follow up time for all participants begins at the time of hospital admission after heart attack and ends with death or loss to follow up (censoring). 
The variables used here are:\n\n- lenfol: length of followup, terminated either by death or censoring - time variable\n\n- fstat: loss to followup = 0, death = 1 - censoring variable\n\n- afb: atrial fibrillation, no = 0, 1 = yes - explanatory variable\n\n- gender: males = 0, females = 1 - stratification factor\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nlibname mylib \"..\\data\";\n\ndata dat;\n set mylib.whas500;\n lenfoly = round(lenfol/365.25, 0.01); /* change follow-up days to years for better visualization*/\nrun;\n```\n:::\n\n\n## The Non-stratified Model\n\nFirst we try a non-stratified analysis following the mock-up above to describe the association between survival time and afb (atrial fibrillation).\n\nThe KM estimators and log-rank test are from `PROC LIFETEST`, and Cox PH model is conducted using `PROC PHREG`.\n\n### KM estimators and log-rank test\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc lifetest data=dat outsurv=_SurvEst timelist= 1 3 5 reduceout stderr; \n time lenfoly*fstat(0);\n strata afb;\nrun;\n```\n:::\n\n\nThe landmark estimates and quartile estimates for AFB = 0 group are as shown in below:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival/sas_km_afib0.png){fig-align='center' width=75%}\n:::\n:::\n\n\nThe logrank test result is in below:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival/sas_logrank.png){fig-align='center' width=75%}\n:::\n:::\n\n\n### Cox PH model\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc phreg data = dat;\n class afb;\n model lenfol*fstat(0) = afb/rl;\nrun;\n```\n:::\n\n\nThe hazard ratio and confidence intervals are shown as below:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival/sas_cox.png){fig-align='center' width=75%}\n:::\n:::\n\n\n## The Stratified Model\n\nIn a stratified model, the Kaplan-Meier estimators remain the same as those in the non-stratified model. 
To implement stratified log-rank tests and Cox proportional hazards models, simply add the `STRATA` option in both `PROC LIFETEST` and `PROC PHREG`.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\n# KM estimators and log-rank test\nproc lifetest data=dat;\n time lenfoly*fstat(0);\n strata gender/group = afb;\nrun;\n\n# Cox PH model\nproc phreg data=dat;\n class afb;\n model lenfol*fstat(0) = afb/rl;\n strata gender;\nrun;\n```\n:::\n", + "markdown": "---\ntitle: \"Survival Analysis Using SAS\"\n---\n\nThe most commonly used survival analysis methods in clinical trials include:\n\n- Kaplan-Meier (KM) estimators: non-parametric statistics utilized for estimating the survival function\n\n- Log-rank test: a non-parametric test for comparing the survival functions across two or more groups\n\n- Cox proportional hazards (PH) model: a semi-parametric model often used to assess the relationship between the survival time and explanatory variables\n\nAdditionally, other methods for analyzing time-to-event data are available, such as:\n\n- Parametric survival model\n\n- Accelerated failure time model\n\n- Competing risk model\n\n- Restricted mean survival time\n\n- Time-dependent Cox model\n\nWhile these models may be explored in a separate document, this particular document focuses solely on the three most prevalent methods: KM estimators, log-rank test and Cox PH model.\n\n# Analysis of Time-to-event Data\n\nBelow is a standard mock-up for survival analysis in clinical trials.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival/layout.png){fig-align='center' width=75%}\n:::\n:::\n\n\n## Example Data\n\nData source: https://stats.idre.ucla.edu/sas/seminars/sas-survival/\n\nThe data include 500 subjects from the Worcester Heart Attack Study. This study examined several factors, such as age, gender and BMI, that may influence survival time after heart attack. 
Follow up time for all participants begins at the time of hospital admission after heart attack and ends with death or loss to follow up (censoring). The variables used here are:\n\n- lenfol: length of followup, terminated either by death or censoring - time variable\n\n- fstat: loss to followup = 0, death = 1 - censoring variable\n\n- afb: atrial fibrillation, no = 0, 1 = yes - explanatory variable\n\n- gender: males = 0, females = 1 - stratification factor\n\n```sas\nlibname mylib \"..\\data\";\n\ndata dat;\n set mylib.whas500;\n lenfoly = round(lenfol/365.25, 0.01); /* change follow-up days to years for better visualization*/\nrun;\n```\n\n## The Non-stratified Model\n\nFirst we try a non-stratified analysis following the mock-up above to describe the association between survival time and afb (atrial fibrillation).\n\nThe KM estimators and log-rank test are from `PROC LIFETEST`, and Cox PH model is conducted using `PROC PHREG`.\n\n### KM estimators and log-rank test\n\n```sas\nproc lifetest data=dat outsurv=_SurvEst timelist= 1 3 5 reduceout stderr; \n time lenfoly*fstat(0);\n strata afb;\nrun;\n```\n\nThe landmark estimates and quartile estimates for AFB = 0 group are as shown in below:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival/sas_km_afib0.png){fig-align='center' width=75%}\n:::\n:::\n\n\nThe logrank test result is in below:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival/sas_logrank.png){fig-align='center' width=75%}\n:::\n:::\n\n\n### Cox PH model\n\n```sas\nproc phreg data = dat;\n class afb;\n model lenfol*fstat(0) = afb/rl;\nrun;\n```\n\nThe hazard ratio and confidence intervals are shown as below:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival/sas_cox.png){fig-align='center' width=75%}\n:::\n:::\n\n\n## The Stratified Model\n\nIn a stratified model, the Kaplan-Meier estimators remain the same as those in the 
non-stratified model. To implement stratified log-rank tests and Cox proportional hazards models, simply add the `STRATA` option in both `PROC LIFETEST` and `PROC PHREG`.\n\n```sas\n# KM estimators and log-rank test\nproc lifetest data=dat;\n time lenfoly*fstat(0);\n strata gender/group = afb;\nrun;\n\n# Cox PH model\nproc phreg data=dat;\n class afb;\n model lenfol*fstat(0) = afb/rl;\n strata gender;\nrun;\n```", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/SAS/survival_cif/execute-results/html.json b/_freeze/SAS/survival_cif/execute-results/html.json index 634ba8250..ffd273bb9 100644 --- a/_freeze/SAS/survival_cif/execute-results/html.json +++ b/_freeze/SAS/survival_cif/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "ecb504497a2844eecdd73b9198450627", + "hash": "8ac33956dd65bac1f97ba4c12ffe3772", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Estimating Cumulative Incidence Functions Using SAS\"\nexecute: \n eval: false\n---\n\n## Objective\n\nIn this document we present how to estimate the cumulative incidence function (CIF) in SAS (version 9.4). We focus on the competing risks model where each subject experiences only one out of *k* possible events as depicted in the figure below.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival_competing_risks/cr.jpg){fig-align='center' width=25%}\n:::\n:::\n\n\n### Data used\n\nThe bone marrow transplant (BTM) dataset as presented by Guo & So (2018) is used. The dataset has the following variables:\n\n- `Group` has three levels, indicating three disease groups: ALL, AML-Low Risk, AML-High Risk.\n\n- `T` is the disease-free survival time in days. A derived variable `TYears = T/365.25` is used in the analysis.\n\n- `Status` has value 0 if `T` is censored; 1 if `T` is time to relapse; 2 if `T` is time to death.\n\n- `WaitTime` is the waiting time to transplant in days. 
This variable is not used here.\n\n- A new variable `ID` is created.\n\nSAS code to prepare the data:\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc format;\n value DiseaseGroup 1='ALL'\n 2='AML-Low Risk'\n 3='AML-High Risk';\n value EventStatus 0='Censored'\n 1='Relapse'\n 2='Death';\nrun;\n\nlibname datalib \"..\\data\";\ndata bmt;\n set datalib.bmt;\n TYears = T / 365.25;\n ID = _n_;\n format Group DiseaseGroup.;\n format Status EventStatus.;\nrun;\n```\n:::\n\n\n## Estimating CIFs in SAS\n\nPROC LIFETEST is used to estimate the CIFs in SAS. For illustration, we model the time to relapse.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nods graphics on;\nproc lifetest data=bmt \n plots=cif(test) \n error=aalen\n conftype=loglog\n outcif=cif1 \n timelist=0.5 1 1.5 2 3; \n time Tyears * Status(0) / eventcode=1; \n strata Group / order=internal; \n format Group DiseaseGroup.;\nrun; \nods graphics off;\n```\n:::\n\n\nBelow are selected outputs for comparison with the R outputs in the companion document.\n\nCIF estimates for time to relapse at selected timepoints for 'AML-Low Risk' patients:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival_competing_risks/cifSAS.jpg){fig-align='center' width=75%}\n:::\n:::\n\n\nCIF estimates for time to relapses:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival_competing_risks/cifPlot.png){fig-align='center' width=75%}\n:::\n:::\n\n\nTwo points to note:\n\n1. By default the variance of the estimated CIF are estimated with Aalen's asymptotic method. This can be changed to the delta method by setting `error=delta` in the PROC LIFETEST statement.\n\n2. By default the log-log transformation is used to produce the pointwise confidence intervals (CIs) for the estimated CIFs. To select other methods, for instance log, set `conftype=log`.\n\n## Reference\n\nAalen O. (1978). 
Nonparametric Estimation of Partial Transition Probabilities in Multiple Decrement Models, *Annals of Statistics*, 6:534-545.\n\nGray R. (1988). A Class of K-Sample Tests for Comparing the Cumulative Incidence of a Competing Risk, *Annals of Statistics*, 16:1141-1154.\n\nGray R. (2024). *cmprsk: Subdistribution Analysis of Competing Risks*. \n\nGuo C and So Y. (2018). Cause-Specific Analysis of Competing Risks Using the PHREG Procedure. In *Proceedings of the SAS Global Forum 2018 Conference*. Cary, NC: SAS Institute Inc. .\n\nSAS (2019). Statistical Analysis Software. Users' Guide Statistics Version 9.4. SAS Institute Inc., Cary.", + "markdown": "---\ntitle: \"Estimating Cumulative Incidence Functions Using SAS\"\n---\n\n## Objective\n\nIn this document we present how to estimate the cumulative incidence function (CIF) in SAS (version 9.4). We focus on the competing risks model where each subject experiences only one out of *k* possible events as depicted in the figure below.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival_competing_risks/cr.jpg){fig-align='center' width=25%}\n:::\n:::\n\n\n### Data used\n\nThe bone marrow transplant (BTM) dataset as presented by Guo & So (2018) is used. The dataset has the following variables:\n\n- `Group` has three levels, indicating three disease groups: ALL, AML-Low Risk, AML-High Risk.\n\n- `T` is the disease-free survival time in days. A derived variable `TYears = T/365.25` is used in the analysis.\n\n- `Status` has value 0 if `T` is censored; 1 if `T` is time to relapse; 2 if `T` is time to death.\n\n- `WaitTime` is the waiting time to transplant in days. 
This variable is not used here.\n\n- A new variable `ID` is created.\n\nSAS code to prepare the data:\n\n```sas\nproc format;\n value DiseaseGroup 1='ALL'\n 2='AML-Low Risk'\n 3='AML-High Risk';\n value EventStatus 0='Censored'\n 1='Relapse'\n 2='Death';\nrun;\n\nlibname datalib \"..\\data\";\ndata bmt;\n set datalib.bmt;\n TYears = T / 365.25;\n ID = _n_;\n format Group DiseaseGroup.;\n format Status EventStatus.;\nrun;\n```\n\n## Estimating CIFs in SAS\n\nPROC LIFETEST is used to estimate the CIFs in SAS. For illustration, we model the time to relapse.\n\n```sas\nods graphics on;\nproc lifetest data=bmt \n plots=cif(test) \n error=aalen\n conftype=loglog\n outcif=cif1 \n timelist=0.5 1 1.5 2 3; \n time Tyears * Status(0) / eventcode=1; \n strata Group / order=internal; \n format Group DiseaseGroup.;\nrun; \nods graphics off;\n```\n\nBelow are selected outputs for comparison with the R outputs in the companion document.\n\nCIF estimates for time to relapse at selected timepoints for 'AML-Low Risk' patients:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival_competing_risks/cifSAS.jpg){fig-align='center' width=75%}\n:::\n:::\n\n\nCIF estimates for time to relapses:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival_competing_risks/cifPlot.png){fig-align='center' width=75%}\n:::\n:::\n\n\nTwo points to note:\n\n1. By default the variance of the estimated CIF are estimated with Aalen's asymptotic method. This can be changed to the delta method by setting `error=delta` in the PROC LIFETEST statement.\n\n2. By default the log-log transformation is used to produce the pointwise confidence intervals (CIs) for the estimated CIFs. To select other methods, for instance log, set `conftype=log`.\n\n## Reference\n\nAalen O. (1978). Nonparametric Estimation of Partial Transition Probabilities in Multiple Decrement Models, *Annals of Statistics*, 6:534-545.\n\nGray R. (1988). 
A Class of K-Sample Tests for Comparing the Cumulative Incidence of a Competing Risk, *Annals of Statistics*, 16:1141-1154.\n\nGray R. (2024). *cmprsk: Subdistribution Analysis of Competing Risks*. \n\nGuo C and So Y. (2018). Cause-Specific Analysis of Competing Risks Using the PHREG Procedure. In *Proceedings of the SAS Global Forum 2018 Conference*. Cary, NC: SAS Institute Inc. .\n\nSAS (2019). Statistical Analysis Software. Users' Guide Statistics Version 9.4. SAS Institute Inc., Cary.", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/SAS/survival_csh/execute-results/html.json b/_freeze/SAS/survival_csh/execute-results/html.json index 949e00ae3..bb19b2e85 100644 --- a/_freeze/SAS/survival_csh/execute-results/html.json +++ b/_freeze/SAS/survival_csh/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "4b264e271904357ddefbaa531a530764", + "hash": "bff4af2d41b3cfdf0421eee474218f75", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Estimating and Testing Cause Specific Hazard Ratio Using SAS\"\nexecute: \n eval: false\n---\n\n## Objective\n\nIn this document we present how to estimate and test cause specific hazard ratio for the probability of experiencing a certain event at a given time in a competing risks model in SAS (version 9.4). We focus on the basic model where each subject experiences only one out of *k* possible events as depicted in the figure below.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival_competing_risks/cr.jpg){fig-align='center' width=25%}\n:::\n:::\n\n\nAs this document aims to provide syntax for estimating and testing cause-specific hazard ratios using Cox's PH model for competing risks, we assume that readers have working knowledge of a competing risks framework. 
The [Reference] below list a few literature for a quick refresher on this topic.\n\nThe syntax given here produce results match that by R package `survival`, in particular with function `coxph()` (see the companion R document). This is usually necessary if validating results from the two software is the objective.\n\n## SAS procedure\n\nWe use `PROC PHREG` in this document.\n\n### Data used\n\nThe bone marrow transplant (BTM) dataset as presented by Guo & So (2018) is used. The dataset has the following variables:\n\n- `Group` has three levels, indicating three disease groups.\n\n- `T` is the disease-free survival time in days. A derived variable `TYears = T/365.25` is used in the analysis.\n\n- `Status` has value 0 if `T` is censored; 1 if `T` is time to relapse; 2 if `T` is time to death.\n\n- `WaitTime` is the waiting time to transplant in days.\n\n- For illustration, a categorical variable `waitCat` is created from `waitTime` as `waitCat = TRUE` if `waitTime > 200`, and `FALSE` otherwise.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc format;\n value DiseaseGroup 1='ALL'\n 2='AML-Low Risk'\n 3='AML-High Risk';\n value EventStatus 0='Censored'\n 1='Relapse'\n 2='Death';\nrun;\n\nlibname datalib \"..\\data\";\ndata bmt;\n set datalib.bmt;\n TYears = T / 365.25;\n waitCat = (waitTime>200);\n ID = _n_;\n format Group DiseaseGroup.;\n format Status EventStatus.;\nrun;\n```\n:::\n\n\n## Estimating and testing the cause specific hazard ratio\n\n### Syntax 1: all events in one go\n\nStarting in SAS/STAT 14.3, all competing events can be estimated together. 
However, currently this syntax does not allow the `strata` statement.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc phreg data=Bmt;\n title 'Cause-Specific Hazard Regression for Relapse and Death without strata';\n class Group (order=internal ref=first);\n model T*Status(0)=Group / eventcode(cox)=1;\nrun;\n```\n:::\n\n\nThe results for both events are given below:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival_competing_risks/cshSAS_1.png){fig-align='center' width=100% height=120%}\n:::\n:::\n\n\nThree points to note:\n\n1. The option `eventcode(cox)=1` tells PHREG that `Relapse` (event 1) is the event of interest, and `Death` (event 2) is the competing risk.\n\n2. This results are essentially the same as modeling `Relapse` and `Death` separately: there are two global hypotheses, one for each event.\n\n3. This is different from fitting all events in one model that is done in R `coxph()`. In other words, *this is entirely a different model from what R does when modeling all competing events together.* (See Syntax 1 in the Companion R document.)\n\n4. Additionally, since `strata` statement cannot be incorporated, the results for each event are different from that produced by Syntax 1 in the R document.\n\nFor more information, please see [Guo C and So Y. 
(2018)](https://support.sas.com/resources/papers/proceedings18/2159-2018.pdf).\n\n### Syntax 2: Estimating one event at a time\n\nWe use `Relapse` as an example.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nods output ParameterEstimates=p1;\nproc phreg data=bmt; \n title 'Cause-Specific Hazard Regression for Relapse with strata';\n class Group (order=internal ref=first) waitCat;\n strata waitCat;\n model TYears*Status(0,2) = Group / risklimits alpha = 0.05;\nrun;\nquit;\n```\n:::\n\n\nThe results for event `Relapse` are given below:\n\n\n::: {.cell layout-align=\"center\"}\n\n:::\n\n\nNote that if there is no stratification, the results will be the same as from Syntax 1 above but for `Relapse` only.\n\n## Summary\n\n- In `PROC PHREG`, by default Breslow's method is used for handling ties. To match the default results with R `survival::coxph()` which uses Efron's method, this needs to be requested via `ties = efron` option in the `model` statement.\n\n- For multi-state models such as a competing risk analysis, the R function `survival::coxph()` by default estimate the standard errors of parameter estimates with a robust sandwich estimator. To match results with R, the option `covsandwich` or `covs` for short, need to be added to the `proc phreg` statement.\n\n- Due to the different internal numerical estimation methods of R and SAS, results only match up to the 4th decimal places. However, overall consistency can be established between the two for estimating and testing cause-specific hazard ratio using Cox's PH model.\n\n## Reference\n\nGuo C and So Y. (2018). \"Cause-specific analysis of competing risks using the PHREG procedure.\" In *Proceedings of the SAS Global Forum 2018 Conference*. Cary, NC: SAS Institute Inc. \n\nPintilie M. (2006). *Competing Risks: A Practical Perspective*. Wiley. \n\nTherneau T, Crowson C, and Atkinson E. (2024). 
\"Multi-state models and competing risks.\" ", + "markdown": "---\ntitle: \"Estimating and Testing Cause Specific Hazard Ratio Using SAS\"\n---\n\n## Objective\n\nIn this document we present how to estimate and test cause specific hazard ratio for the probability of experiencing a certain event at a given time in a competing risks model in SAS (version 9.4). We focus on the basic model where each subject experiences only one out of *k* possible events as depicted in the figure below.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival_competing_risks/cr.jpg){fig-align='center' width=25%}\n:::\n:::\n\n\nAs this document aims to provide syntax for estimating and testing cause-specific hazard ratios using Cox's PH model for competing risks, we assume that readers have working knowledge of a competing risks framework. The [Reference] below list a few literature for a quick refresher on this topic.\n\nThe syntax given here produce results match that by R package `survival`, in particular with function `coxph()` (see the companion R document). This is usually necessary if validating results from the two software is the objective.\n\n## SAS procedure\n\nWe use `PROC PHREG` in this document.\n\n### Data used\n\nThe bone marrow transplant (BTM) dataset as presented by Guo & So (2018) is used. The dataset has the following variables:\n\n- `Group` has three levels, indicating three disease groups.\n\n- `T` is the disease-free survival time in days. 
A derived variable `TYears = T/365.25` is used in the analysis.\n\n- `Status` has value 0 if `T` is censored; 1 if `T` is time to relapse; 2 if `T` is time to death.\n\n- `WaitTime` is the waiting time to transplant in days.\n\n- For illustration, a categorical variable `waitCat` is created from `waitTime` as `waitCat = TRUE` if `waitTime > 200`, and `FALSE` otherwise.\n\n```sas\nproc format;\n value DiseaseGroup 1='ALL'\n 2='AML-Low Risk'\n 3='AML-High Risk';\n value EventStatus 0='Censored'\n 1='Relapse'\n 2='Death';\nrun;\n\nlibname datalib \"..\\data\";\ndata bmt;\n set datalib.bmt;\n TYears = T / 365.25;\n waitCat = (waitTime>200);\n ID = _n_;\n format Group DiseaseGroup.;\n format Status EventStatus.;\nrun;\n```\n\n## Estimating and testing the cause specific hazard ratio\n\n### Syntax 1: all events in one go\n\nStarting in SAS/STAT 14.3, all competing events can be estimated together. However, currently this syntax does not allow the `strata` statement.\n\n```sas\nproc phreg data=Bmt;\n title 'Cause-Specific Hazard Regression for Relapse and Death without strata';\n class Group (order=internal ref=first);\n model T*Status(0)=Group / eventcode(cox)=1;\nrun;\n```\n\nThe results for both events are given below:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival_competing_risks/cshSAS_1.png){fig-align='center' width=100% height=120%}\n:::\n:::\n\n\nThree points to note:\n\n1. The option `eventcode(cox)=1` tells PHREG that `Relapse` (event 1) is the event of interest, and `Death` (event 2) is the competing risk.\n\n2. This results are essentially the same as modeling `Relapse` and `Death` separately: there are two global hypotheses, one for each event.\n\n3. This is different from fitting all events in one model that is done in R `coxph()`. In other words, *this is entirely a different model from what R does when modeling all competing events together.* (See Syntax 1 in the Companion R document.)\n\n4. 
Additionally, since `strata` statement cannot be incorporated, the results for each event are different from that produced by Syntax 1 in the R document.\n\nFor more information, please see [Guo C and So Y. (2018)](https://support.sas.com/resources/papers/proceedings18/2159-2018.pdf).\n\n### Syntax 2: Estimating one event at a time\n\nWe use `Relapse` as an example.\n\n```sas\nods output ParameterEstimates=p1;\nproc phreg data=bmt; \n title 'Cause-Specific Hazard Regression for Relapse with strata';\n class Group (order=internal ref=first) waitCat;\n strata waitCat;\n model TYears*Status(0,2) = Group / risklimits alpha = 0.05;\nrun;\nquit;\n```\n\nThe results for event `Relapse` are given below:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/survival_competing_risks/cshSAS_2.png){fig-align='center' width=100%}\n:::\n:::\n\n\nNote that if there is no stratification, the results will be the same as from Syntax 1 above but for `Relapse` only.\n\n## Summary\n\n- In `PROC PHREG`, by default Breslow's method is used for handling ties. To match the default results with R `survival::coxph()` which uses Efron's method, this needs to be requested via `ties = efron` option in the `model` statement.\n\n- For multi-state models such as a competing risk analysis, the R function `survival::coxph()` by default estimate the standard errors of parameter estimates with a robust sandwich estimator. To match results with R, the option `covsandwich` or `covs` for short, need to be added to the `proc phreg` statement.\n\n- Due to the different internal numerical estimation methods of R and SAS, results only match up to the 4th decimal places. However, overall consistency can be established between the two for estimating and testing cause-specific hazard ratio using Cox's PH model.\n\n## Reference\n\nGuo C and So Y. (2018). 
\"Cause-specific analysis of competing risks using the PHREG procedure.\" In *Proceedings of the SAS Global Forum 2018 Conference*. Cary, NC: SAS Institute Inc. \n\nPintilie M. (2006). *Competing Risks: A Practical Perspective*. Wiley. \n\nTherneau T, Crowson C, and Atkinson E. (2024). \"Multi-state models and competing risks.\" ", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/SAS/tipping_point/execute-results/html.json b/_freeze/SAS/tipping_point/execute-results/html.json index 7088bf79f..c3439254b 100644 --- a/_freeze/SAS/tipping_point/execute-results/html.json +++ b/_freeze/SAS/tipping_point/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "5ec50e463fcfb953bdee238fabccaa48", + "hash": "a544a7d8a201dbd014e16e9bb12a6004", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"SAS Tipping Point (Delta Adjustment): Continuous Data\"\nexecute: \n eval: false\n---\n\n\n\n# Tipping Point / Delta Adjustment\n\n## Reference-based multiple imputation (rbmi)\n\n### Methodology introduction\n\nThe concept of delta adjustment and tipping point analysis builds on the framework of reference-based multiple imputation (rbmi) as seen on its respective [CAMIS webpage](../SAS/rbmi_continuous_joint_SAS.html). The use of the `five macros` ([Roger 2022](https://baselbiometrics.github.io/home/docs/talks/20221208/5_JamesRoger%2020121118.pdf)) in SAS for the following standard and reference-based multiple imputation approaches are introduced there:\n\n- Missing At Random (MAR)\n\n- Jump to Reference (J2R)\n\n- Copy Reference (CR)\n\n- Copy Increment from Reference (CIR)\n\nEssentially, the workflow comes down to sequentially running the five macro's:\n\n- `Part1A()` declares the parameter estimation model and checks consistency with the dataset. It builds a master dataset which holds details of the current job (run of the macros in sequence). 
It also builds indexes for the classification variables, which may be either numeric or character.\n\n- `Part1B()` fits the parameter estimation model using the MCMC procedure and draws a pseudo-independent sample from the joint posterior distribution for the linear predictor parameters and the covariance parameters.\n\n- `Part2A()` calculates the predicted mean under MAR, and under MNAR for each subject based on their withdrawal pattern once for each draw of the linear predictor parameter estimates. The choice of MNAR is controlled by the method used, which may vary from subject to subject.\n\n- `Part2B()` imputes the intermediate missing values using MAR and the trailing missing values using MNAR, by deriving the conditional distribution for the missing values conditional on the observed values and covariates, using the appropriate sampled covariance parameter estimates.\n\n- `Part3()` carries out a univariate ANOVA analysis at selected time points usually based on the same covariates as the parameter estimation model. It then combines the least-squares means and their differences using the MIANALYZE procedure to provide final results. It is in this macro which handles the Delta methods.\n\nThe `five macros` are available at [LSHTM DIA Missing Data](https://www.lshtm.ac.uk/research/centres-projects-groups/missing-data#dia-missing-data) under *Imputation based approaches* \\> *Reference-based MI via Multivariate Normal RM (the \"five macros\" and MIWithD)* \\> *Downloads*. Please make sure to familiarize yourself with its functionalities before checking this tutorial. For more details, see the user guide available upon download ([Roger 2017](https://www.lshtm.ac.uk/research/centres-projects-groups/missing-data#dia-missing-data)).\n\n### Data\n\nThe same publicly available [dataset](https://r-packages.io/datasets/antidepressant_data) from an antidepressant clinical trial that was used to illustrate rbmi is again used for this tutorial. 
This dataset is also used in the R version of tipping point guidance on this [CAMIS webpage](https://psiaims.github.io/CAMIS/R/tipping_point.html) and the [quickstart vignette](https://cran.r-project.org/web/packages/rbmi/vignettes/quickstart.html) of the `rbmi` R package.\n\nThe relevant endpoint for the antidepressant trial was assessed using the Hamilton 17-item depression rating scale (HAMD17), which was measured at baseline and subsequently at weeks 1, 2, 3, 4 and 6 (visits 4-7). Study drug discontinuation occurred in 24% (20/84) of subjects in the active drug group, compared to 26% (23/88) of subjects in the placebo group. Importantly, all data after study drug discontinuation are missing and there is a single intermittent missing observation.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc print data=dat (obs=10);\n var PATIENT GENDER THERAPY RELDAYS VISIT BASVAL HAMDTL17 CHANGE;\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/tipping_point/SAS_data_exploration_1.PNG){fig-align='center' width=80%}\n:::\n:::\n\n\nThe number of patients per visit and treatment group are:\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc freq data=dat;\n table VISIT*THERAPY / norow nocol nopercent nocum;\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/tipping_point/SAS_data_exploration_2.PNG){fig-align='center' width=80%}\n:::\n:::\n\n\nThe mean change from baseline of the HAMD17 endpoint per visit and treatment group using only the complete cases are:\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc means data=dat n mean nonobs;\n class VISIT THERAPY;\n var CHANGE;\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/tipping_point/SAS_data_exploration_3.PNG){fig-align='center' width=80%}\n:::\n:::\n\n\nThe missingness pattern is:\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc transpose data=dat out=HAMD_wide(drop=_NAME_) 
prefix=CHG;\n by PATIENT THERAPY BASVAL;\n id VISIT;\n var CHANGE;\nrun;\n\nproc mi data=HAMD_wide nimpute=0 displaypattern=NOMEANS;\n var CHG4 CHG5 CHG6 CHG7;\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/tipping_point/SAS_data_exploration_4.PNG){fig-align='center' width=80%}\n:::\n:::\n\n\nThere is a single patient with an intermittent missing observation at visit 5, which is patient 3618. Special considerations need to be taken when applying delta adjustments to intermittent missing observations like this one (more on this below).\n\n## Tipping point analysis and delta adjustment\n\n### Methodology introduction\n\nWhen analyses for endpoints are performed under MAR or MNAR assumptions for missing data, it is important to perform sensitivity analyses to assess the impact of deviations from these assumptions. Tipping point analysis (or delta adjustment method) is an example of a sensitivity analysis that can be used to assess the robustness of a clinical trial when its result is based on imputed missing data.\n\nGenerally, tipping point analysis explores the influence of missingness on the overall conclusion of the treatment difference by shifting imputed missing values in the treatment group towards the reference group until the result becomes non-significant. The tipping point is the minimum shift needed to make the result non-significant. If the minimum shift needed to make the result non-significant is implausible, then greater confidence in the primary results can be inferred.\n\nTipping point analysis generally happens by adjusting imputing values by a so-called delta values. The observed tipping point is the minimum delta needed to make the result non-significant. Mostly a range of delta values is explored and only imputed values from the active treatment group are adjusted by the delta value. However, delta adjustments to the control group are possible as well. 
Naturally, the range of acceptable values for delta should be agreed a priori, before taking this approach.\n\nFor an extensive discussion on delta adjustment methods, we refer to [Cro et al. 2020](https://pubmed.ncbi.nlm.nih.gov/32419182/).\n\n## Tipping point analysis: MAR approach\n\nAs mentioned, we will illustrate the use of the so-called `five macros` in SAS for delta adjustment and tipping point analysis. These are available at [LSHTM DIA Missing Data](https://www.lshtm.ac.uk/research/centres-projects-groups/missing-data#dia-missing-data) under *Imputation based approaches* \\> *Reference-based MI via Multivariate Normal RM (the \"five macros\" and MIWithD)* \\> *Downloads*.\n\n### Perform tipping point analysis\n\nTo conduct a tipping point analysis under the MAR assumption, we simply specify `method = MAR` under `Part2A()` of the `five macros`. Generally, the rest of `Part1` and `Part2` are the same as in the scenario without any delta adjustment.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\n%part1A(jobname = HAMD, \n Data=dat,\n Subject=PATIENT,\n RESPONSE = CHANGE,\n Time = VISIT,\n Treat = THERAPY,\n Covbytime = BASVAL,\n Catcov = GENDER);\n\n%part1B(jobname = HAMD,\n Ndraws = 500,\n thin = 10,\n seed = 12345);\n\n%part2A(jobname = HAMD_MAR,\n inname = HAMD,\n method = MAR);\n\n%part2B(jobname = HAMD_MAR,\n seed = 12345);\n```\n:::\n\n\nThen, in `Part3()`, we create a series of delta values that increases sequentially for the intervention group by changing the `Delta` argument within a do loop, and setting `Dgroups = DRUG` and `Dlag = 1 0 0 0` (more on this below).\n\nA clear description of these arguments is given in the documentation:\n\n- `Delta`: A vector of Delta values from visit 1 up to the final visit (default is not applying a delta adjustment)\n- `Dlag`: A vector of multipliers for each Lag after withdrawal for the Delta values (default is all values of 1)\n- `Dgroups`: The treatment groups to which Delta should be applied (default is all 
treatment groups)\n\nTo automate the tipping point analysis, you can create a new macro like shown below. The first part of this macro prints all results, while the second part prints the non-significant and significant results separately by filtering on `Probt`.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\ndata all_results;\n length DELTA 8 VISIT $10 THERAPY $20 _THERAPY $20 Diff SE_Diff df Probt LCL_Diff UCL_Diff 8;\n stop;\nrun;\n\n%macro part3_TP;\n\n%do delta = -5 %to 10 %by 1;\n%part3(Jobname = HAMD_MAR, anref=PLACEBO, Delta = &delta &delta &delta &delta, DLag = 1 0 0 0, DGroups = DRUG, Label=MAR);\n\ndata current_result;\n set HAMD_MAR_OUT(keep = VISIT THERAPY _THERAPY Diff SE_Diff df Probt LCL_Diff UCL_Diff);\n DELTA = δ\nrun;\n\nproc append base=all_results data=current_result force nowarn;\nrun;\n\n%end;\nproc print data = all_results noobs label;\n where VISIT = \"7\";\n var DELTA VISIT THERAPY _THERAPY Diff SE_Diff df Probt LCL_Diff UCL_Diff;\n title \"MAR: all results\";\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n\n:::\n\n\n\n::: {.cell}\n\n```{.sas .cell-code}\n\nproc sql noprint;\n create table delta_ge_05 as\n select distinct DELTA\n from all_results\n where Probt >= 0.05 and not missing(Probt) and VISIT = \"7\";\n\n select DELTA into :non_sig_delta separated by ' '\n from delta_ge_05;\n\n create table delta_lt_05 as\n select distinct DELTA\n from all_results\n where Probt < 0.05 and not missing(Probt) and VISIT = \"7\";\n\n select DELTA into :sig_delta separated by ' '\n from delta_lt_05;\nquit;\n\nproc print data = all_results noobs label;\n where DELTA in (&non_sig_delta.) and VISIT = \"7\";\n var DELTA VISIT THERAPY _THERAPY Diff SE_Diff df Probt LCL_Diff UCL_Diff;\n title \"MAR: non-significant results\";\nrun;\n\nproc print data = all_results noobs label;\n where DELTA in (&sig_delta.) 
and VISIT = \"7\";\n var DELTA VISIT THERAPY _THERAPY Diff SE_Diff df Probt LCL_Diff UCL_Diff;\n title \"MAR: significant results\";\nrun;\n\n%mend;\n\n%part3_TP;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/tipping_point/SAS_MAR_non_sig_results.png){fig-align='center' width=80%}\n:::\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/tipping_point/SAS_MAR_sig_results.png){fig-align='center' width=80%}\n:::\n:::\n\n\nTo determine the **exact** tipping point between the last \"significant\" delta and the first \"non-significant\" delta, you may manually perform linear interpolation for `DELTA`, `Diff`, `LCL_Diff` and `UCL_Diff` at `Probt = 0.05`.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/tipping_point/SAS_MAR_TP.png){fig-align='center' width=80%}\n:::\n:::\n\n\n[**Note:**]{.underline} In the `five macros`, delta adjustments happen right after data imputation under MAR or MNAR (using reference-based imputation approaches) in `Part2`, but before implementing the analysis model in `Part3`. Sensitivity analyses can therefore be performed without having to refit the imputation model, which is computationally efficient. This approach is considered a *marginal* delta adjustment approach, because the delta is simply added to the mean of the conditional multivariate normal distribution (conditional on the observed values and the covariates) for the imputation model ([Roger 2017](https://www.lshtm.ac.uk/research/centres-projects-groups/missing-data#dia-missing-data)).\n\n### Visualize results\n\nA nice visualization of this tipping point analysis for the MAR approach is shown below. 
The dashed horizontal line indicates a p-value of 0.05 in the left plot and no treatment effect in the right plot.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/tipping_point/SAS_MAR_est_pval.png){fig-align='center' width=80%}\n:::\n:::\n\n\nWe clearly see that the p-value under MAR reaches a tipping point from 3 onward in the range of delta's considered.\n\n## Comparison with rbmi MNAR approaches\n\n### Summary of results\n\nIn the table below we present the results of the different imputation strategies with varying number of multiple imputation draws, M = 500 and M = 5000. Note that the results can be slightly different from the results above due to a possible different seed. The estimates show the contrast at visit 7 between DRUG and PLACEBO (DRUG - PLACEBO). Delta adjustments were applied to all imputed missing data, *except* intermittent missing data, in the intervention group only.\n\n| Method | Delta control | Delta intervention at TP | Estimate at TP | 95% CI | P-value | Original estimate | Original p-value |\n|---------|---------|---------|---------|---------|---------|---------|---------|\n| MI - MAR (M=500) | 0 | 3 | -2.081 | -4.337 to 0.175 | 0.0703 | -2.810 | 0.0134 |\n| MI - MAR (M=5000) | 0 | 3 | -2.096 | -4.353 to 0.161 | 0.0684 | -2.825 | 0.0130 |\n| MI - MNAR J2R (M=500) | 0 | -1 | -2.365 | -4.604 to -0.125 | 0.0386 | -2.122 | 0.0650 |\n| MI - MNAR J2R (M=5000) | 0 | -1 | -2.387 | -4.617 to -0.157 | 0.0361 | -2.144 | 0.0611 |\n| MI - MNAR CR (M=500) | 0 | 1 | -2.141 | -4.370 to 0.089 | 0.0597 | -2.384 | 0.0350 |\n| MI - MNAR CR (M=5000) | 0 | 1 | -2.157 | -4.377 to 0.062 | 0.0566 | -2.400 | 0.0330 |\n| MI - MNAR CIR (M=500) | 0 | 1 | -2.218 | -4.446 to 0.010 | 0.0510 | -2.461 | 0.0296 |\n| MI - MNAR CIR (M=5000) | 0 | 2 | -1.995 | -4.229 to 0.240 | 0.0798 | -2.481 | 0.0276 |\n\nOf all considered approaches, the MAR approach yields the largest delta adjustment at its tipping point, with a delta intervention of 
3 at both M = 500 and M = 5000. This indicates that the MAR assumption is the most robust against slight deviations of its conditions. Notice that for the MNAR JR approach we included, for completeness, tipping point analyses to know when the results switch from non-significant to significant. Correspondingly, two negative delta's (-1) are found at the tipping point. This is expected, given that the original analyses are non-significant (p \\~ 0.0650 and p \\~ 0.0611) and a tipping point analysis here aims to find the point at which the analysis turns to be significant, instead of non-significant.\n\n### Visual comparison\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/tipping_point/SAS_comparison_est.png){fig-align='center' width=80%}\n:::\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/tipping_point/SAS_comparison_pval.png){fig-align='center' width=80%}\n:::\n:::\n\n\n## Flexible delta adjustments\n\nSo far, we have only considered simple delta adjustments that add the same value to all imputed missing data. However, you may want to implement more flexible delta adjustments for missing data following an intercurrent event (ICE), where the magnitude of the delta varies depending on the distance of the visit from the ICE visit.\n\nTo enable flexible delta adjustments, `Part3()` includes three delta arguments: `Delta`, `DLag` and `DGroups`. Although these arguments were introduced earlier in the tutorial, we will now elaborate on how exactly these arguments allow for flexible delta adjustments with a few examples taken from the [advanced functionality vignette](https://cran.r-project.org/web/packages/rbmi/vignettes/advanced.html#sensitivity-analyses-delta-adjustments-and-tipping-point-analyses) of the `rbmi` R package.\n\n### Scaling delta by visit\n\nAssume a setting with 4 visits and the user specified `Delta = 5 6 7 8` and `DLag = 1 2 3 4`. 
For a subject for whom the first visit affected by the ICE is visit 2, these values of `Delta` and `DLag` would imply the following delta adjustments:\n\n| | Visit 1 | Visit 2 | Visit 3 | Visit 4 |\n|----------------|---------|---------|---------|---------|\n| Delta | 5 | 6 | 7 | 8 |\n| DLag | 0 | 1 | 2 | 3 |\n| Delta \\* DLag | 0 | 6 | 14 | 24 |\n| Cumulative sum | 0 | 6 | 20 | 44 |\n\nThat is, the subject would have a delta adjustment of 0 applied to visit 1, 6 for visit 2, 20 for visit 3 and 44 for visit 4.\n\nAssume instead, that the subject’s first visit affected by the ICE was visit 3. Then, the above values of `Delta` and `DLag` would imply the following delta adjustment:\n\n| | Visit 1 | Visit 2 | Visit 3 | Visit 4 |\n|----------------|---------|---------|---------|---------|\n| Delta | 5 | 6 | 7 | 8 |\n| DLag | 0 | 0 | 1 | 2 |\n| Delta \\* DLag | 0 | 0 | 7 | 16 |\n| Cumulative sum | 0 | 0 | 7 | 23 |\n\nAnd thus the subject would have a delta adjustment of 0 applied to visits 1 and 2, 7 for visit 3 and 23 for visit 4.\n\nAnother way of using these arguments is to set `Delta` to the difference in time between visits and `DLag` to be the amount of delta per unit of time. For example, let’s say that visits occur on weeks 1, 5, 6 and 9 and that we want a delta of 3 to be applied for each week after an ICE. For simplicity, we assume that the ICE occurs immediately after the subject’s last visit which is not affected by the ICE. This this could be achieved by setting `Delta = 1 4 1 3`, i.e. 
the difference in weeks between each visit, and `DLag = 3 3 3 3`.\n\nAssume a subject’s first visit affected by the ICE was visit 2, then these values of `Delta` and `DLag` would imply the following delta offset:\n\n| | Visit 1 | Visit 2 | Visit 3 | Visit 4 |\n|----------------|---------|---------|---------|---------|\n| Delta | 1 | 4 | 1 | 3 |\n| DLag | 0 | 3 | 3 | 3 |\n| Delta \\* DLag | 0 | 12 | 3 | 9 |\n| Cumulative sum | 0 | 12 | 15 | 24 |\n\nLet's now consider the antidepressant data again. Suppose we apply a delta adjustment of 2 for each week following an ICE in the intervention group only. For example, if the ICE took place immediately after visit 4, then the cumulative delta applied to a missing value from visit 5 would be 2, from visit 6 would be 4, and from visit 7 would be 6.\n\nTo program this, we would define `Delta`, `DLag` and `DGroups` in `Part3()` of the `five macros` as follows:\n\n\n::: {.cell}\n\n```{.sas .cell-code}\n%part3(Jobname = HAMD_MAR, \n anref=PLACEBO, \n Delta = 2 2 2 2, \n DLag = 1 1 1 1, \n DGroups = DRUG, \n Label=MAR);\n```\n:::\n\n\nNotice that `DLag = 1 1 1 1` is the default for this argument in SAS, you may also leave it unspecified in this case.\n\n### Fixed delta\n\nAs already illustrated in the tipping point analysis assuming MAR above, you may also add a simple, fixed delta using the `Delta` and `DLag` arguments. To do this, `Delta` should be the same value repeated for each visit, e.g. `5 5 5 5`, while `DLag` should be `1 0 0 0`. 
This ensures a delta of 5 is added to each imputed missing value following an ICE, which we here assume to occur at the visit 2:\n\n| | Visit 1 | Visit 2 | Visit 3 | Visit 4 |\n|----------------|---------|---------|---------|---------|\n| Delta | 5 | 5 | 5 | 5 |\n| Dlag | 0 | 1 | 0 | 0 |\n| Delta \\* dlag | 0 | 0 | 0 | 0 |\n| Cumulative sum | 0 | 5 | 5 | 5 |\n\nTo apply this delta = 5 to both groups we leave `DGroups` unspecified.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\n%part3(Jobname = HAMD_MAR, \n anref=PLACEBO, \n Delta = 5 5 5 5, \n DLag = 1 0 0 0, \n Label=MAR);\n```\n:::\n\n\nIn the `five macros`, delta adjustments are not applied to intermittent missing observations, but only to missing observations after withdrawal. From the documentation, it seems like this cannot be altered. In contrast, the choice of which missing data to apply delta adjustments to can be more freely managed using the `rbmi` R package. This may lead to discrepancies between tipping point analyses conducted in SAS and R, and may have important implications for datasets with high proportions of intermittent missing values in particular.\n\n[**Note:**]{.underline} By making use of the `DGroupsV` argument in `Part3` (see `five macros` documentation) one can specify a variable in the dataset that indicates whether a delta adjustment should be applied to any imputed value for the record after withdrawal. The `five macros` documentation states that \"*As an alternative to DGroups=, this option specifies a numeric variable with a logical value indicating whether Delta should be applied to any imputed value for the record. This variable should be Numeric holding a logical value.*\". 
However, from our experience the intermittent missing values are not delta adjusted using this argument, only the missing values after withdrawal.\n\n[**Note:**]{.underline} By making use of the `DeltaV` argument in `Part3` (see `five macros` documentation) one can set specific delta adjustments by records.\n\n## References\n\n[Cro et al. 2020](https://pubmed.ncbi.nlm.nih.gov/32419182/). Sensitivity analysis for clinical trials with missing continuous outcome data using controlled multiple imputation: A practical guide. *Statistics in Medicine*. 2020;39(21):2815-2842.\n\n[rbmi: Advanced Functionality](https://cran.r-project.org/web/packages/rbmi/vignettes/advanced.html#sensitivity-analyses-delta-adjustments-and-tipping-point-analyses)\n\n[rbmi: Quickstart](https://cran.r-project.org/web/packages/rbmi/vignettes/quickstart.html)\n\n[Roger 2022](https://baselbiometrics.github.io/home/docs/talks/20221208/5_JamesRoger%2020121118.pdf). Other statistical software for continuous longitudinal endpoints: SAS macros for multiple imputation. Addressing intercurrent events: Treatment policy and hypothetical strategies. *Joint EFSPI and BBS virtual event.*\n\n[Roger 2017](https://www.lshtm.ac.uk/research/centres-projects-groups/missing-data#dia-missing-data). Fitting reference-based models for missing data to longitudinal repeated-measures Normal data. User guide five macros.", + "markdown": "---\ntitle: \"SAS Tipping Point (Delta Adjustment): Continuous Data\"\n---\n\n\n\n# Tipping Point / Delta Adjustment\n\n## Reference-based multiple imputation (rbmi)\n\n### Methodology introduction\n\nThe concept of delta adjustment and tipping point analysis builds on the framework of reference-based multiple imputation (rbmi) as seen on its respective [CAMIS webpage](../SAS/rbmi_continuous_joint_SAS.html). 
The use of the `five macros` ([Roger 2022](https://baselbiometrics.github.io/home/docs/talks/20221208/5_JamesRoger%2020121118.pdf)) in SAS for the following standard and reference-based multiple imputation approaches are introduced there:\n\n- Missing At Random (MAR)\n\n- Jump to Reference (J2R)\n\n- Copy Reference (CR)\n\n- Copy Increment from Reference (CIR)\n\nEssentially, the workflow comes down to sequentially running the five macro's:\n\n- `Part1A()` declares the parameter estimation model and checks consistency with the dataset. It builds a master dataset which holds details of the current job (run of the macros in sequence). It also builds indexes for the classification variables, which may be either numeric or character.\n\n- `Part1B()` fits the parameter estimation model using the MCMC procedure and draws a pseudo-independent sample from the joint posterior distribution for the linear predictor parameters and the covariance parameters.\n\n- `Part2A()` calculates the predicted mean under MAR, and under MNAR for each subject based on their withdrawal pattern once for each draw of the linear predictor parameter estimates. The choice of MNAR is controlled by the method used, which may vary from subject to subject.\n\n- `Part2B()` imputes the intermediate missing values using MAR and the trailing missing values using MNAR, by deriving the conditional distribution for the missing values conditional on the observed values and covariates, using the appropriate sampled covariance parameter estimates.\n\n- `Part3()` carries out a univariate ANOVA analysis at selected time points usually based on the same covariates as the parameter estimation model. It then combines the least-squares means and their differences using the MIANALYZE procedure to provide final results. 
It is in this macro which handles the Delta methods.\n\nThe `five macros` are available at [LSHTM DIA Missing Data](https://www.lshtm.ac.uk/research/centres-projects-groups/missing-data#dia-missing-data) under *Imputation based approaches* \\> *Reference-based MI via Multivariate Normal RM (the \"five macros\" and MIWithD)* \\> *Downloads*. Please make sure to familiarize yourself with its functionalities before checking this tutorial. For more details, see the user guide available upon download ([Roger 2017](https://www.lshtm.ac.uk/research/centres-projects-groups/missing-data#dia-missing-data)).\n\n### Data\n\nThe same publicly available [dataset](https://r-packages.io/datasets/antidepressant_data) from an antidepressant clinical trial that was used to illustrate rbmi is again used for this tutorial. This dataset is also used in the R version of tipping point guidance on this [CAMIS webpage](https://psiaims.github.io/CAMIS/R/tipping_point.html) and the [quickstart vignette](https://cran.r-project.org/web/packages/rbmi/vignettes/quickstart.html) of the `rbmi` R package.\n\nThe relevant endpoint for the antidepressant trial was assessed using the Hamilton 17-item depression rating scale (HAMD17), which was measured at baseline and subsequently at weeks 1, 2, 3, 4 and 6 (visits 4-7). Study drug discontinuation occurred in 24% (20/84) of subjects in the active drug group, compared to 26% (23/88) of subjects in the placebo group. 
Importantly, all data after study drug discontinuation are missing and there is a single intermittent missing observation.\n\n```sas\nproc print data=dat (obs=10);\n var PATIENT GENDER THERAPY RELDAYS VISIT BASVAL HAMDTL17 CHANGE;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/tipping_point/SAS_data_exploration_1.PNG){fig-align='center' width=80%}\n:::\n:::\n\n\nThe number of patients per visit and treatment group are:\n\n```sas\nproc freq data=dat;\n table VISIT*THERAPY / norow nocol nopercent nocum;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/tipping_point/SAS_data_exploration_2.PNG){fig-align='center' width=80%}\n:::\n:::\n\n\nThe mean change from baseline of the HAMD17 endpoint per visit and treatment group using only the complete cases are:\n\n```sas\nproc means data=dat n mean nonobs;\n class VISIT THERAPY;\n var CHANGE;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/tipping_point/SAS_data_exploration_3.PNG){fig-align='center' width=80%}\n:::\n:::\n\n\nThe missingness pattern is:\n\n```sas\nproc transpose data=dat out=HAMD_wide(drop=_NAME_) prefix=CHG;\n by PATIENT THERAPY BASVAL;\n id VISIT;\n var CHANGE;\nrun;\n\nproc mi data=HAMD_wide nimpute=0 displaypattern=NOMEANS;\n var CHG4 CHG5 CHG6 CHG7;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/tipping_point/SAS_data_exploration_4.PNG){fig-align='center' width=80%}\n:::\n:::\n\n\nThere is a single patient with an intermittent missing observation at visit 5, which is patient 3618. 
Special considerations need to be taken when applying delta adjustments to intermittent missing observations like this one (more on this below).\n\n## Tipping point analysis and delta adjustment\n\n### Methodology introduction\n\nWhen analyses for endpoints are performed under MAR or MNAR assumptions for missing data, it is important to perform sensitivity analyses to assess the impact of deviations from these assumptions. Tipping point analysis (or delta adjustment method) is an example of a sensitivity analysis that can be used to assess the robustness of a clinical trial when its result is based on imputed missing data.\n\nGenerally, tipping point analysis explores the influence of missingness on the overall conclusion of the treatment difference by shifting imputed missing values in the treatment group towards the reference group until the result becomes non-significant. The tipping point is the minimum shift needed to make the result non-significant. If the minimum shift needed to make the result non-significant is implausible, then greater confidence in the primary results can be inferred.\n\nTipping point analysis generally happens by adjusting imputing values by a so-called delta values. The observed tipping point is the minimum delta needed to make the result non-significant. Mostly a range of delta values is explored and only imputed values from the active treatment group are adjusted by the delta value. However, delta adjustments to the control group are possible as well. Naturally, the range of acceptable values for delta should be agreed a priori, before taking this approach.\n\nFor an extensive discussion on delta adjustment methods, we refer to [Cro et al. 2020](https://pubmed.ncbi.nlm.nih.gov/32419182/).\n\n## Tipping point analysis: MAR approach\n\nAs mentioned, we will illustrate the use of the so-called `five macros` in SAS for delta adjustment and tipping point analysis. 
These are available at [LSHTM DIA Missing Data](https://www.lshtm.ac.uk/research/centres-projects-groups/missing-data#dia-missing-data) under *Imputation based approaches* \\> *Reference-based MI via Multivariate Normal RM (the \"five macros\" and MIWithD)* \\> *Downloads*.\n\n### Perform tipping point analysis\n\nTo conduct a tipping point analysis under the MAR assumption, we simply specify `method = MAR` under `Part2A()` of the `five macros`. Generally, the rest of `Part1` and `Part2` are the same as in the scenario without any delta adjustment.\n\n```sas\n%part1A(jobname = HAMD, \n Data=dat,\n Subject=PATIENT,\n RESPONSE = CHANGE,\n Time = VISIT,\n Treat = THERAPY,\n Covbytime = BASVAL,\n Catcov = GENDER);\n\n%part1B(jobname = HAMD,\n Ndraws = 500,\n thin = 10,\n seed = 12345);\n\n%part2A(jobname = HAMD_MAR,\n inname = HAMD,\n method = MAR);\n\n%part2B(jobname = HAMD_MAR,\n seed = 12345);\n```\n\nThen, in `Part3()`, we create a series of delta values that increases sequentially for the intervention group by changing the `Delta` argument within a do loop, and setting `Dgroups = DRUG` and `Dlag = 1 0 0 0` (more on this below).\n\nA clear description of these arguments is given in the documentation:\n\n- `Delta`: A vector of Delta values from visit 1 up to the final visit (default is not applying a delta adjustment)\n- `Dlag`: A vector of multipliers for each Lag after withdrawal for the Delta values (default is all values of 1)\n- `Dgroups`: The treatment groups to which Delta should be applied (default is all treatment groups)\n\nTo automate the tipping point analysis, you can create a new macro like shown below. 
The first part of this macro prints all results, while the second part prints the non-significant and significant results separately by filtering on `Probt`.\n\n```sas\ndata all_results;\n length DELTA 8 VISIT $10 THERAPY $20 _THERAPY $20 Diff SE_Diff df Probt LCL_Diff UCL_Diff 8;\n stop;\nrun;\n\n%macro part3_TP;\n\n%do delta = -5 %to 10 %by 1;\n%part3(Jobname = HAMD_MAR, anref=PLACEBO, Delta = &delta &delta &delta &delta, DLag = 1 0 0 0, DGroups = DRUG, Label=MAR);\n\ndata current_result;\n set HAMD_MAR_OUT(keep = VISIT THERAPY _THERAPY Diff SE_Diff df Probt LCL_Diff UCL_Diff);\n DELTA = δ\nrun;\n\nproc append base=all_results data=current_result force nowarn;\nrun;\n\n%end;\nproc print data = all_results noobs label;\n where VISIT = \"7\";\n var DELTA VISIT THERAPY _THERAPY Diff SE_Diff df Probt LCL_Diff UCL_Diff;\n title \"MAR: all results\";\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/tipping_point/SAS_MAR_all_results.png){fig-align='center' width=80%}\n:::\n:::\n\n\n```sas\n\nproc sql noprint;\n create table delta_ge_05 as\n select distinct DELTA\n from all_results\n where Probt >= 0.05 and not missing(Probt) and VISIT = \"7\";\n\n select DELTA into :non_sig_delta separated by ' '\n from delta_ge_05;\n\n create table delta_lt_05 as\n select distinct DELTA\n from all_results\n where Probt < 0.05 and not missing(Probt) and VISIT = \"7\";\n\n select DELTA into :sig_delta separated by ' '\n from delta_lt_05;\nquit;\n\nproc print data = all_results noobs label;\n where DELTA in (&non_sig_delta.) and VISIT = \"7\";\n var DELTA VISIT THERAPY _THERAPY Diff SE_Diff df Probt LCL_Diff UCL_Diff;\n title \"MAR: non-significant results\";\nrun;\n\nproc print data = all_results noobs label;\n where DELTA in (&sig_delta.) 
and VISIT = \"7\";\n var DELTA VISIT THERAPY _THERAPY Diff SE_Diff df Probt LCL_Diff UCL_Diff;\n title \"MAR: significant results\";\nrun;\n\n%mend;\n\n%part3_TP;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/tipping_point/SAS_MAR_non_sig_results.png){fig-align='center' width=80%}\n:::\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/tipping_point/SAS_MAR_sig_results.png){fig-align='center' width=80%}\n:::\n:::\n\n\nTo determine the **exact** tipping point between the last \"significant\" delta and the first \"non-significant\" delta, you may manually perform linear interpolation for `DELTA`, `Diff`, `LCL_Diff` and `UCL_Diff` at `Probt = 0.05`.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/tipping_point/SAS_MAR_TP.png){fig-align='center' width=80%}\n:::\n:::\n\n\n[**Note:**]{.underline} In the `five macros`, delta adjustments happen right after data imputation under MAR or MNAR (using reference-based imputation approaches) in `Part2`, but before implementing the analysis model in `Part3`. Sensitivity analyses can therefore be performed without having to refit the imputation model, which is computationally efficient. This approach is considered a *marginal* delta adjustment approach, because the delta is simply added to the mean of the conditional multivariate normal distribution (conditional on the observed values and the covariates) for the imputation model ([Roger 2017](https://www.lshtm.ac.uk/research/centres-projects-groups/missing-data#dia-missing-data)).\n\n### Visualize results\n\nA nice visualization of this tipping point analysis for the MAR approach is shown below. 
The dashed horizontal line indicates a p-value of 0.05 in the left plot and no treatment effect in the right plot.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/tipping_point/SAS_MAR_est_pval.png){fig-align='center' width=80%}\n:::\n:::\n\n\nWe clearly see that the p-value under MAR reaches a tipping point from 3 onward in the range of delta's considered.\n\n## Comparison with rbmi MNAR approaches\n\n### Summary of results\n\nIn the table below we present the results of the different imputation strategies with varying number of multiple imputation draws, M = 500 and M = 5000. Note that the results can be slightly different from the results above due to a possible different seed. The estimates show the contrast at visit 7 between DRUG and PLACEBO (DRUG - PLACEBO). Delta adjustments were applied to all imputed missing data, *except* intermittent missing data, in the intervention group only.\n\n| Method | Delta control | Delta intervention at TP | Estimate at TP | 95% CI | P-value | Original estimate | Original p-value |\n|---------|---------|---------|---------|---------|---------|---------|---------|\n| MI - MAR (M=500) | 0 | 3 | -2.081 | -4.337 to 0.175 | 0.0703 | -2.810 | 0.0134 |\n| MI - MAR (M=5000) | 0 | 3 | -2.096 | -4.353 to 0.161 | 0.0684 | -2.825 | 0.0130 |\n| MI - MNAR J2R (M=500) | 0 | -1 | -2.365 | -4.604 to -0.125 | 0.0386 | -2.122 | 0.0650 |\n| MI - MNAR J2R (M=5000) | 0 | -1 | -2.387 | -4.617 to -0.157 | 0.0361 | -2.144 | 0.0611 |\n| MI - MNAR CR (M=500) | 0 | 1 | -2.141 | -4.370 to 0.089 | 0.0597 | -2.384 | 0.0350 |\n| MI - MNAR CR (M=5000) | 0 | 1 | -2.157 | -4.377 to 0.062 | 0.0566 | -2.400 | 0.0330 |\n| MI - MNAR CIR (M=500) | 0 | 1 | -2.218 | -4.446 to 0.010 | 0.0510 | -2.461 | 0.0296 |\n| MI - MNAR CIR (M=5000) | 0 | 2 | -1.995 | -4.229 to 0.240 | 0.0798 | -2.481 | 0.0276 |\n\nOf all considered approaches, the MAR approach yields the largest delta adjustment at its tipping point, with a delta intervention of 
3 at both M = 500 and M = 5000. This indicates that the MAR assumption is the most robust against slight deviations of its conditions. Notice that for the MNAR JR approach we included, for completeness, tipping point analyses to know when the results switch from non-significant to significant. Correspondingly, two negative delta's (-1) are found at the tipping point. This is expected, given that the original analyses are non-significant (p \\~ 0.0650 and p \\~ 0.0611) and a tipping point analysis here aims to find the point at which the analysis turns to be significant, instead of non-significant.\n\n### Visual comparison\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/tipping_point/SAS_comparison_est.png){fig-align='center' width=80%}\n:::\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/tipping_point/SAS_comparison_pval.png){fig-align='center' width=80%}\n:::\n:::\n\n\n## Flexible delta adjustments\n\nSo far, we have only considered simple delta adjustments that add the same value to all imputed missing data. However, you may want to implement more flexible delta adjustments for missing data following an intercurrent event (ICE), where the magnitude of the delta varies depending on the distance of the visit from the ICE visit.\n\nTo enable flexible delta adjustments, `Part3()` includes three delta arguments: `Delta`, `DLag` and `DGroups`. Although these arguments were introduced earlier in the tutorial, we will now elaborate on how exactly these arguments allow for flexible delta adjustments with a few examples taken from the [advanced functionality vignette](https://cran.r-project.org/web/packages/rbmi/vignettes/advanced.html#sensitivity-analyses-delta-adjustments-and-tipping-point-analyses) of the `rbmi` R package.\n\n### Scaling delta by visit\n\nAssume a setting with 4 visits and the user specified `Delta = 5 6 7 8` and `DLag = 1 2 3 4`. 
For a subject for whom the first visit affected by the ICE is visit 2, these values of `Delta` and `DLag` would imply the following delta adjustments:\n\n| | Visit 1 | Visit 2 | Visit 3 | Visit 4 |\n|----------------|---------|---------|---------|---------|\n| Delta | 5 | 6 | 7 | 8 |\n| DLag | 0 | 1 | 2 | 3 |\n| Delta \\* DLag | 0 | 6 | 14 | 24 |\n| Cumulative sum | 0 | 6 | 20 | 44 |\n\nThat is, the subject would have a delta adjustment of 0 applied to visit 1, 6 for visit 2, 20 for visit 3 and 44 for visit 4.\n\nAssume instead, that the subject’s first visit affected by the ICE was visit 3. Then, the above values of `Delta` and `DLag` would imply the following delta adjustment:\n\n| | Visit 1 | Visit 2 | Visit 3 | Visit 4 |\n|----------------|---------|---------|---------|---------|\n| Delta | 5 | 6 | 7 | 8 |\n| DLag | 0 | 0 | 1 | 2 |\n| Delta \\* DLag | 0 | 0 | 7 | 16 |\n| Cumulative sum | 0 | 0 | 7 | 23 |\n\nAnd thus the subject would have a delta adjustment of 0 applied to visits 1 and 2, 7 for visit 3 and 23 for visit 4.\n\nAnother way of using these arguments is to set `Delta` to the difference in time between visits and `DLag` to be the amount of delta per unit of time. For example, let’s say that visits occur on weeks 1, 5, 6 and 9 and that we want a delta of 3 to be applied for each week after an ICE. For simplicity, we assume that the ICE occurs immediately after the subject’s last visit which is not affected by the ICE. This this could be achieved by setting `Delta = 1 4 1 3`, i.e. 
the difference in weeks between each visit, and `DLag = 3 3 3 3`.\n\nAssume a subject’s first visit affected by the ICE was visit 2, then these values of `Delta` and `DLag` would imply the following delta offset:\n\n| | Visit 1 | Visit 2 | Visit 3 | Visit 4 |\n|----------------|---------|---------|---------|---------|\n| Delta | 1 | 4 | 1 | 3 |\n| DLag | 0 | 3 | 3 | 3 |\n| Delta \\* DLag | 0 | 12 | 3 | 9 |\n| Cumulative sum | 0 | 12 | 15 | 24 |\n\nLet's now consider the antidepressant data again. Suppose we apply a delta adjustment of 2 for each week following an ICE in the intervention group only. For example, if the ICE took place immediately after visit 4, then the cumulative delta applied to a missing value from visit 5 would be 2, from visit 6 would be 4, and from visit 7 would be 6.\n\nTo program this, we would define `Delta`, `DLag` and `DGroups` in `Part3()` of the `five macros` as follows:\n\n```sas\n%part3(Jobname = HAMD_MAR, \n anref=PLACEBO, \n Delta = 2 2 2 2, \n DLag = 1 1 1 1, \n DGroups = DRUG, \n Label=MAR);\n```\n\nNotice that `DLag = 1 1 1 1` is the default for this argument in SAS, you may also leave it unspecified in this case.\n\n### Fixed delta\n\nAs already illustrated in the tipping point analysis assuming MAR above, you may also add a simple, fixed delta using the `Delta` and `DLag` arguments. To do this, `Delta` should be the same value repeated for each visit, e.g. `5 5 5 5`, while `DLag` should be `1 0 0 0`. 
This ensures a delta of 5 is added to each imputed missing value following an ICE, which we here assume to occur at the visit 2:\n\n| | Visit 1 | Visit 2 | Visit 3 | Visit 4 |\n|----------------|---------|---------|---------|---------|\n| Delta | 5 | 5 | 5 | 5 |\n| Dlag | 0 | 1 | 0 | 0 |\n| Delta \\* dlag | 0 | 0 | 0 | 0 |\n| Cumulative sum | 0 | 5 | 5 | 5 |\n\nTo apply this delta = 5 to both groups we leave `DGroups` unspecified.\n\n```sas\n%part3(Jobname = HAMD_MAR, \n anref=PLACEBO, \n Delta = 5 5 5 5, \n DLag = 1 0 0 0, \n Label=MAR);\n```\n\nIn the `five macros`, delta adjustments are not applied to intermittent missing observations, but only to missing observations after withdrawal. From the documentation, it seems like this cannot be altered. In contrast, the choice of which missing data to apply delta adjustments to can be more freely managed using the `rbmi` R package. This may lead to discrepancies between tipping point analyses conducted in SAS and R, and may have important implications for datasets with high proportions of intermittent missing values in particular.\n\n[**Note:**]{.underline} By making use of the `DGroupsV` argument in `Part3` (see `five macros` documentation) one can specify a variable in the dataset that indicates whether a delta adjustment should be applied to any imputed value for the record after withdrawal. The `five macros` documentation states that \"*As an alternative to DGroups=, this option specifies a numeric variable with a logical value indicating whether Delta should be applied to any imputed value for the record. This variable should be Numeric holding a logical value.*\". However, from our experience the intermittent missing values are not delta adjusted using this argument, only the missing values after withdrawal.\n\n[**Note:**]{.underline} By making use of the `DeltaV` argument in `Part3` (see `five macros` documentation) one can set specific delta adjustments by records.\n\n## References\n\n[Cro et al. 
2020](https://pubmed.ncbi.nlm.nih.gov/32419182/). Sensitivity analysis for clinical trials with missing continuous outcome data using controlled multiple imputation: A practical guide. *Statistics in Medicine*. 2020;39(21):2815-2842.\n\n[rbmi: Advanced Functionality](https://cran.r-project.org/web/packages/rbmi/vignettes/advanced.html#sensitivity-analyses-delta-adjustments-and-tipping-point-analyses)\n\n[rbmi: Quickstart](https://cran.r-project.org/web/packages/rbmi/vignettes/quickstart.html)\n\n[Roger 2022](https://baselbiometrics.github.io/home/docs/talks/20221208/5_JamesRoger%2020121118.pdf). Other statistical software for continuous longitudinal endpoints: SAS macros for multiple imputation. Addressing intercurrent events: Treatment policy and hypothetical strategies. *Joint EFSPI and BBS virtual event.*\n\n[Roger 2017](https://www.lshtm.ac.uk/research/centres-projects-groups/missing-data#dia-missing-data). Fitting reference-based models for missing data to longitudinal repeated-measures Normal data. User guide five macros.", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/SAS/tobit regression SAS/execute-results/html.json b/_freeze/SAS/tobit regression SAS/execute-results/html.json index fdda8a3b0..797dea02f 100644 --- a/_freeze/SAS/tobit regression SAS/execute-results/html.json +++ b/_freeze/SAS/tobit regression SAS/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "b00a2ab9374a4d0c69843bb63e3e379f", + "hash": "a1f66c5acf6bb5e7bba8407eb7531f22", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Tobit Regression\"\nexecute: \n eval: false\n---\n\n# Tobit regression\n\n## Tobit model\n\nCensoring occurs when data on the dependent variable is only partially known. 
For example, in virology, sample results could be below the lower limit of detection (eg, 100 copies/mL) and in such a case we only know that the sample result is \\<100 copies/mL, but we don't know the exact value.\n\nLet $y^{*}$ be the the true underlying latent variable, and $y$ the observed variable. We discuss here censoring on the left:\n\n$$\ny =\n\\begin{cases}\n y^{*}, & y^{*} > \\tau \\\\\n \\tau, & y^{*} \\leq \\tau\n \\end{cases} \n$$ We consider tobit regression with a censored normal distribution. The model equation is $$\ny_{i}^{*} = X_{i}\\beta + \\epsilon_{i} \n$$ with $\\epsilon_{i} \\sim N(0,\\sigma^2)$. But we only observe $y = max(\\tau, y^{*})$. The tobit model uses maximum likelihood estimation (for details see for example Breen, 1996). It is important to note that $\\beta$ estimates the effect of $x$ on the latent variable $y^{*}$, and not on the observed value $y$.\n\n## Data used\n\nWe assume two equally sized groups (n=10 in each group). The data is censored on the left at a value of $\\tau=8.0$. In group A 4/10 records are censored, and 1/10 in group B.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\ndata dat_used;\n input ID$ ARM$ Y CENS;\n cards;\n 001 A 8.0 1 \n 002 A 8.0 1\n 003 A 8.0 1\n 004 A 8.0 1\n 005 A 8.9 0\n 006 A 9.5 0\n 007 A 9.9 0\n 008 A 10.3 0\n 009 A 11.0 0\n 010 A 11.2 0\n 011 B 8.0 1 \n 012 B 9.2 0\n 013 B 9.9 0\n 014 B 10.0 0\n 015 B 10.6 0\n 016 B 10.6 0\n 017 B 11.3 0\n 018 B 11.8 0\n 019 B 12.9 0\n 020 B 13.0 0\n\t;\nrun;\n```\n:::\n\n\n## Example Code using SAS\n\nThe analysis will be based on a Tobit analysis of variance with $Y$, rounded to 1 decimal places, as dependent variable and study group as a fixed covariate. A normally distributed error term will be used. 
Values will be left censored at the value 8.0.\n\nFirst a data manipulation step needs to be performed in which the censored values are set to missing for a new variable called *lower*.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\ndata dat_used;\n set dat_used;\n if Y <= 8.0 then lower=.; else lower=Y;\nrun;\n```\n:::\n\n\nThe data are sorted to make sure the intercept will correspond to the mean of ARM A.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc sort data=dat_used;\n by descending ARM;\nrun;\n```\n:::\n\n\nThe **LIFEREG** procedure is used for tobit regression. The following model syntax is used:\n\n``` default\n MODEL (lower,upper)= effects / options ;\n```\n\nHere, if the *lower* value is missing, then the *upper* value is used as a left-censored value.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc lifereg data=dat_used order=data;\n class ARM;\n model (lower, Y) = ARM / d=normal;\n lsmeans ARM /cl alpha=0.05;\n estimate 'Contrast B-A' ARM 1 -1 / alpha=0.05;\nrun;\n```\n:::\n\n\nThe fit statistics, type 3 analysis of effects and parameter estimated are shown here. The output provides an estimate of difference between groups A and B (B-A), namely 1.8225 (se=0.8061). The presented p-value is a two-sided p-value based on the Z-test. The scale parameter is an estimate for $\\sigma$.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/tobit/SAS_tobit_1.PNG){fig-align='center' width=100%}\n:::\n:::\n\n\nThe p-value and confidence intervals of the contrast B-A are shown here. The p-value is the same as above.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/tobit/SAS_tobit_2.PNG){fig-align='center' width=80%}\n:::\n:::\n\n\n## Reference\n\nBreen, R. (1996). Regression models. SAGE Publications, Inc., https://doi.org/10.4135/9781412985611\n\nTobin, James (1958). \"Estimation of Relationships for Limited Dependent Variables\". Econometrica. 26 (1): 24-36. 
doi:10.2307/1907382", + "markdown": "---\ntitle: \"Tobit Regression\"\n---\n\n# Tobit regression\n\n## Tobit model\n\nCensoring occurs when data on the dependent variable is only partially known. For example, in virology, sample results could be below the lower limit of detection (eg, 100 copies/mL) and in such a case we only know that the sample result is \\<100 copies/mL, but we don't know the exact value.\n\nLet $y^{*}$ be the the true underlying latent variable, and $y$ the observed variable. We discuss here censoring on the left:\n\n$$\ny =\n\\begin{cases}\n y^{*}, & y^{*} > \\tau \\\\\n \\tau, & y^{*} \\leq \\tau\n \\end{cases} \n$$ We consider tobit regression with a censored normal distribution. The model equation is $$\ny_{i}^{*} = X_{i}\\beta + \\epsilon_{i} \n$$ with $\\epsilon_{i} \\sim N(0,\\sigma^2)$. But we only observe $y = max(\\tau, y^{*})$. The tobit model uses maximum likelihood estimation (for details see for example Breen, 1996). It is important to note that $\\beta$ estimates the effect of $x$ on the latent variable $y^{*}$, and not on the observed value $y$.\n\n## Data used\n\nWe assume two equally sized groups (n=10 in each group). The data is censored on the left at a value of $\\tau=8.0$. In group A 4/10 records are censored, and 1/10 in group B.\n\n```sas\ndata dat_used;\n input ID$ ARM$ Y CENS;\n cards;\n 001 A 8.0 1 \n 002 A 8.0 1\n 003 A 8.0 1\n 004 A 8.0 1\n 005 A 8.9 0\n 006 A 9.5 0\n 007 A 9.9 0\n 008 A 10.3 0\n 009 A 11.0 0\n 010 A 11.2 0\n 011 B 8.0 1 \n 012 B 9.2 0\n 013 B 9.9 0\n 014 B 10.0 0\n 015 B 10.6 0\n 016 B 10.6 0\n 017 B 11.3 0\n 018 B 11.8 0\n 019 B 12.9 0\n 020 B 13.0 0\n\t;\nrun;\n```\n\n## Example Code using SAS\n\nThe analysis will be based on a Tobit analysis of variance with $Y$, rounded to 1 decimal places, as dependent variable and study group as a fixed covariate. A normally distributed error term will be used. 
Values will be left censored at the value 8.0.\n\nFirst a data manipulation step needs to be performed in which the censored values are set to missing for a new variable called *lower*.\n\n```sas\ndata dat_used;\n set dat_used;\n if Y <= 8.0 then lower=.; else lower=Y;\nrun;\n```\n\nThe data are sorted to make sure the intercept will correspond to the mean of ARM A.\n\n```sas\nproc sort data=dat_used;\n by descending ARM;\nrun;\n```\n\nThe **LIFEREG** procedure is used for tobit regression. The following model syntax is used:\n\n``` default\n MODEL (lower,upper)= effects / options ;\n```\n\nHere, if the *lower* value is missing, then the *upper* value is used as a left-censored value.\n\n```sas\nproc lifereg data=dat_used order=data;\n class ARM;\n model (lower, Y) = ARM / d=normal;\n lsmeans ARM /cl alpha=0.05;\n estimate 'Contrast B-A' ARM 1 -1 / alpha=0.05;\nrun;\n```\n\nThe fit statistics, type 3 analysis of effects and parameter estimated are shown here. The output provides an estimate of difference between groups A and B (B-A), namely 1.8225 (se=0.8061). The presented p-value is a two-sided p-value based on the Z-test. The scale parameter is an estimate for $\\sigma$.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/tobit/SAS_tobit_1.PNG){fig-align='center' width=100%}\n:::\n:::\n\n\nThe p-value and confidence intervals of the contrast B-A are shown here. The p-value is the same as above.\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/tobit/SAS_tobit_2.PNG){fig-align='center' width=80%}\n:::\n:::\n\n\n## Reference\n\nBreen, R. (1996). Regression models. SAGE Publications, Inc., https://doi.org/10.4135/9781412985611\n\nTobin, James (1958). \"Estimation of Relationships for Limited Dependent Variables\". Econometrica. 26 (1): 24-36. 
doi:10.2307/1907382", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/SAS/ttest_1Sample/execute-results/html.json b/_freeze/SAS/ttest_1Sample/execute-results/html.json index 4a957be7b..ec7f47999 100644 --- a/_freeze/SAS/ttest_1Sample/execute-results/html.json +++ b/_freeze/SAS/ttest_1Sample/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "8751368916c07000649a617d14f410dc", + "hash": "e3e7d963b5d351b83935e6ff6af4cd77", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"One Sample t-test in SAS\"\noutput: html_document\nexecute: \n eval: false\n---\n\n\n\nIn SAS, a one sample t-test is usually performed using PROC TTEST. The one sample t-test compares the mean of the sample to a provided null hypothesis, called \"h0\". The h0 value is provided as an option. By default, the h0 value is zero (0). Running the procedure produces a set of results that suggest whether or not the null hypothesis should be rejected.\n\n### Data Used\n\nThe following data was used in this example.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\ndata read;\n input score count @@;\n datalines;\n 40 2 47 2 52 2 26 1 19 2\n 25 2 35 4 39 1 26 1 48 1\n 14 2 22 1 42 1 34 2 33 2\n 18 1 15 1 29 1 41 2 44 1\n 51 1 43 1 27 2 46 2 28 1\n 49 1 31 1 28 1 54 1 45 1\n ;\n```\n:::\n\n\n## Normal Data {#normal}\n\nBy default, SAS PROC TTEST t-test assumes normality in the data and uses a classic Student's t-test.\n\n### Code\n\nThe following code was used to test the comparison of a reading scores against a baseline hypothesis value of 30:\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc ttest data=read h0=30;\n var score;\nrun;\n```\n:::\n\n\nOutput:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ttest/onesample_test1.png){fig-align='center' width=50%}\n:::\n:::\n\n\n## Lognormal Data {#lognormal}\n\nThe SAS one sample t-test also supports lognormal analysis for a one sample t-test.\n\n### Code\n\nUsing the same data as above, we will 
set the \"DIST\" option to \"lognormal\" to perform this analysis:\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc ttest data=read h0=30 dist=lognormal;\n var score;\nrun;\n```\n:::\n\n\nOutput:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ttest/onesample_test2.png){fig-align='center' width=60%}\n:::\n:::\n\n\nAs can be seen in the figure above, the lognormal variation of the one sample TTEST provides results for geometric mean, coefficient of variation, and 95% confidence limits for the coefficient of variation.", + "markdown": "---\ntitle: \"One Sample t-test in SAS\"\noutput: html_document\n---\n\n\n\nIn SAS, a one sample t-test is usually performed using PROC TTEST. The one sample t-test compares the mean of the sample to a provided null hypothesis, called \"h0\". The h0 value is provided as an option. By default, the h0 value is zero (0). Running the procedure produces a set of results that suggest whether or not the null hypothesis should be rejected.\n\n### Data Used\n\nThe following data was used in this example.\n\n```sas\ndata read;\n input score count @@;\n datalines;\n 40 2 47 2 52 2 26 1 19 2\n 25 2 35 4 39 1 26 1 48 1\n 14 2 22 1 42 1 34 2 33 2\n 18 1 15 1 29 1 41 2 44 1\n 51 1 43 1 27 2 46 2 28 1\n 49 1 31 1 28 1 54 1 45 1\n ;\n```\n\n## Normal Data {#normal}\n\nBy default, SAS PROC TTEST t-test assumes normality in the data and uses a classic Student's t-test.\n\n### Code\n\nThe following code was used to test the comparison of a reading scores against a baseline hypothesis value of 30:\n\n```sas\nproc ttest data=read h0=30;\n var score;\nrun;\n```\n\nOutput:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ttest/onesample_test1.png){fig-align='center' width=50%}\n:::\n:::\n\n\n## Lognormal Data {#lognormal}\n\nThe SAS one sample t-test also supports lognormal analysis for a one sample t-test.\n\n### Code\n\nUsing the same data as above, we will set the \"DIST\" option to 
\"lognormal\" to perform this analysis:\n\n```sas\nproc ttest data=read h0=30 dist=lognormal;\n var score;\nrun;\n```\n\nOutput:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ttest/onesample_test2.png){fig-align='center' width=60%}\n:::\n:::\n\n\nAs can be seen in the figure above, the lognormal variation of the one sample TTEST provides results for geometric mean, coefficient of variation, and 95% confidence limits for the coefficient of variation.", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/SAS/ttest_2Sample/execute-results/html.json b/_freeze/SAS/ttest_2Sample/execute-results/html.json index 9be6277de..a7c9e5bf7 100644 --- a/_freeze/SAS/ttest_2Sample/execute-results/html.json +++ b/_freeze/SAS/ttest_2Sample/execute-results/html.json @@ -1,11 +1,9 @@ { - "hash": "0a7d7ff99c14fc61460b7ddbc4a34fd6", + "hash": "b888eb16a5a502f222260cb0be7b35b1", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Independent Two-Sample t-test\"\noutput: html_document\nexecute: \n eval: false\n---\n\n\n\n## Data Used\n\nThe following data was used in this example.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\ndata d1;\n length trt_grp $ 9;\n input trt_grp $ WtGain @@;\n datalines;\nplacebo 94 placebo 12 placebo 26 placebo 89 \nplacebo 88 placebo 96 placebo 85 placebo 130 \nplacebo 75 placebo 54 placebo 112 placebo 69 \nplacebo 104 placebo 95 placebo 53 placebo 21 \ntreatment 45 treatment 62 treatment 96 treatment 128 \ntreatment 120 treatment 99 treatment 28 treatment 50 \ntreatment 109 treatment 115 treatment 39 treatment 96 \ntreatment 87 treatment 100 treatment 76 treatment 80 \n;\nrun;\n```\n:::\n\n\n## **Independent Two-Sample t-test in SAS** {#sas}\n\nThe null hypothesis of the Independent Samples t-test is, the means for the two populations are equal.\n\nIn SAS the following code was used to test the mean comparison (mean of Weight Gain) of two independent treatment groups (Treatment and 
Placebo).\nBoth the Student's t-test and Welch's t-test (the Satterthwaite approximation is used to calculate the effective degrees of freedom and variance) in the same call.\n\nFor this example, we're testing the significant difference in mean of Weight gain (*WtGain*) between treatment and placebo (*trt_grp*) using PROC TTEST procedure in SAS.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc ttest data=d1; \n class trt_grp; \n var WtGain; \nrun; \n```\n:::\n\n\nOutput:\n\n``` default\n Figure 1: Test results for independent t-test using PROC TTEST in SAS\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ttest/test1.png){fig-align='center' width=50%}\n:::\n:::\n\n\nHere the t-value is --0.70, degrees of freedom is 30 and P value is 0.4912 which is greater than 0.05, so we accept the null hypothesis that there is no evidence of a significant difference between the means of treatment groups. The mean in placebo group is 75.1875 and mean in Treatment group is 83.1250. The mean difference the treatment groups (Treatment-Placebo) is --7.9375 and the 95% CI for the mean difference is \\[--31.1984, 15.3234\\]. The 95% confidence interval includes a treatment difference of 0, which supports the conclusion that the data fail to provide any evidence of a difference between the treatment groups.\n\n## Model Checking\n\nNote: Before entering straight into the t-test we need to check whether the assumptions (like the equality of variance, the observations should be independent, observations should be normally distributed) are met or not. If normality is not satisfied, we may consider using a suitable non-parametric test.\n\n1. Normality: You can check for data to be normally distributed by plotting a histogram of the data by treatment. Alternatively, you can use the Shapiro-Wilk test or the Kolmogorov-Smirnov test. If the test is \\<0.05 and your sample is quite small then this suggests you should not use the t-test. 
However, if your sample in each treatment group is large (say \\>30 in each group), then you do not need to rely so heavily on the assumption that the data have an underlying normal distribution in order to apply the two-sample t-test. This is where plotting the data using histograms can help to support investigation into the normality assumption. We have checked the normality of the observations using the code below. Here for both the treatment groups we have P value greater than 0.05 (Shapiro-Wilk test is used), therefore the normality assumption is there for our data.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc univariate data=d1 normal; \n qqplot WtGain; \n by trt_grp; \nrun; \n```\n:::\n\n\nOutput:\n\n``` default\n Figure 2: The results of normality test for Treatment group\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ttest/trt_sas.png){fig-align='center' width=30%}\n:::\n:::\n\n\n``` default\n Figure 3: The results of normality test for Placebo group\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ttest/placb_sas.png){fig-align='center' width=30%}\n:::\n:::\n\n\n2. Homogeneity of variance (or Equality of variance): Homogeniety of variance will be tested by default in PROC TTEST itself by Folded F-test. In our case the P values is 0.6981 which is greater than 0.05. So we accept the null hypothesis of F-test, i.e. variances are same. Then we will consider the pooled method for t-test. If the F test is statistically significant (p\\<0.05), then the pooled t-test may give erroneous results. In this instance, if it is believed that the population variances may truly differ, then the Satterthwaite (unequal variances) analysis results should be used. 
These are provided in the SAS output alongside the Pooled results as default.\n\nOutput:\n\n``` default\n Figure 4: Folded F-test result in PROC TTEST\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ttest/variance_sas.png){fig-align='center' width=30%}\n:::\n:::\n\n\n\n## Example with unequal variances\n\nFor this example, it is important to use the Welch's t-test (the Satterthwaite approximation is used to calculate the effective degrees of freedom and variance) results.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\ndata d2;\n length trt_grp $ 9;\n input trt_grp $ WtGain @@;\n datalines;\nplacebo 14 placebo 15 placebo 15 placebo 15 \nplacebo 16 placebo 18 placebo 22 placebo 23 \nplacebo 24 placebo 25 placebo 25\ntreatment 10 treatment 12 treatment 14 treatment 15 \ntreatment 18 treatment 22 treatment 24 treatment 27 \ntreatment 31 treatment 33 treatment 34 treatment 34 \ntreatment 34 \n;\nrun;\n\nproc ttest data=d2; \n class trt_grp; \n var WtGain; \nrun;\n```\n:::\n\n\nOutput:\n\n```default\n Figure 1: Test results for independent t-test using PROC TTEST in SAS\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ttest/test2.png){fig-align='center' width=50%}\n:::\n:::\n\n\n", - "supporting": [ - "ttest_2Sample_files" - ], + "markdown": "---\ntitle: \"Independent Two-Sample t-test\"\noutput: html_document\n---\n\n\n\n## Data Used\n\nThe following data was used in this example.\n\n```sas\ndata d1;\n length trt_grp $ 9;\n input trt_grp $ WtGain @@;\n datalines;\nplacebo 94 placebo 12 placebo 26 placebo 89 \nplacebo 88 placebo 96 placebo 85 placebo 130 \nplacebo 75 placebo 54 placebo 112 placebo 69 \nplacebo 104 placebo 95 placebo 53 placebo 21 \ntreatment 45 treatment 62 treatment 96 treatment 128 \ntreatment 120 treatment 99 treatment 28 treatment 50 \ntreatment 109 treatment 115 treatment 39 treatment 96 \ntreatment 87 treatment 100 treatment 76 treatment 80 \n;\nrun;\n```\n\n## 
**Independent Two-Sample t-test in SAS** {#sas}\n\nThe null hypothesis of the Independent Samples t-test is, the means for the two populations are equal.\n\nIn SAS the following code was used to test the mean comparison (mean of Weight Gain) of two independent treatment groups (Treatment and Placebo).\nBoth the Student's t-test and Welch's t-test (the Satterthwaite approximation is used to calculate the effective degrees of freedom and variance) in the same call.\n\nFor this example, we're testing the significant difference in mean of Weight gain (*WtGain*) between treatment and placebo (*trt_grp*) using PROC TTEST procedure in SAS.\n\n```sas\nproc ttest data=d1; \n class trt_grp; \n var WtGain; \nrun; \n```\n\nOutput:\n\n``` default\n Figure 1: Test results for independent t-test using PROC TTEST in SAS\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ttest/test1.png){fig-align='center' width=50%}\n:::\n:::\n\n\nHere the t-value is --0.70, degrees of freedom is 30 and P value is 0.4912 which is greater than 0.05, so we accept the null hypothesis that there is no evidence of a significant difference between the means of treatment groups. The mean in placebo group is 75.1875 and mean in Treatment group is 83.1250. The mean difference the treatment groups (Treatment-Placebo) is --7.9375 and the 95% CI for the mean difference is \\[--31.1984, 15.3234\\]. The 95% confidence interval includes a treatment difference of 0, which supports the conclusion that the data fail to provide any evidence of a difference between the treatment groups.\n\n## Model Checking\n\nNote: Before entering straight into the t-test we need to check whether the assumptions (like the equality of variance, the observations should be independent, observations should be normally distributed) are met or not. If normality is not satisfied, we may consider using a suitable non-parametric test.\n\n1. 
Normality: You can check for data to be normally distributed by plotting a histogram of the data by treatment. Alternatively, you can use the Shapiro-Wilk test or the Kolmogorov-Smirnov test. If the test is \\<0.05 and your sample is quite small then this suggests you should not use the t-test. However, if your sample in each treatment group is large (say \\>30 in each group), then you do not need to rely so heavily on the assumption that the data have an underlying normal distribution in order to apply the two-sample t-test. This is where plotting the data using histograms can help to support investigation into the normality assumption. We have checked the normality of the observations using the code below. Here for both the treatment groups we have P value greater than 0.05 (Shapiro-Wilk test is used), therefore the normality assumption is there for our data.\n\n```sas\nproc univariate data=d1 normal; \n qqplot WtGain; \n by trt_grp; \nrun; \n```\n\nOutput:\n\n``` default\n Figure 2: The results of normality test for Treatment group\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ttest/trt_sas.png){fig-align='center' width=30%}\n:::\n:::\n\n\n``` default\n Figure 3: The results of normality test for Placebo group\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ttest/placb_sas.png){fig-align='center' width=30%}\n:::\n:::\n\n\n2. Homogeneity of variance (or Equality of variance): Homogeniety of variance will be tested by default in PROC TTEST itself by Folded F-test. In our case the P values is 0.6981 which is greater than 0.05. So we accept the null hypothesis of F-test, i.e. variances are same. Then we will consider the pooled method for t-test. If the F test is statistically significant (p\\<0.05), then the pooled t-test may give erroneous results. 
In this instance, if it is believed that the population variances may truly differ, then the Satterthwaite (unequal variances) analysis results should be used. These are provided in the SAS output alongside the Pooled results as default.\n\nOutput:\n\n``` default\n Figure 4: Folded F-test result in PROC TTEST\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ttest/variance_sas.png){fig-align='center' width=30%}\n:::\n:::\n\n\n\n## Example with unequal variances\n\nFor this example, it is important to use the Welch's t-test (the Satterthwaite approximation is used to calculate the effective degrees of freedom and variance) results.\n\n```sas\ndata d2;\n length trt_grp $ 9;\n input trt_grp $ WtGain @@;\n datalines;\nplacebo 14 placebo 15 placebo 15 placebo 15 \nplacebo 16 placebo 18 placebo 22 placebo 23 \nplacebo 24 placebo 25 placebo 25\ntreatment 10 treatment 12 treatment 14 treatment 15 \ntreatment 18 treatment 22 treatment 24 treatment 27 \ntreatment 31 treatment 33 treatment 34 treatment 34 \ntreatment 34 \n;\nrun;\n\nproc ttest data=d2; \n class trt_grp; \n var WtGain; \nrun;\n```\n\nOutput:\n\n```default\n Figure 1: Test results for independent t-test using PROC TTEST in SAS\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ttest/test2.png){fig-align='center' width=50%}\n:::\n:::\n\n\n", + "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" ], diff --git a/_freeze/SAS/ttest_Paired/execute-results/html.json b/_freeze/SAS/ttest_Paired/execute-results/html.json index 7e0a7f98a..0b6984f45 100644 --- a/_freeze/SAS/ttest_Paired/execute-results/html.json +++ b/_freeze/SAS/ttest_Paired/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "3930c678d8a0143329043b706c71276e", + "hash": "17f88f60bec7ad2e17530b37be984aaf", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Paired t-test\"\nexecute: \n eval: false\n---\n\n\n\n# **Paired t-test in SAS**\n\nThe Paired t-test is used 
when two samples are naturally correlated. In the Paired t-test, the difference of the means between the two samples is compared to a given number that represents the null hypothesis. For a Paired t-test, the number of observations in each sample must be equal.\n\nIn SAS, a Paired t-test is typically performed using PROC TTEST.\n\n## Normal Data {#normal}\n\nBy default, SAS PROC TTEST t-test assumes normality in the data and uses a classic Student's t-test.\n\n### Data Used\n\nThe following data was used in this example.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\ndata pressure;\n input SBPbefore SBPafter @@;\n datalines;\n 120 128 124 131 130 131 118 127\n 140 132 128 125 140 141 135 137\n 126 118 130 132 126 129 127 135\n ;\n```\n:::\n\n\n### Code\n\nThe following code was used to test the comparison of two paired samples of Systolic Blood Pressure before and after a procedure.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc ttest data=pressure;\n paired SBPbefore*SBPafter;\nrun;\n```\n:::\n\n\nOutput:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ttest/paired_test1.png){fig-align='center' width=50%}\n:::\n:::\n\n\n## Lognormal Data {#lognormal}\n\nThe SAS paired t-test also supports analysis of lognormal data. Here is the data used for the lognormal analysis.\n\n### Data\n\n\n::: {.cell}\n\n```{.sas .cell-code}\ndata auc;\n input TestAUC RefAUC @@;\n datalines;\n 103.4 90.11 59.92 77.71 68.17 77.71 94.54 97.51\n 69.48 58.21 72.17 101.3 74.37 79.84 84.44 96.06\n 96.74 89.30 94.26 97.22 48.52 61.62 95.68 85.80\n ;\n```\n:::\n\n\n### Code\n\nFor cases when the data is lognormal, SAS offers the \"DIST\" option to chose between a normal and lognormal distribution. 
The procedure also offers the TOST option to specify the equivalence bounds.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc ttest data=auc dist=lognormal tost(0.8, 1.25);\n paired TestAUC*RefAUC;\nrun;\n```\n:::\n\n\nOutput:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ttest/paired_test2.png){fig-align='center' width=70%}\n:::\n:::\n\n\nAs can be seen in the figure above, the lognormal variation of the TTEST procedure offers additional results for geometric mean, coefficient of variation, and TOST equivalence analysis. The output also includes multiple p-values.", + "markdown": "---\ntitle: \"Paired t-test\"\n---\n\n\n\n# **Paired t-test in SAS**\n\nThe Paired t-test is used when two samples are naturally correlated. In the Paired t-test, the difference of the means between the two samples is compared to a given number that represents the null hypothesis. For a Paired t-test, the number of observations in each sample must be equal.\n\nIn SAS, a Paired t-test is typically performed using PROC TTEST.\n\n## Normal Data {#normal}\n\nBy default, SAS PROC TTEST t-test assumes normality in the data and uses a classic Student's t-test.\n\n### Data Used\n\nThe following data was used in this example.\n\n```sas\ndata pressure;\n input SBPbefore SBPafter @@;\n datalines;\n 120 128 124 131 130 131 118 127\n 140 132 128 125 140 141 135 137\n 126 118 130 132 126 129 127 135\n ;\n```\n\n### Code\n\nThe following code was used to test the comparison of two paired samples of Systolic Blood Pressure before and after a procedure.\n\n```sas\nproc ttest data=pressure;\n paired SBPbefore*SBPafter;\nrun;\n```\n\nOutput:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ttest/paired_test1.png){fig-align='center' width=50%}\n:::\n:::\n\n\n## Lognormal Data {#lognormal}\n\nThe SAS paired t-test also supports analysis of lognormal data. 
Here is the data used for the lognormal analysis.\n\n### Data\n\n```sas\ndata auc;\n input TestAUC RefAUC @@;\n datalines;\n 103.4 90.11 59.92 77.71 68.17 77.71 94.54 97.51\n 69.48 58.21 72.17 101.3 74.37 79.84 84.44 96.06\n 96.74 89.30 94.26 97.22 48.52 61.62 95.68 85.80\n ;\n```\n\n### Code\n\nFor cases when the data is lognormal, SAS offers the \"DIST\" option to chose between a normal and lognormal distribution. The procedure also offers the TOST option to specify the equivalence bounds.\n\n```sas\nproc ttest data=auc dist=lognormal tost(0.8, 1.25);\n paired TestAUC*RefAUC;\nrun;\n```\n\nOutput:\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/ttest/paired_test2.png){fig-align='center' width=70%}\n:::\n:::\n\n\nAs can be seen in the figure above, the lognormal variation of the TTEST procedure offers additional results for geometric mean, coefficient of variation, and TOST equivalence analysis. The output also includes multiple p-values.", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/SAS/wilcoxonsr_HL/execute-results/html.json b/_freeze/SAS/wilcoxonsr_HL/execute-results/html.json index 49bb4823c..d09b36e5a 100644 --- a/_freeze/SAS/wilcoxonsr_HL/execute-results/html.json +++ b/_freeze/SAS/wilcoxonsr_HL/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "2b6c8930faf5bc766be15dbd7d913581", + "hash": "e9a9d7e3609b0079e2f4720239cdce50", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Wilcoxon signed-rank test in SAS & StatXact®\"\nexecute: \n eval: false\n---\n\n### **Introduction**\n\nSimilarily to what has been presented in R, we will explore the options of Wilcoxon Signed-Rank test that are avialable in SAS & StatXact.We will consider case with N\\>=20 or N\\<20 and without or with ties. 
For more information how to perform this analysis in R go [here](../R/wilcoxonsr_hodges_lehman.qmd)\n\n### Data {.unnumbered}\n\nAnalysis was be conducted on the same example dataset as in R.\n\nAgain, wilcoxon signed rank test was applied to analyse the time to return to baseline FEV1 post-mannitol challenge 2.\n\n## Analysis in SAS {.unnumbered}\n\n### Dataset without ties and N \\> 20 {.unnumbered}\n\nLet's consider a case where the dataset has no ties and N (number of observations) = 240.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\ndata TTR;\n set TTR;\n diff = TRT_B - TRT_A;\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/wilcoxonsr/wsr_data.PNG){fig-align='center' width=75%}\n:::\n:::\n\n\nIn SAS Wilcoxon Signed-Rank test is available using PROC UNIVARIATE.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc univariate data=TTR\n alpha=0.1;\n var diff;\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n\n:::\n\n\n### Dataset without ties and N≤20 {.unnumbered}\n\nNow let's consider a smaller dataset, created by selecting first 19 observations from our main data.\n\n\n::: {.cell}\n\n```{.sas .cell-code}\ndata TTR_19;\n set TTR;\n if _N_ <= 19;\nrun;\n```\n:::\n\n\n\n::: {.cell}\n\n```{.sas .cell-code}\nproc univariate data=TTR_19\n alpha=0.1;\n var diff;\nrun;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/wilcoxonsr/wsr_19.PNG){fig-align='center' width=75%}\n:::\n:::\n\n\n### Important notes on SAS {.unnumbered}\n\n- Only PROC UNIVARIATE can be used in SAS to perform Wilcoxon Signed-Rank test. 
SAS documentation details are [here](https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.5/procstat/procstat_univariate_syntax01.htm).\n- In regards to Wilcoxon S-R test, SAS provides only p value\n- Hodges-Lehmann estimator or CI are not available and have to be implemented manually\n- Provided p value is based on Signed Rank (S) statistic (modification of a common T+). Details are [here](https://blogs.sas.com/content/iml/2023/07/19/wilcoxon-signed-rank.html)\n- SAS computes exact p values only for N ≤ 20. For larger samples uses an asymptotic t-Student distribution of the test statistic. For more information how the p value is calculated go [here](https://support.sas.com/documentation/cdl/en/procstat/63104/HTML/default/viewer.htm#procstat_univariate_sect029.htm)\n- PROC UNIVARIATE apart from performing Wilcoxon S-R test presents as well basic statistical measures of variability and location, e.g median. The given median is not a \"pseudo-median\" (median of the Walsh averages), it is a \"normal\" median of the considered variable.\n- Using CIQUANTNORMAL option we can get confidence limits for quantiles based on normal distribution. There are 5 different definitions for calculation quantiles available. See details from the SAS documentation [here](https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.5/procstat/procstat_univariate_details14.htm). It is important to note, those are not confidence intervals of estimator.\n\n### Approach to 0s and ties in SAS {.unnumbered}\n\n- In SAS all the 0 differences are disregarded (Hollander and Wolfe, 1973). The sample size N is reduced to reflect the number of discarded zeros.\n- Tied differences are given an average of the ranks. Statistic S is updated accordingly following Sprent algorythm (Sprent, 1993).\n\n## Analysis in StatXact® {.unnumbered}\n\nStatXact® PROCs for SAS users is a clinical trial analysis software from Cytel for exact statistics. 
Package includes more than 150 procedures for exact inference statistical data and power analysis.\n\n### Dataset without ties and N \\> 20 {.unnumbered}\n\n\n::: {.cell}\n\n```{.sas .cell-code}\n/* Wilxocon S-R test - p values */\nPROC PAIRED DATA=WilcoxonSignedRank_TTR\n ALPHA=0.9;\n WI/EX;\n POPS TRT_B - TRT_A;\nRUN;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/wilcoxonsr/wsrSX_240a.PNG){fig-align='center' width=75%}\n:::\n:::\n\n\n\n::: {.cell}\n\n```{.sas .cell-code}\n/* Wilcoxon S-R - H-L estimator and CI */\nPROC PAIRED DATA=WilcoxonSignedRank_TTR \n ALPHA=0.9;\n HL/EX;\n POPS TRT_B - TRT_A;\nRUN;\n```\n:::\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/wilcoxonsr/wsrSX_240b.PNG){fig-align='center' width=75%}\n:::\n:::\n\n\n### Important notes on StatXact® {.unnumbered}\n\n- Only PROC PAIRED can be used in StatXact to perform Wilcoxon Signed-Rank test\n- Follows Sprent (1993) approach for Wilcoxon Signed-Rank test and Lehmann (1975) for H-L estimate and CI\n- Provides exact/non-exact p values, (exact) H-L estimator and exact/non-exact CIs\n- p value is based on a common T+ statistic (sum of ranks of the positive differences)\n\n### Approach to 0s and ties in StatXact® {.unnumbered}\n\n- Using ZEROS option we can compute H-L estimate including all differences, but by default 0s are excluded.\n- Tied differences are given an average of the ranks. Statistic S is updated accordingly following Sprent algorythm (Sprent, 1993).", + "markdown": "---\ntitle: \"Wilcoxon signed-rank test in SAS & StatXact®\"\n---\n\n### **Introduction**\n\nSimilarily to what has been presented in R, we will explore the options of Wilcoxon Signed-Rank test that are avialable in SAS & StatXact.We will consider case with N\\>=20 or N\\<20 and without or with ties. 
For more information how to perform this analysis in R go [here](../R/wilcoxonsr_hodges_lehman.qmd)\n\n### Data {.unnumbered}\n\nAnalysis was be conducted on the same example dataset as in R.\n\nAgain, wilcoxon signed rank test was applied to analyse the time to return to baseline FEV1 post-mannitol challenge 2.\n\n## Analysis in SAS {.unnumbered}\n\n### Dataset without ties and N \\> 20 {.unnumbered}\n\nLet's consider a case where the dataset has no ties and N (number of observations) = 240.\n\n```sas\ndata TTR;\n set TTR;\n diff = TRT_B - TRT_A;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/wilcoxonsr/wsr_data.PNG){fig-align='center' width=75%}\n:::\n:::\n\n\nIn SAS Wilcoxon Signed-Rank test is available using PROC UNIVARIATE.\n\n```sas\nproc univariate data=TTR\n alpha=0.1;\n var diff;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/wilcoxonsr/wsr_240.PNG){fig-align='center' width=75%}\n:::\n:::\n\n\n### Dataset without ties and N≤20 {.unnumbered}\n\nNow let's consider a smaller dataset, created by selecting first 19 observations from our main data.\n\n```sas\ndata TTR_19;\n set TTR;\n if _N_ <= 19;\nrun;\n```\n\n```sas\nproc univariate data=TTR_19\n alpha=0.1;\n var diff;\nrun;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/wilcoxonsr/wsr_19.PNG){fig-align='center' width=75%}\n:::\n:::\n\n\n### Important notes on SAS {.unnumbered}\n\n- Only PROC UNIVARIATE can be used in SAS to perform Wilcoxon Signed-Rank test. SAS documentation details are [here](https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.5/procstat/procstat_univariate_syntax01.htm).\n- In regards to Wilcoxon S-R test, SAS provides only p value\n- Hodges-Lehmann estimator or CI are not available and have to be implemented manually\n- Provided p value is based on Signed Rank (S) statistic (modification of a common T+). 
Details are [here](https://blogs.sas.com/content/iml/2023/07/19/wilcoxon-signed-rank.html)\n- SAS computes exact p values only for N ≤ 20. For larger samples uses an asymptotic t-Student distribution of the test statistic. For more information how the p value is calculated go [here](https://support.sas.com/documentation/cdl/en/procstat/63104/HTML/default/viewer.htm#procstat_univariate_sect029.htm)\n- PROC UNIVARIATE apart from performing Wilcoxon S-R test presents as well basic statistical measures of variability and location, e.g median. The given median is not a \"pseudo-median\" (median of the Walsh averages), it is a \"normal\" median of the considered variable.\n- Using CIQUANTNORMAL option we can get confidence limits for quantiles based on normal distribution. There are 5 different definitions for calculation quantiles available. See details from the SAS documentation [here](https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.5/procstat/procstat_univariate_details14.htm). It is important to note, those are not confidence intervals of estimator.\n\n### Approach to 0s and ties in SAS {.unnumbered}\n\n- In SAS all the 0 differences are disregarded (Hollander and Wolfe, 1973). The sample size N is reduced to reflect the number of discarded zeros.\n- Tied differences are given an average of the ranks. Statistic S is updated accordingly following Sprent algorythm (Sprent, 1993).\n\n## Analysis in StatXact® {.unnumbered}\n\nStatXact® PROCs for SAS users is a clinical trial analysis software from Cytel for exact statistics. 
Package includes more than 150 procedures for exact inference statistical data and power analysis.\n\n### Dataset without ties and N \\> 20 {.unnumbered}\n\n```sas\n/* Wilxocon S-R test - p values */\nPROC PAIRED DATA=WilcoxonSignedRank_TTR\n ALPHA=0.9;\n WI/EX;\n POPS TRT_B - TRT_A;\nRUN;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/wilcoxonsr/wsrSX_240a.PNG){fig-align='center' width=75%}\n:::\n:::\n\n\n```sas\n/* Wilcoxon S-R - H-L estimator and CI */\nPROC PAIRED DATA=WilcoxonSignedRank_TTR \n ALPHA=0.9;\n HL/EX;\n POPS TRT_B - TRT_A;\nRUN;\n```\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/wilcoxonsr/wsrSX_240b.PNG){fig-align='center' width=75%}\n:::\n:::\n\n\n### Important notes on StatXact® {.unnumbered}\n\n- Only PROC PAIRED can be used in StatXact to perform Wilcoxon Signed-Rank test\n- Follows Sprent (1993) approach for Wilcoxon Signed-Rank test and Lehmann (1975) for H-L estimate and CI\n- Provides exact/non-exact p values, (exact) H-L estimator and exact/non-exact CIs\n- p value is based on a common T+ statistic (sum of ranks of the positive differences)\n\n### Approach to 0s and ties in StatXact® {.unnumbered}\n\n- Using ZEROS option we can compute H-L estimate including all differences, but by default 0s are excluded.\n- Tied differences are given an average of the ranks. 
Statistic S is updated accordingly following Sprent algorythm (Sprent, 1993).", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/blogs/posts/202305_introduction_to_CAMIS_blog/execute-results/html.json b/_freeze/blogs/posts/202305_introduction_to_CAMIS_blog/execute-results/html.json deleted file mode 100644 index 79c582753..000000000 --- a/_freeze/blogs/posts/202305_introduction_to_CAMIS_blog/execute-results/html.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "hash": "dd11661067c5f77be1bf919fc41b8ef3", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"Introduction Comparing Analysis Method Implementations in Software (CAMIS)\"\n---\n\n\n\n\nAre you trying to replicate results using different software/languages and struggling to find out why you can't match the results? Check out the [CAMIS repository](https://psiaims.github.io/CAMIS)!\n\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../../images/logo.png){fig-align='center' width=50%}\n:::\n\n::: {.cell-output-display}\n![](../../images/qrcode_website.png){fig-align='center' width=50%}\n:::\n:::\n\n\n\n\nThe CAMIS repository stores documentation detailing the reasons for observed differences when performing statistical analysis in SAS and R. The repository is housed on github, and will be populated through open-source community contributions.\n\nDifferences between software could be due to different default and available options, including the methods being used. By documenting these known differences in a repository, we aim to reduce time-consuming efforts within the community, where multiple people are investigating the same issues. If you find an issue not already investigated, please log an Issue in github. If you have time to investigate and document the reason for the issue, then please submit a pull request with the new content in a quarto file. 
Details of how to contribute can be found on the website.\n\nCAMIS is a PHUSE working group in collaboration with PSI and the R consortium. Initially the repository contains R and SAS analysis result comparisons, however the team hope to extend to other software/languages in the near future. Our white paper will soon be available on the website. Please help us to build a high quality and comprehensive repository.\n", - "supporting": [], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/blogs/posts/202312_highlights_blog/execute-results/html.json b/_freeze/blogs/posts/202312_highlights_blog/execute-results/html.json deleted file mode 100644 index 49bec992b..000000000 --- a/_freeze/blogs/posts/202312_highlights_blog/execute-results/html.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "hash": "011812e76b6c48cc3d8e1dd2d84f4617", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"2023: A Year of Progress for the PHUSE CAMIS Working Group Project\"\n---\n\n\n\n\nAs we draw towards the end of 2023, the PHUSE DVOST [CAMIS Working Group Project](https://advance.phuse.global/pages/viewpage.action?pageId=327874) reflect on their key progress and successes this year.\n\nThe [CAMIS repository](https://psiaims.github.io/CAMIS/) went live in January 2023, drawing on the content from the [CSRMLW Working Group](https://github.com/phuse-org/CSRMLW). 
This searchable repository compares analysis method implementations in software (CAMIS) such as SAS, R and python.\n\nThe [white paper](https://phuse.s3.eu-central-1.amazonaws.com/Deliverables/Data+Visualisation+%26+Open+Source+Technology/WP077.pdf), Key Considerations When Understanding Differences in Statistical Methodology Implementations Across Programming Languages -- An Introduction to the CAMIS Project was published in June, which highlighted the importance of clearly specifying your analysis, such that it can be replicated in different software and doesn't rely on default options, which can be different.\n\nFor more complex analyses, it can still be hard to understand what defaults and algorithms your software is using, so the team focused 2023 on expanding our github [repo](repo) content, comparing SAS vs R methods. By August, we had covered the following topics in the repo: quartiles, rounding, ANOVA, MMRM, the CMH test, log-rank, Cox PH, the McNemar test, the Kruskal-Wallis test and logistic. October saw the launch of the CAMIS-Oncology sub-group, led by Somasekhar Sriadibhatla (AstraZeneca). This team will focus specifically on oncology endpoints and analysing them in SAS, R and Python.\n\nThe CAMIS team have expanded in membership this year and presented at conferences around the world. In November, we welcomed Harshal Khanolkar (Novo Nordisk) to join the leadership team alongside Christina Fillmore (GSK) and Lyn Taylor (Parexel). Our focus for 2024 will be on creating additional content for the repo and sharing awareness of the project across the medical research and wider community.\n\nWe would like to take this opportunity to thank all of our team members and contributors, and encourage everyone to check out the repository and help us grow our content. 
If you would like to join the team, please get in touch through the github repo.\n\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/202312 highlights.jpg){fig-align='center' width=75%}\n:::\n:::\n", - "supporting": [], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/blogs/posts/202403_phuseUS2024/execute-results/html.json b/_freeze/blogs/posts/202403_phuseUS2024/execute-results/html.json deleted file mode 100644 index 227210c0e..000000000 --- a/_freeze/blogs/posts/202403_phuseUS2024/execute-results/html.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "hash": "5d7f571cbca8faa1897b2452b6e7793f", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"PHUSE US connect 2024 Poster Presentation by Soma & Vikash\"\n---\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/202403PHUSE_US_CONNECT24_Soma_PP.jpeg){fig-align='center' width=75%}\n:::\n:::\n\n\n\n\nCongratulations, [Soma Sekhar Sriadibhatla](https://www.linkedin.com/in/soma-sekhar-s-20381416a/), on your poster presentation \"CAMIS-An open source repository to document differences in statistical methodology software\" at PHUSE US Connect 2024.\n\n\\\nA poster was presented on PHUSE DVOST-CAMIS effort to document discrepancies between programming languages such as SAS, R, and Python (due to software default choices) in order to conduct the same end-point analysis, a cheat sheet across multilingual languages, and the CAMIS-ONCO subgroup.\\\n\\\nWe are pleased to inform you that we have received positive feedback from the industry on our CAMIS repository, which helps them to eliminate the risk of doing end point analysis in a multilingual world.\n", - "supporting": [], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ 
No newline at end of file diff --git a/_freeze/blogs/posts/202503_Tobit_regression/execute-results/html.json b/_freeze/blogs/posts/202503_Tobit_regression/execute-results/html.json deleted file mode 100644 index bfce0718c..000000000 --- a/_freeze/blogs/posts/202503_Tobit_regression/execute-results/html.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "hash": "b6bb3bcff089b6a2ebabfeb30e8fad20", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"Tobit Regression Comparison between R and SAS\"\n---\n\n\n\n\nA recent CAMIS contribution explored the standard Tobit model for a virology endpoint (viral load) with a lower detection limit.\n\nTobit regression, a censored regression model, estimates linear relationships between independent variables and a dependent variable that is either left- or right-censored at a specific known value.\n\nThe implementations of Tobit regression in R and SAS were compared (link to full comparison on CAMIS website: [R vs SAS Tobit Regression](https://psiaims.github.io/CAMIS/Comp/r-sas_tobit.html)). In SAS, the LIFEREG procedure was used, which requires a specific structure in the MODEL statement, namely \"(lower, upper)\". Here, if the lower value is missing, then the upper value is used as a left-censored value.\n\nIn R, the censReg, survival, and VGAM packages were explored. The censReg() and survreg() (from the survival package) functions provided matching results with SAS LIFEREG. In both cases estimation is being done by the maximum likelihood approach. The vglm() function in VGAM showed slight numerical differences due to a different estimation technique. The VGAM package uses vector generalized linear and additive models which are estimated using an iteratively reweighted least squares (IRLS) algorithm.\n\nTypically, the Tobit model assumes normally distributed data, and the standard Tobit regression results matched between R and SAS when a normally distributed endpoint was assumed. 
Additionally, this comparison also highlighted the flexibility of Tobit regression implementations across different software (as well as the importance of being aware of different default and available options), with SAS LIFEREG and R's survival package offering multiple different distributional assumptions.\n\nCAMIS is a PHUSE working group in collaboration with PSI AIMS SIG. For more on CAMIS's goals and repository, as well as how to contribute, visit the CAMIS website: .\n\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/Tobit_comparison.jpg){fig-align='center' width=75%}\n:::\n:::\n", - "supporting": [], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/blogs/posts/202506_psiconference/execute-results/html.json b/_freeze/blogs/posts/202506_psiconference/execute-results/html.json deleted file mode 100644 index 16a5eed31..000000000 --- a/_freeze/blogs/posts/202506_psiconference/execute-results/html.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "hash": "54c7ea5dbe4134f29419e3064df193e9", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"PSI Conference 2025\"\n---\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/202506PSI.jpg){fig-align='center' width=75%}\n:::\n:::\n\n\n\n\nThe PSI Conference 2025 took place 8th-11th June 2025 in Wembley, London.\n\nThe CAMIS team in collaboration with the [PSI AIMS SIG](https://psiaims.github.io/website/) presented 2 talks at the conference. PHUSE kindly supported the team with provision of PHUSE CAMIS t-shirts and stickers to hand out, which contained the QR code for the repo. This really helped us to stand out among the 400+ statisticians and to spread the word about the CAMIS project repository.\n\nThe first of the talks entitled: \"R you (all) right, SAS? 
Replicating statistical results between software\", contained an introduction to the CAMIS project presented by Lyn Taylor, followed by a case study example on how to compare methods across software and in particular methods to assess which r packages to use, presented by Christina Fillmore. Slides can be found here: [CAMIS](../../non_website_content/conferences/2025/202506_PSI_Christina_Lyn.pptx)\n\nIn the second talk entitled: (Sample) size matters - demonstrating sample size calculations across software, Agnieszka Tomczyk presented findings from replicating sample size methods in SAS, StatXact, EAST and R. Slides can be found here: [Samplesize](../../non_website_content/conferences/2025/202506_PSI_Agnieszka_Tomczyk.pptx)\n\nWe received really positive feedback from the industry on our CAMIS repository, with everyone agreeing it will help to eliminate duplication of researching the methodology differences between software.\n\nPlease help to promote CAMIS by sharing the sticker below !\n\n\n\n\n::: {.cell layout-align=\"center\"}\n::: {.cell-output-display}\n![](../images/2025stickers.png){fig-align='center' width=75%}\n:::\n:::\n", - "supporting": [], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/contribution/Package-Review/execute-results/html.json b/_freeze/contribution/Package-Review/execute-results/html.json deleted file mode 100644 index a0890d646..000000000 --- a/_freeze/contribution/Package-Review/execute-results/html.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "hash": "ec7a0fb8e51eaba986ad50dd43ee3d7e", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"How to Choose an R Stats Package\"\n---\n\n\n\nWhen you want to implement statistical methodology in R and you don't already know the package that does the method, it can be difficult to figure out the \"best\" package. 
Sometimes when you google a method, the top result is the package with wording that matches closest to what you typed into google, rather than the package that is \"best\" for the method. You don't want to waste time looking deeply into a package only to find there is a more common and robust R package out there for the same method. So before you go down a rabbit hole on one package, it is worth checking a few packages. But, how do you \"check\" a package. This blog post will go through a worked example of comparing packages and a checklist of things to look at when comparing packages in the future.\n\n## Package Checklist\n\nWhen looking at a package the first place to start is the CRAN index page. You can find this by googling CRAN and name of the package. From there you want to ask yourself the following questions:\n\n- Is this package being actively maintained?\n\n- Is the author known in the field?\n\n - Is there more than one author?\n\n- Does the package have adequate documentation?\n\n - Are there references to external peer reviewed papers?\n\n - Is there a website / vignettes?\n\n- Is there a way to report bugs?\n\n- Can the packages handle different edge cases?\n\n- Does the package have a lot of dependencies / unusual dependencies?\n\n- Look at community adoption?\n\nUsing this checklist can help you quickly and consistently get a sense of a package before spending time looking into the code directly. Let's see how this works in practice.\n\n## Worked Example: Wilcoxon Rank-Sum Test\n\nFor this, we are going to look at the Wilcoxon Rank-Sum test and the associated Hodges-Lehmann confidence interval. After googling a bit, I found three different packages that do a Wilcoxon Rank-Sum p-value and Hodges-Lehmann CI:\n\n1. {stats} (part of base R)\n\n2. {pairwiseCI}\n\n3. {coin}\n\nGreat! 
I might be kind of done, because I tend to favor base R stats functions, but as I start looking into this, I found the {stats} function can't handle ties if I want the exact methods. So I need to look into and compare the {pairwiseCI} and {coin} packages.\n\n::: callout-tip\nYou often find that differences between packages and software show up when there are ties, missing data, and/or extreme values, so it is good to try to include these in the dataset you are using to compare.\n:::\n\nNow I need to choose between {pairwaiseCI} and {coin}. I could just run the model in both and see if the results match, but that will be a lot of work. So before I get started I want to go through our checklist.\n\nLet's pull up the CRAN index pages for each of these packages and see if we can figure out which package we should use for this analysis.\n\n### {pairwiseCI}\n\nStarting with {pairwiseCI}, the [index](https://cran.r-project.org/web/packages/pairwiseCI/index.html) page looks like this:\n\n![](images/clipboard-4202698305.png)\n\nNow let's go down the checklist to see if there are any red flags for this package.\n\n- Is this package being actively maintained?\n\n

\n\n The last update to this package was 2019-03-11, so over 6 years at the writing of this post. That indicates this probably isn't being actively maintained.\n\n

\n\n- Is the author known in the field?\n\n

\n\n Personally, I don't know this author, but it looks like he does work in a biostatistics department at a university, so that is a really good sign. When you are looking at the author you don't need to go super in-depth, but if you don't know who they are it can be good to check their qualifications.\n\n

\n\n - Is there more than one author?\n\n

\n\n Yes, it looks like there are two authors here. This can be good to check because it can mean the burden of maintaining the package is shared and the documentation has potentially been peer reviewed.\n\n

\n\n- Does the package have adequate documentation?\n\n

\n\n This can be hard to determine from just this index page, but the sub-questions can help here.\n\n

\n\n - Are there references to external peer reviewed papers?\n\n

\n\n On the index there are no references, but there might be some references on the function level. Really it just means they haven't published a paper about this package.\n\n

\n\n - Is there a website / Vignettes?\n\n

\n\n There isn't a website or vingettes. This means all the documenation will be limited to just the functions, which can be harder to understand.\n\n

\n\n- Is there a way to report bugs?\n\n

\n\n There doesn't appear to be a standard way to report bugs. If this package was on github or something similar, I would be able to check any issues there to see if others had similar problems or if the issue was caused by user error and has a quick fix.\n\n

\n\n- Can the packages handle different edge cases?\n\n

\n\n The description doesn't say anything about handling special cases. But, I did find this package because it can handle ties in the exact case.\n\n

\n\n- Does the package have a lot of dependencies / unusual dependencies?\n\n

\n\n It looks like this package only has two dependencies, {MCPAN} and, interestingly, {coin}, the other package we are looking at.\n\n

\n\nOkay, having gone through all but the final question, I would say I feel not amazing about the package, but if it was my only option I would still try to use it. The author gives me confidence in the package, but other things like documentation and last update date, make me a bit nervous about this package.\n\n### {coin}\n\nNow on to {coin} with the same questions. The [index](https://cran.r-project.org/web/packages/coin/index.html) page is as follows:\n\n![](images/clipboard-1725037014.png)\n\n- Is this package being actively maintained?\n\n

\n\n The last update to this package was 2023-09-27, so more recently than {pairwiseCI}.\n\n

\n\n- Is the author known in the field?\n\n

\n\n Again, I don't know the author, but he has an R-project.org email, which indicates he is very involved in the R ecosystem and is a very good sign.\n\n

\n\n - Is there more than one author?\n\n

\n\n Yes, there are 5 different authors, so lots of eyes on bugs and documentation.\n\n

\n\n- Does the package have adequate documentation?\n\n

\n\n Again, this can be hard to determine from just this index page, but the sub-questions can help here.\n\n

\n\n - Are there references to external peer reviewed papers?\n\n

\n\n Yes, there is a peer reviewed paper in the description of this package.\n\n

\n\n - Is there a website / Vignettes?\n\n

\n\n While there isn't a website here, there are four different vignettes.\n\n

\n\n- Is there a way to report bugs?\n\n

\n\n Like with the other package, there doesn't appear to be a standard way to report bugs.\n\n

\n\n- Can the packages handle different edge cases?\n\n

\n\n Same as above. It doesn't directly said it can handle our edge case, but I know it can.\n\n

\n\n- Does the package have a lot of dependencies / unusual dependencies?\n\n

\n\n While this package has more dependencies, all the dependencies are very standard and do not raise any red flags for me\n\n

\n\nHaving gone through most the questions, I am fairly confident in saying I want to use {coin} to investigate this method rather than {pairwiseCI}. For almost all the questions {coin} looks slightly better than {pairwiseCI} and really just has a larger accumulation of evidence of quality. But, I haven't answered the last question in my checklist for either these packages. What about community adoption? It can be a bit hard to look at directly, but I tend to use a few different ways.\n\nFirst, staying on the CRAN index page for the package, I look at the Reverse Dependencies. This section gets split into three parts, \"Reverse depends\", \"Reverse imports\", and \"Reverse suggests\" which explains how the other packages are using the package. In terms of community adoption, it doesn't matter if other packages are depending, importing or suggesting the package, all that matters is they are using it. **Note:** This section only appears if other packages on CRAN use the package.\n\nFor these two packages, only {coin} has this section and we can see there are many other packages that use {coin}.\n\n![](images/clipboard-118256266.png)\n\nThe next thing I will check is the number of downloads. This can easily be done with the following bit of code:\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncranlogs::cran_downloads(package = c(\"coin\", \"pairwiseCI\"))\n```\n:::\n\n\n\n![](images/clipboard-3979687165.png)\n\nAnd you can see {coin} is much more popular than {pairwiseCI}.\n\nSo with all of this information, I think starting with {coin} is going to be the best use of my time.\n\nWhen looking at the number of downloads, you can look over a longer period like over the last month (by using the `when` parameter) or you can look between specific dates (by using the `from` and `to` parameters). But, it will give you the download numbers for each day, which you will need to summaries. 
These day-by-day numbers can be very helpful to look at trends, especially when there is a new package that is getting rapidly adopted.\n\nThe checklist isn't intended to replace a full review of the package for an GxP workflows. But, when just trying to decide which package to look into for a particular stats method it can be helpful.\n\nIn summary, selecting the appropriate R package for statistical analyses is hard. Google, isn't perfect and so it worth finding a few packages and going through this checklist. By taking a few minutes to consider factors like maintenance, documentation, and community adoption can save you time in the long run.\n", - "supporting": [], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/minutes/meetings/2026-01-12/execute-results/html.json b/_freeze/minutes/meetings/2026-01-12/execute-results/html.json index ff1f14ce6..532a5e7dc 100644 --- a/_freeze/minutes/meetings/2026-01-12/execute-results/html.json +++ b/_freeze/minutes/meetings/2026-01-12/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "00ec3f4b0eea1aaa975cec86638bc0b5", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"2026 Plans\"\ndate: \"12 January 2026\"\ncategories:\n - 2026\noutput:\n html_document: \n toc: true\n toc_float: false\n toc_level: 5\n df_print: paged\n---\n\n# Attendees\n\n\n::: {.cell}\n::: {.cell-output-display}\n`````{=html}\n\n\n \n \n \n\n
\n\n\n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n
Attendees 12_Jan_26
Lyn Taylor Yes
Christina Fillmore Yes
Yannick Vandenijck Yes
Molly MacDiarmid Yes
Brian Varney Yes
My Luong Vuong Yes
Mariusz Zieba Yes
\n\n
\n\n`````\n:::\n:::\n\n\n# Agenda & Minutes\n\n**Ideas for the CAMIS project in 2026 and volunteer needs !**\n\n- **Existing content review:**\n\n Volunteers needed to do existing content review & fixes / updates (content auditors). Action needed to go look through existing documentation and check it makes sense and all links still working. If you or anyone you know would like to volunteer to do this task let the leadership team know. For example, it would be a great activity for a junior statistician or programmer who wants exposure to new skills in open source programming and statistical analysis.\n\n - **If you know of any issues/gaps in current content please add as issues  [Issues · PSIAIMS/CAMIS](https://github.com/PSIAIMS/CAMIS/issues)**\n\n - **If you know of anyone who would be willing to volunteer please let us know! \\\n New Year.. New you.. New project !!**\n\n - **If your company would like us to run a hackathon sharing open source techniques wider with your colleagues which we could use to fix some of these issues then please contact us !**\n\n- **Comparison pages** - Python vs R vs SAS\n\n - Current documentation is mostly focused on SAS & R.\n\n - **Volunteers needed to update Comparison pages bringing in the python information**\n\n - In some case may need separate docs (eg. Python vs R), but in most cases hopefully can put in same document as; R vs SAS vs Python - decision depends on size of content.\n\n - Could consider Julia too to the project.\n\n- **Planned new content for 2026**\n\n - update pages to be CMH Only (no CIs), Cis for props for single sample, add new page for CIs for proportion differences, ORs, Risk differences taking content from other pages & Updating to use cicalc package. (new cicalc version released now on cran) - Christina to action\n\n - MMRM - Stephen Waugh hopes to complete in Jan\n\n - Kolmogorov-Smirnov test - My Luong Vuong: R, Molly MacDiarmid- SAS. 
Mariusz Zieba - python\n\n - Marginal homogenicity (SAS only) -  Molly / Lyn.\n\n - Comparison pages for sample size - Agnieska doing in jan\n\n - weighted log rank/ recurrent events - Yannick\n\n - RMST Stephen McCawille no longer available to do this. Lyn or Yannick student to do R page.\n\n - Remove LMM (DF row) or update to be linear mixed models (without repeated measures). Lyn & Christina to discuss with Stephen W & agree best titles for this section\\\n \\\n\n- Conference schedule & ensuring we are marketing our latest content & progress\n\n - New blog released [CAMIS Blog](https://psiaims.github.io/CAMIS/blogs/) re: 2025 awards.\n\n - Conference tab updated to be 2026. Please let us know if you want to present on CAMIS in 2026.\n\n - Lyn & Yannick will create a Create 2026 template: Abstract, poster /  summary of progress to date/ latest content that could be used in conferences?  [2026 Conference material](https://phuseaccount.sharepoint.com/:f:/r/sites/ClinicalStatisticalReportinginaMultilingualWorld/Shared%20Documents/General/2026%20Conference%20material?csf=1&web=1&e=Yv8Tff \"Original URL: https://phuseaccount.sharepoint.com/:f:/r/sites/ClinicalStatisticalReportinginaMultilingualWorld/Shared%20Documents/General/2026%20Conference%20material?csf=1&web=1&e=Yv8Tff. Click or tap if you trust this link.\")\n\n - Lyn has approval from Parexel to present to AZ R conference\n\n - Becca Krouse (GSK) will represent us at PHUSE US connect.\n\n - PHUSE EU Connect - could do a workshop 1.5 hrs to identify issues & fix (like the diversity hackathon).\n\n - R in medicine - Christina potentially doing a workshop for diversity alliance.\\\n \\\n\n- AI & LLM on CAMIS repo - Long term goal ?? \n\n```{=html}\n\n```\n- PHUSE TEAMS area - has been updated. Welcome to store / communicate using it. 
\n\n- **Communications update:**\n\n NOTE: new blog is now posted here: [CAMIS Blog](https://psiaims.github.io/CAMIS/blogs/)\n\n Lyn & Molly to meet with Alex re: PHUSE blogs because now there is a form, & 2 x review cycle.\\\n Discuss sending Blogs vs sending emails to the team\n\n PSI process for blog delivery is working well, they link to our blogs via their newsletters.\n\n- **Next meeting: 9th February 4:30 GMT, 5:30 CET and 11:30 EST**\n", + "markdown": "---\ntitle: \"2026 Plans\"\ndate: \"12 January 2026\"\ncategories:\n - 2026\noutput:\n html_document: \n toc: true\n toc_float: false\n toc_level: 5\n df_print: paged\n---\n\n# Attendees\n\n\n::: {.cell}\n::: {.cell-output-display}\n`````{=html}\n\n\n \n \n \n\n
\n\n\n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n
Attendees 12_Jan_26
Lyn Taylor Yes
Christina Fillmore Yes
Yannick Vandenijck Yes
Molly MacDiarmid Yes
Brian Varney Yes
My Luong Vuong Yes
Mariusz Zieba Yes
Miriam Amor No
Michael Walshe No
Kirsten Findlay No
\n\n
\n\n`````\n:::\n:::\n\n\n# Agenda & Minutes\n\n**Ideas for the CAMIS project in 2026 and volunteer needs !**\n\n- **Existing content review:**\n\n Volunteers needed to do existing content review & fixes / updates (content auditors). Action needed to go look through existing documentation and check it makes sense and all links still working. If you or anyone you know would like to volunteer to do this task let the leadership team know. For example, it would be a great activity for a junior statistician or programmer who wants exposure to new skills in open source programming and statistical analysis.\n\n - **If you know of any issues/gaps in current content please add as issues  [Issues · PSIAIMS/CAMIS](https://github.com/PSIAIMS/CAMIS/issues)**\n\n - **If you know of anyone who would be willing to volunteer please let us know! \\\n New Year.. New you.. New project !!**\n\n - **If your company would like us to run a hackathon sharing open source techniques wider with your colleagues which we could use to fix some of these issues then please contact us !**\n\n- **Comparison pages** - Python vs R vs SAS\n\n - Current documentation is mostly focused on SAS & R.\n\n - **Volunteers needed to update Comparison pages bringing in the python information**\n\n - In some case may need separate docs (eg. Python vs R), but in most cases hopefully can put in same document as; R vs SAS vs Python - decision depends on size of content.\n\n - Could consider Julia too to the project.\n\n- **Planned new content for 2026**\n\n - update pages to be CMH Only (no CIs), Cis for props for single sample, add new page for CIs for proportion differences, ORs, Risk differences taking content from other pages & Updating to use cicalc package. (new cicalc version released now on cran) - Christina to action\n\n - MMRM - Stephen Waugh hopes to complete in Jan\n\n - Kolmogorov-Smirnov test - My Luong Vuong: R, Molly MacDiarmid- SAS. 
Mariusz Zieba - python\n\n - Marginal homogenicity (SAS only) -  Molly / Lyn.\n\n - Comparison pages for sample size - Agnieska doing in jan\n\n - weighted log rank/ recurrent events - Yannick\n\n - RMST Stephen McCawille no longer available to do this. Lyn or Yannick student to do R page.\n\n - Remove LMM (DF row) or update to be linear mixed models (without repeated measures). Lyn & Christina to discuss with Stephen W & agree best titles for this section\\\n \\\n\n- Conference schedule & ensuring we are marketing our latest content & progress\n\n - New blog released [CAMIS Blog](https://psiaims.github.io/CAMIS/blogs/) re: 2025 awards.\n\n - Conference tab updated to be 2026. Please let us know if you want to present on CAMIS in 2026.\n\n - Lyn & Yannick will create a Create 2026 template: Abstract, poster /  summary of progress to date/ latest content that could be used in conferences?  [2026 Conference material](https://phuseaccount.sharepoint.com/:f:/r/sites/ClinicalStatisticalReportinginaMultilingualWorld/Shared%20Documents/General/2026%20Conference%20material?csf=1&web=1&e=Yv8Tff \"Original URL: https://phuseaccount.sharepoint.com/:f:/r/sites/ClinicalStatisticalReportinginaMultilingualWorld/Shared%20Documents/General/2026%20Conference%20material?csf=1&web=1&e=Yv8Tff. Click or tap if you trust this link.\")\n\n - Lyn has approval from Parexel to present to AZ R conference\n\n - Becca Krouse (GSK) will represent us at PHUSE US connect.\n\n - PHUSE EU Connect - could do a workshop 1.5 hrs to identify issues & fix (like the diversity hackathon).\n\n - R in medicine - Christina potentially doing a workshop for diversity alliance.\\\n \\\n\n- AI & LLM on CAMIS repo - Long term goal ?? \n\n```{=html}\n\n```\n- PHUSE TEAMS area - has been updated. Welcome to store / communicate using it. 
\n\n- **Communications update:**\n\n NOTE: new blog is now posted here: [CAMIS Blog](https://psiaims.github.io/CAMIS/blogs/)\n\n Lyn & Molly to meet with Alex re: PHUSE blogs because now there is a form, & 2 x review cycle.\\\n Discuss sending Blogs vs sending emails to the team\n\n PSI process for blog delivery is working well, they link to our blogs via their newsletters.\n\n- **Next meeting: 9th February 4:30 GMT, 5:30 CET and 11:30 EST**\n", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/minutes/meetings/2026-03-09/execute-results/html.json b/_freeze/minutes/meetings/2026-03-09/execute-results/html.json new file mode 100644 index 000000000..6d95b64ac --- /dev/null +++ b/_freeze/minutes/meetings/2026-03-09/execute-results/html.json @@ -0,0 +1,15 @@ +{ + "hash": "0fc4c5cceef264f034f695ad0cbcfe95", + "result": { + "engine": "knitr", + "markdown": "---\ntitle: \"2026 new content in for GLMM and CI for props\" \ndate: \"09 March 2026\" \ncategories: \n - 2026 \noutput: \n html_document: \n toc: true \n toc_float: false \n toc_level: 5 \n df_print: paged \n---\n\n# Attendees\n\n\n::: {.cell}\n::: {.cell-output-display}\n`````{=html}\n\n\n \n \n \n\n
\n\n\n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n
Attendees 09_Mar_26
Lyn Taylor Yes
Christina Fillmore Yes
Yannick Vandenijck No
Molly MacDiarmid Yes
Brian Varney Yes
My Luong Vuong Yes
Mariusz Zieba No
Miriam Amor Yes
Michael Walshe Yes
Kirsten Findlay Yes
\n\n
\n\n`````\n:::\n:::\n\n\n# Agenda & Minutes\n\nNew content in\n\n- GLMM - Miriam\n\n- CI for props - split into 5 sections Poisson rate CI's separate to binomial, PR in, awaiting review/approval.\n\n- Survival Cox_PH model using survival::strata, issue prior to Version 3.8.0\n\n- Upgrade of renv to later R version, led to many files changing 100+ files. So make sure you pull down latest repo before you do a PR.\n\n- upgrade also led to different code needed for SAS code / SAS files. Use 3 back ticks & {SAS} instead of {r} and #\\|eval: false . see Generalized Linear Mixed Models (GLMM) SAS file as a good example.\n\n**Ongoing planned new content for 2026**\n\n- CI for props binomial using cicalc - Christina\n\n- MMRM - Stephen Waugh\n\n- Kolmogorov-Smirnov test - My Luong Vuong: R, Molly MacDiarmid- SAS. Mariusz Zieba - python\n\n- Marginal homogenicity (SAS only) -  Molly / Lyn.\n\n- weighted log rank/ recurrent events - Yannick\n\n- RMST - GSK investigating\n\n**Blogs**\n\n- [PHUSE DVOST CAMIS blog: Reference-Based Multiple Imputation](https://phuse.global/Communications/PHUSE_Blog/phuse-dvost-camis-blog-reference-based-multiple-imputation)\n\n- Next blogs could be\n\n - Miriams GEE/GLMM content\n\n - CI for props\n\n**Conference schedule**\n\n- Lyn will create a Create 2026 template: Abstract, poster /  summary of progress to date/ latest content that could be used in conferences?  [2026 Conference material](https://phuseaccount.sharepoint.com/:f:/r/sites/ClinicalStatisticalReportinginaMultilingualWorld/Shared%20Documents/General/2026%20Conference%20material?csf=1&web=1&e=Yv8Tff \"Original URL: https://phuseaccount.sharepoint.com/:f:/r/sites/ClinicalStatisticalReportinginaMultilingualWorld/Shared%20Documents/General/2026%20Conference%20material?csf=1&web=1&e=Yv8Tff. 
Click or tap if you trust this link.\")\n\n- Becca Krouse (GSK) will represent us at PHUSE US connect.\n\n- PHUSE EU Connect - could do a workshop 1.5 hrs to identify issues & fix (like the diversity hackathon).\n\n- Christina presenting on Friday 13th March to the PSI apprentices\\\n\n**Next meeting: 13th April 4:30 GMT, 5:30 CET and 11:30 EST**\n", + "supporting": [], + "filters": [ + "rmarkdown/pagebreak.lua" + ], + "includes": {}, + "engineDependencies": {}, + "preserve": {}, + "postProcess": true + } +} \ No newline at end of file diff --git a/_freeze/minutes/posts/03Jan2025/execute-results/html.json b/_freeze/minutes/posts/03Jan2025/execute-results/html.json deleted file mode 100644 index 50468b9a4..000000000 --- a/_freeze/minutes/posts/03Jan2025/execute-results/html.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "hash": "3ce0195060e184eaf80604a3ea0a9f54", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"CAMIS Objectives 2025\"\ndate: \"13 Jan 2025\"\noutput:\n html_document: \n toc: true\n toc_float: false\n toc_level: 5\n df_print: paged\n---\n\n\n\n# Attendees\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n`````{=html}\n\n\n \n \n \n\n
\n\n\n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n
attendees 13_Jan_25
Lyn Taylor Yes
Christina Fillmore Yes
Chi Zhang Yes
Molly MacDiarmid Yes
Benjamin Arancibia Yes
Michael Kane Yes
Martin Brown Yes
Stephen McCawille Yes
Miriam Amor Yes
Peilin Zhou Yes
Samrit Pramanik Yes
Brian Varney Yes
Vikrant Vijay Yes
Yannick Vandendijck Yes
Vikash Jain Yes
Michael Walshe Yes
Anwesha Roy Yes
Min-Hua Jen No
Jaskaran Saini No
Mariusz Zieba No
Chelsea Dickens No
Tejas Pandit No
Ashwath Gadapa No
Sarah Brosens No
Kirsten Findlay No
\n\n
\n\n`````\n:::\n:::\n\n\n\n# Agenda & Minutes\n\n**2025 Objectives**\n\n***Communication***\n\n``` \n - Expanding awareness within companies\n\n - More Through PSI (statistician in the pharmaceutical industry) contacts\n\n - Volunteers required for: 2025: 3 x series of CAMIS workshops\n\n - Introduction to CAMIS project /how to use it contribute\n \n - Key findings of CAMIS project\n \n - How do you do comparisons in software.\n\n\n - Other organizations, ASA /OpenStatsWare, EFSPI will be covered through PSI, RinPharma (hosting services YouTube), ISBC -Aug Basel Yannick offered to submit abstract & present on our behalf.\n ACTION: Lyn to send latest version of slides: & load to website.\n\n - Expand awareness to university contacts. Michael Kane can provide introductions through R medicine. Will raise at next meeting to see if we can find an interested professor. There is a desire for students to want projects in pharma topics (master students) -- especially regulatory. We are happy to offer presentations/ workshops to anything they want. (Stephen Waugh could present his experience?)\n```\n\n***Conferences***\n\nIf anyone can attend to represent us let us know, we can help with abstracts / slides.\n\n``` \n - posit::conf(2025)** Get the full details on [the blog post](https://urldefense.com/v3/__https:/info.posit.co/NzA5LU5YTi03MDYAAAGX8BSQdqgtSXVvlxo3uOEKf6cyZbztuQua15w8xXZxe1apl5i8EF9CsQLDaXVdqYXWvjSHzLo=__;!!GfteaDio!aUG_6o_VjPNuaS8HtZgYUS61J7SQrYdKV_-mpyMEkGutGnBN9WATMv9lIk5MflS0BofTiViRryNVUD4_9A$). Closes 3^rd^ Feb. Talks are 20 minutes long \\'pharma stream\\' and will be delivered in person in Atlanta on September 17 or 18 **Volunteers..**\n\n - PHUSE -- SDEs / Conferences..... Volunteers..\n\n - **PHUSE US connect:** [PHUSE US Connect 2025 \\| CDISC](https://www.cdisc.org/events/education/external-events/2025/03/phuse-us-connect-2025)  16^th^-19^th^ March.. 
Lyn to send content to Mike????\n\n - ISBC Feb 14^th^, Yannick will submit abstract\n\n - PharmaSUg conference **Volunteers..**\n\n - R medicine Volunteers. (Michael kane will be going). Vitual (Chi?)\n\n - R in Pharma Volunteers.\n\n - PSI Conference -- we have 2 talks.\n```\n\n***Improving social media frequency***\n\nBlogs to summarise What are the latest repo updates, how do we showcase this on the website? Or by blogs (but how do people get made aware of them)!!!.\n\nACTION: Lyn to speak Harshil. Focused Role just to write Monthly blogs. -- Bring in extra person to help if needed.\n\n***Improving Technical back end***\n\nWhen lyn tested new posit workbench method. Can do demo & give access to frequent contributors who struggle with package control using renv / rendering.\n\n- Christina & Michael Walsh will update us at a later meeting. (Eg. PRs with dummy website views)\n- Strategy for how to make Comparison pages more stable with respect to R version changes\n- Possibility to include checks which run the code & check for change\n\n***Key Topics to update*** MMRM (Stephen Waugh), CMH (lyn), Sample size (Agnieszka & Andisheh & Molly), re-organize logistic regression (Lyn/Chi), reference based multiple imputation (Yannick), Miriam (Generalized MMRM)\\*\\*\n\n***Highlights of key content that has been updated & Summary of findings.***\n\nCoin package: Martin PPD to edit Wilcoxon signed rank Lyn let agnieska know. 
Stephen McCawille -- propensity scores restriction on SAS so used R: to talk to christina.\n", - "supporting": [ - "03Jan2025_files" - ], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/minutes/posts/10Feb2025/execute-results/html.json b/_freeze/minutes/posts/10Feb2025/execute-results/html.json deleted file mode 100644 index 32ee23926..000000000 --- a/_freeze/minutes/posts/10Feb2025/execute-results/html.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "hash": "91c3046e6949e67f4cb03d20101c0d39", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"General meeting updates\"\ndate: \"10 Feb 2025\"\noutput:\n html_document: \n toc: true\n toc_float: false\n toc_level: 5\n df_print: paged\n---\n\n\n\n# Attendees\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n`````{=html}\n\n\n \n \n \n\n
\n\n\n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n
attendees 10_Feb_25
Lyn Taylor Yes
Christina Fillmore No
Chi Zhang No
Molly MacDiarmid Yes
Benjamin Arancibia No
Michael Kane Yes
Martin Brown No
Stephen McCawille Yes
Miriam Amor Yes
Peilin Zhou No
Samrit Pramanik No
Brian Varney Yes
Vikrant Vijay No
Yannick Vandendijck Yes
Vikash Jain Yes
Michael Walshe Yes
Anwesha Roy No
Min-Hua Jen No
Jaskaran Saini No
Mariusz Zieba No
Chelsea Dickens No
Tejas Pandit No
Ashwath Gadapa No
Sarah Brosens No
Kirsten Findlay No
\n\n
\n\n`````\n:::\n:::\n\n\n\n# Agenda & Minutes\n\n- **Volunteers needed for**\n\n - CI for props section (See r page).\n\n - any R packages that can do CI for matched 2 proportions using wilson or normal approximation methods.\n\n - any R packages that can do CI for unmatched 2 proportions using newcombe (or wilson method)\\\n (Stephen M suggested: and \\\n ACTION: Lyn to investigate & write up.\n\n - NOTE: logistic regression will be updated soon to include Multiple treatment contrasts.\\\n\n- **Conference planning** See latest Conference Tab on website (lyn updated today but needs PR approving)\n\n``` \n- posit::conf(2025) deadline 3rd Feb in person atlanta\n\n- PHUSE US connect 16th-19th march. lyn to send content to Hamming Tu (Vikash attending) Lyn to CC vikash let Hamming know you are going. Vikash & Hamming to discuss any presenting plan for CAMIS\n\n- ISBC 14th Feb submission deadline. Yannick submitting abstract\n\n- Any others? Please add to the page\n```\n\n- **Communication team update (Vikash & Molly)**\n\n - Blog ideas\n\n - latest repo updates (CI for Props)\n\n - Key interesting findings\n\n - Conferences\n\n - Molly will maintain conferences tab going forward\n\n- **Content updates**\\\n Anyone with any questions about what they are working on or how to assign themselves?\n\n - MMRM (stephen waugh)\n\n - CMH (Lyn)\n\n - Sample size (agnieska)\n\n - Reference based MI (Yannick)\n\n - Generalized (binomial & multi-nom?) MMRM (Miriam)\n\n - Wilcoxon signed rank coin package - Martin\n\n - Propensity scores restriction on SAS vs R (Stephen McCawille & Christina)\\\n\n- **Objective to get more regulatory input - ongoing**\\\n Work with PSI AIMS as they plan a EMA regulatory panel discussion on R Any other ideas?\\\n FDA/ Other regulators input/discussion.\n\n- Think about white paper.. 
on robustness of results and finding local minima in convergence that are actually issues?....\n\n- **University contacts - ongoing**\n\n - Christina has a new student looking for CAMIS project\n\n - Michael Kane : R medicine\n\n - Yannick may have a intern in summer 8-10 weeks CAMIS research\n\n - Stephen W doing MSc dissertation project on MMRM\n\n- **Technical back end improvements - ongoing**\n\n- **CAMIS- 3 x workshops - ongoing**\n\n - Introduction to CAMIS project / how to contribute - Christina.\n\n - Key findings of CAMIS project - Lyn\n\n - How do you do comparisons in software\n\nACTION :Lyn to ask Orla if she can update renv for Yannicks PR. (And also explain how to do it so others can do it in future).\\\n", - "supporting": [ - "10Feb2025_files" - ], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/minutes/posts/10July2023/execute-results/html.json b/_freeze/minutes/posts/10July2023/execute-results/html.json deleted file mode 100644 index 69aada588..000000000 --- a/_freeze/minutes/posts/10July2023/execute-results/html.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "hash": "216d4d02c310a6e697f78e3e05e42c30", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"Plan for Advertising CAMIS progress\"\ndate: \"19 July 2023\"\noutput:\n html_document: \n toc: true\n toc_float: false\n toc_level: 5\n df_print: paged\n---\n\n\n\n# Attendees\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n`````{=html}\n\n\n \n \n \n\n
\n\n\n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n
attendees 10_july_2023
Aiming Yang No
Ben Arancibia No
Brian Varney Yes
Christina Fillmore Yes
Chelsea Dickens No
Chi Zhang No
Clara Beck No
Aditee Dani No
Doug Kelkoff No
Dhvani Patel No
Filip Kabaj No
Harshal Khanolkar Yes
Iris Wu Yes
Jayashree Vedanayagam Yes
Joe Rickert No
Kyle Lee Yes
Leon Shi No
Lily Hsieh Yes
Lyn Taylor Yes
Martin Brown Yes
Mia Qi Yes
Michael Kane No
Michael Rimler No
Mike Stackhouse No
Min-Hua Jen No
Molly MacDiarmid Yes
Mona Mehraj No
Paula Rowley No
Soma Sekhar Sriadibhatla Yes
Vandana Yadav Yes
Vidya Gopal No
Vikash Jain Yes
Wilmar Igl No
Orla Doyle No
\n\n
\n\n`````\n:::\n:::\n\n\n\n# Agenda\n\n- Welcome new members - Vandana Yadav (Novo Nordisk) and Iris Wu (incyte)\n- CAMIS Advertisement Plan - Harshal\n - White paper\n\n - Molly poster prize\n\n - Conference plan\n- Website\n - MMRM/ Other\n- CAMIS-ONCO / Prep for PHUSE CSS- Soma Sekhar\n- AOB\n - Next meeting 14th Aug. Lyn on vacation, do you want to go ahead or move back 1 week?\n\n# Meeting minutes\n\n**CAMIS Advertisement Plan**\n\n- Blog re: White paper - All to share link with colleagues and like on social media,\n\n- Blog re: Molly poster prize - ACTION: Molly to write\n\n- LinkedIn -\n\n - Should we have our own account (CAMIS linkedIN account) or post through the other organizations. Could post through our own personal accounts & have them repost?\n\n - ACTION : Harshal to post from own personal account and we will assesss reach if we all like it and ask the organizations to repost it.\n\n- Any other social media sites needed to be used? Twitter was popular but not so now so stick with LinkedIn.\n\n- Spread awareness though individual departments working groups\n\n- PHUSE SDE connect - talk / presentation / posters or interactive workshops at connect SDEs.\n\n- PHUSE/ FDA CSS 2024 - Working groups interactive breakout sessions. create a DVOST break out sessions - see [CSS Working Groups (phuse-events.org)](https://www.phuse-events.org/attend/frontend/reg/tOtherPage.csp?pageID=15403&ef_sel_menu=1694&eventID=26)\n\n - Maybe take inspiration from this years event to see what format they take\n\n - ACTION Soma to report back after conference and we can plan for next year\n\n- Social media post on CAMIS engagement at conferences\n\n- Utilise #CAMIS on social media.\n\n- Pharmaverse - Could we get a link from Pharmaverse\n\n - ACTION: Christina to see if we can get a link?\\\n \\\n\n- Conference plan. The conference plan on the website was reviewed and updated\n\n - PHUSE FDA quarterly meeting. CAMIS invited to present. 
Plan to go through White paper concepts & website. Request a replacement to represent FDA on our group, since Kyle Lee no longer at FDA. Promote use of site to them. Likely to be in attendance someone from the Division of analytics and informatics in CEDER.\n\n - PHUSE CSS: Somar producing poster. Also doing a talk on validation of oncology endpoints and why it's important to introduce hybrid programming languanges. Will be representatives present from regulatory authorities\n\n**Website progress: Christina**\n\n- MMRM (Ben Arancibia happy to contribute)\n- Others - Not much new content in last month. Please if anyone time please add content !\n- Should we prioritize any areas, or in getting Python/Julia content. Currently we will just see what content people have, rather than priorizing however re-assess based on growth to see if we need to focus more on a single area and get more volunteers on key areas.\n- FAQ Doc - still in progress\n\n**CAMIS- ONCO: Soma Sekhar**\n\n- Started work on poster due July 28th.\n\n- After presentation could convert to a white paper. ACTION: send out a draft plan for white paper, & lyn to add to agenda for discussion next meeting. Planning to evaluate endpoints for oncology. Table of endpoints required for approval.\n\n**AOB**\n\nNext meeting 14th Aug. 
Lyn on vacation, will move back 1 week.\n", - "supporting": [ - "10July2023_files" - ], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/minutes/posts/10June2024/execute-results/html.json b/_freeze/minutes/posts/10June2024/execute-results/html.json deleted file mode 100644 index b5d584533..000000000 --- a/_freeze/minutes/posts/10June2024/execute-results/html.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "hash": "e322a9848ec1bc2c055139155f9a5e66", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"Repo Content Growth, Conferences 2024 & Advertising, CAMIS-ONCO & Dissertation project kick off\"\ndate: \"10 June 2024\"\noutput:\n html_document: \n toc: true\n toc_float: false\n toc_level: 5\n df_print: paged\n---\n\n\n\n# Attendees\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n`````{=html}\n\n\n \n \n \n\n
\n\n\n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n
attendees 10_Jun_24
Christina Fillmore Yes
Lyn Taylor Yes
Molly MacDiarmid Yes
Brian Varney No
Chi Zhang Yes
Orla Doyle No
Harshal Khanolkar Yes
Lily Hseih No
Filip Kabaj No
Martin Brown No
Min-Hua Jen No
Sarah Rathwell Yes
Kasa Andras Yes
Aditee Dani No
Keaven Anderson Yes
Benjamin Arancibia Yes
Wilmar Igl No
Vikash Jain No
Mia Qi No
Leon Shi No
Vandaya Yadav No
Stephen McCawille Yes
Vikrant Vijay No
Vidya Gopal No
Dhvani Patel No
Kyle Lee No
Chelsea Dickens No
David Bosak Yes
Michael Kane No
Lukas Brausch No
Michael Walshe Yes
Seemani Abhilipsa Yes
Aiming Yang No
Cuifeng Yin No
Todd Coffey Yes
Jayashree Vedanayagam Yes
Ashwath Gadapa No
Miriam Amor No
Anwesha Roy No
Samrit Pramanik No
Agnieszka Tomczyk No
Prem Kant Shekhar No
Sunil No
Kate Booth No
Peilin Zhou No
\n\n
\n\n`````\n:::\n:::\n\n\n\n# Agenda & Minutes\n\n## Monthly Contributions update -Christina\n\nThank you to everyone whose contributed this month, Special shout outs to Seemani, Lukas, David & Agnieska and anyone else we've missed who completed pull requests this month.\n\nNOTE that if your work requires a package not yet in the renv.lock file, then you need to install the package and do renv::update() to update the lock file. When you do the pull request, check 2 files change (i.e. the renv.lock file & your file you are submitting).\\\n\nChristina plans to update the renv control method soon which will hopefully avoid some of the package / renv issues going forward. Remember if you do have problems with the install.packages() & update to renv lock file just let Christina know. **ACTION: Christina**: To add to the contributions guidance once new method agreed.\n\nIt was noted by Seemani, that her MANOVA Python content was loaded to the folder, but not visible on the website. **ACTION: Christina**: to update the TOC to point to the material.\n\nPlease can people when picking up new pieces to work on update the table of assignments saved in the following [readme](https://github.com/PSIAIMS/CAMIS/tree/main/non_website_content) Or ask Lyn/ Christina to update it for you. This ensures we dont have multiple people working at the same time duplicating effort, when they could be working together.\n\n## Conferences update - Lyn\n\nReminder that if you are attending a conference to represent CAMIS to add the detail [here](https://psiaims.github.io/CAMIS/publication/conference.html).\n\nWe have updated the page linking to much of the content presented in 2024 and showing we are currently presenting at 7 seminars/ SDEs/ conferences in 2024.\n\nChristina also presented at the Merck R users group last month. **ACTION: Christina**: to add to the conferences list to show we did this. 
If anyone else wants a presentation to their companies please let her know.\n\nStephen McCawille is also attending PHUSE EU as well as Christine & Agnieska, so the 3 of them can meet up in person.\n\nAndras Kasa - informed us that there is a PHUSE SDE at UCB in Brussels in september. Details can be found [here](https://www.phuse-events.org/attend/frontend/reg/thome.csp?pageID=41350&eventID=65&CSPCHD=001001000000PhrBQ2idz0rcRJRn8sp7vIfIEqWC1RCufjt3kK). Contact UCB biosciences team: Christophe.Praet\\@ucb.com and marc.derycke\\@ucb.com if we have someone who could present at this meeting.\n\nUnfortunately Soma and Vikash were unable to present the workshop at PHUSE CSS last week, however Mike Stackhouse kindly stood in for us, and led a round table discussion about the project. Huge Thank you to Mike for his support and last minute help so we could continue with a session.\n\n**ACTION: Volunteer please!!** to attend and present at this SDE in person as it would be really good for CAMIS to be presented at this event.\n\nKeevan asked if we had a single slide to advertise CAMIS. **ACTION: Lyn** to load single slide to non-website content and add a link to it on conferences tab. Can be found [here](https://github.com/PSIAIMS/CAMIS/tree/main/non_website_content/conferences/2024/PHUSE%20CSS%20CAMIS%20Single%20slide%202024.pptx).\n\n**ACTION:** Christina : Move Phuse-EU2023 pptx into 2023 folder.\n\n## Pharma - SUG linkedin Post - Chi\n\nChi highlighted the incredible post from Phil Bowsher advertising the CAMIS project. This has been seen by over 400 people with 38 reposts to date ! 
It has also been commented on by renowned statisticians all enthusiastic about CAMIS, so it's great to have the awareness of the project growing.\n\n\n\n**ACTION: Harshil** to repost/share in a couple of weeks to maximize the reach of the post.\n\n##Update from Survival team - Christina\n\nSoma has officially stepped down as CAMIS-ONCO lead, so Christina will act as this for the time being.\n\nKick off meeting has occurred last month & the team are now meeting monthly. The first objective is for people to bring together information on the non-proportional hazards models.\n\nIn future, would be great to have someone take on the lead / co-lead if they feel strongly about leading this, but needs someone dedication to making progress !\n\n##Dissertation scheme kick off - Chi / Lyn/Christina##\n\nWe have launched a new page [here](https://psiaims.github.io/CAMIS/publication/dissertation.html) which will provide ideas for students wanting to look at dissertation projects involved in comparing analysis method differences across software. If you have an idea for a project that you want to research but dont have time to investigate the project yourself, then you could write an abstract and save it to this page, where students looking for projects (or academics looking on behalf of students), could find ideas for projects. Expectation is that most would be MSc level (summer 3 month project), however longer PhD style project could also be offered. Please reach out to us if you are interested in contributing to this area.\n\n## 2024 Goals Reminder!\n\n- Increase to 45 closed by the end of the year - Currently at 37 closed.\n- In addition, we'd like to improve some of the incomplete content such as MMRM - Stephen Waugh dissertation project launched, if accepted would run sept 24-july25.\n- Create a webpage for listings dissertation projects - Ongoing\n\n## AOB\n\nOur Blog page dosn't appear in date order or aligned! If anyone can fix let us know ! 
Saved under News [here](https://psiaims.github.io/CAMIS/blogs/)\n", - "supporting": [ - "10June2024_files" - ], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/minutes/posts/10Mar2025/execute-results/html.json b/_freeze/minutes/posts/10Mar2025/execute-results/html.json deleted file mode 100644 index b10b44ecd..000000000 --- a/_freeze/minutes/posts/10Mar2025/execute-results/html.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "hash": "dc2446a74fdb38693430e0ef46fb1fac", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"GDPR for PHUSE WGs & Latest repo updates\"\ndate: \"10 Mar 2025\"\noutput:\n html_document: \n toc: true\n toc_float: false\n toc_level: 5\n df_print: paged\n---\n\n\n\n# Attendees\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n`````{=html}\n\n\n \n \n \n\n
\n\n\n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n
attendees 10_Mar_25
Lyn Taylor Yes
Christina Fillmore Yes
Chi Zhang No
Molly MacDiarmid Yes
Benjamin Arancibia No
Michael Kane No
Martin Brown No
Stephen McCawille Yes
Miriam Amor No
Peilin Zhou No
Samrit Pramanik No
Brian Varney Yes
Vikrant Vijay No
Yannick Vandendijck Yes
Vikash Jain Yes
Michael Walshe No
Anwesha Roy No
Min-Hua Jen Yes
Jaskaran Saini Yes
Mariusz Zieba No
Chelsea Dickens No
Tejas Pandit No
Ashwath Gadapa No
Sarah Brosens No
Kirsten Findlay No
\n\n
\n\n`````\n:::\n:::\n\n\n\n# Agenda & Minutes\n\n- Blog Update (Molly / Vikash)\n\n - Yannick's Tobit regression blog was sent to PSI enews, PHUSE & will be on the CAMIS blog page shortly.\n\n- Content updates in the last month ! (Christina / Lyn / Yannick)\n\n - Cox- PH update for Ties=Exact & Convergence (Nan Xiao & Abi terry)\n\n - V 3.2-1.3 of package changed in R, how ties can be handled in `survival::coxph` function. Now instead of options for Breslow, Efron & discrete, now R can do Exact method & this matches SAS. Also, page had added a description of convergence methods to explain differences caused by convergence.\n\n - CIs for Props (Lyn)\n\n - Now has a section using `desktools:BinomDiffCI` for 2 independent samples\n\n - Logistic regression (Lyn)\n\n - Now complete. (NOTE discussion the call regarding how package authors can write their own\\\n S3 class R objects which overwrite defaults.... however, something like confint.default() can still be used to revert to the default wald method, incase of the confint() profile likelihood method.\n\n This is why it's dangerous to call variables function.variable as you may overwrite a special class of objects in R.\n\n - Reference based Multiple imputation joint modelling continuous data (Yannick)\n\n - Leads the reader through R, SAS & the comparison. Full description of the LSHTM 5 SAS macros for this, vs R and found to agree!\\\n NOTE: that for Rbmi, Daniel Sabnane Bove, will be updating the package to include MMRM (at the moment it only does ANOVA).\n\n - Sample size for Bioequivalence (Andrey Ogurtsov) - TOST sample size added.\n\n- RE: the Table of contents, feel free to suggest changes for the required categories vs content.\n\n- NOTE: that we still have some pages, which could be classed as quick wins which are simple to create like SAS page for poisson/negative binomial. 
If you want to volunteer for anything add and issue or check for existing [issues](https://github.com/PSIAIMS/CAMIS/issues) and assign yourself (or add comment that you are working on it.)\n\n- Repo Tech\n\n - Some complex methods may slow repo creation down.. Ok for now.\\\n Could update the running so only re-runs if code changes or only re-run if any of the packages change that the code uses. (if that's possible).\n\n - Is it useful, to use Riskmetric to assess quality of package? Possibly not, because riskmetric doesn't handle stats packages very well. something like survival can appear 'risky' but it's just because it was developed so long ago & hasn't been updated because it doesn't need to be updated ! Maybe add page on CAMIS talking about risk assessment of stats packages - how to assess trustworthyness.\n\n- GDPR for PHUSE WGs - **FORM IS BY CLICKING ON WORD FORM IN THE EMAIL.**\n\n The PHUSE Office has been reviewing our GDPR requirements and the information we hold on Working Group Members. As a result of this review, we have created a Working Group member form to capture the information we need to run effective and impactful Working Groups and project teams. As part of this we need to capture your consent to both hold basic information (name, email & company) and to use this in the context of PHUSE Working Groups.\n\n Consequently, it is now a mandatory requirement that all members of PHUSE Working Groups complete this [\\*form\\*](https://forms.office.com/Pages/ResponsePage.aspx?id=xeEJLj1cykuXxFc6VpX1UKYKFE6i0SdEjGxMC4fPoj9UOUVUWkRQN1JXRE9BVU5UVTZIUUFHTEszMS4u) to enable your continued participation in project teams and Working Groups. Unfortunately, this means that those who do not respond, will need to be removed from the Working Groups. 
The deadline for completion of the form is **20 March 2025.** Please select all Data Visualisation & Open Source Technology projects from the list that you participate in.\n\n Additionally, as part of the form there is an opportunity to provide feedback on your experiences in PHUSE Working Groups. Whilst this is not mandatory, we would appreciate any feedback, particularly around any barriers/challenges you face that limits your participation, or any general feedback, both good and bad.\n\n If you have any questions or feedback on the form, please contact the office at [workinggroups\\@phuse.global](mailto:workinggroups@phuse.global \"mailto:workinggroups@phuse.global\"). Thank you for your attention to this matter and we very much look forward to your continued support in the future.\n\n- AOB\n", - "supporting": [ - "10Mar2025_files" - ], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/minutes/posts/10Oct2024/execute-results/html.json b/_freeze/minutes/posts/10Oct2024/execute-results/html.json deleted file mode 100644 index 11ce0a5ec..000000000 --- a/_freeze/minutes/posts/10Oct2024/execute-results/html.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "hash": "cdca195c81acdd3305300afc2b370284", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"Gen AI for SAS-->R code, Achievements + 2025 objectives\"\ndate: \"14 Oct 2024\"\noutput:\n html_document: \n toc: true\n toc_float: false\n toc_level: 5\n df_print: paged\n---\n\n\n\n# Attendees\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n`````{=html}\n\n\n \n \n \n\n
\n\n\n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n
attendees 14_oct_24
Christina Fillmore Yes
Lyn Taylor Yes
Molly MacDiarmid Yes
Brian Varney Yes
Chi Zhang No
Orla Doyle No
Harshal Khanolkar Yes
Lily Hseih No
Filip Kabaj No
Martin Brown No
Min-Hua Jen Yes
Sarah Rathwell No
Kasa Andras No
Aditee Dani No
Keaven Anderson No
Benjamin Arancibia No
Wilmar Igl No
Vikash Jain No
Mia Qi Yes
Leon Shi No
Vandaya Yadav No
Stephen McCawille Yes
Vikrant Vijay No
Vidya Gopal No
Dhvani Patel No
Kyle Lee No
Chelsea Dickens No
David Bosak No
Michael Kane Yes
Lukas Brausch Yes
Michael Walshe Yes
Seemani Abhilipsa No
Aiming Yang No
Cuifeng Yin No
Todd Coffey No
Jayashree Vedanayagam Yes
Ashwath Gadapa No
Miriam Amor No
Anwesha Roy Yes
Samrit Pramanik Yes
Agnieszka Tomczyk No
Prem Kant Shekhar Yes
Sunil Yes
Kate Booth No
Peilin Zhou No
\n\n
\n\n`````\n:::\n:::\n\n\n\n# Agenda & Minutes\n\n## Gen AI to convert SAS code to R code (Brian)\n\nAI (such as chatgpt) can be used to convert SAS code to R or vice-versa. You can even upload a zip file containing multiple programs and it will unzip & convert. It even lists the packages it thinks it needs & can convert SAS macros into an R function (for example).\n\nIt's not 100% reliable, and it does need work once translated but huge help if starting from scratch.\n\nSome caveats:\n\n- All code written is in lower case so you'd have to check that if your variable names are in mixed or upper case (as R case sensitive).\n\n- chatgpt would also store your data, so don't load anything up that is company sensitive!\n\n## Visibility of our blogs /Future blogs (Harshal)\n\nDiscussed who sees the blogs, only 2 / 17 on the call saw it. Somehow we need to find a way to get more visibility! Any ideas let us know, but plan to do more blogs in future and will help if all share them.\n\n## Conferences update (Lyn/All)\n\n- PHUSE EU connect meet up ! (Christina to arrange meeting face to face)\\\n Michael Walshe, Anwesha Roy, Stephen Mccawille, Kate Booth, Agnieska Tomczyk are going\n- PHUSE US connect (Cuifeng? 
): Lyn to follow up if anyone going.\\\n Objective 2025: expand our team members that we have in the USA.\n- PHUSE FDA CSS will have a event in Utrecht, the netherlands running side by side with the Silver Springs, Maryland\n- PSI 2025 deadlines are as follows:\n - Oral abstract submission -22nd November 2024\n - Notification - no later than 16th December 2024\n - Poster abstract submissions -25th February 2025\n\n## Content updates (Chrstina / All)\n\nDiversity alliance hackathon will be addressing/reviewing open issues (especially 'good first issue' ones) To help with this event, please raise any Issues - preferably small changes that re needed for the Diversity alliance hackathon to use as example issues to open pull requests to resolve --\n\nOpen pull request = NONE ! Great work Christina!\\\nHuge Achievement for 2024: 200 closed pull requests\n\nSurvival (Christina) Meetings ongoing, if you want to join contact Christina. Objective 2025: Focus is on Accelerated failure time models.\n\nMMRM (Lyn) Objective 2025: to get updated such that the R, SAS & Comp are consistently written\n\nIf anyone else is assigned an area to research and needs help or can no longer commit to completing content just let us know and we can get someone to assist you or work to re-assign it.\n\n## Previous Actions Items Update\n\n- Add page on how to run/conduct a hackathon: Perhaps drop this action unless someone thinks it's useful? Please let us know if you want us to add this else we'll not maintain the page & wind it down\n\n- Add a hackathon page: Question to ALL -- do people want to do hackathons in their company to encourage open source? If we could write the guidance, then could link to it from linkedin. To a webinar or guidance. But if wont be used, wont put this highest on our list of To Do!\n\n- Finding documentation for 'old' versions of the base R \"stats\" package (sarah/ christina) Hard to find this documentation. 
For this reason, need to use Eval: TRUE, so it runs using latest version and the output will be the latest output. However, in our comparison -- often these are typed in, especially as you can't get live output run from SAS. So if Table is not current... we wont know!\n\nObjective 2025-2026!: Rethink this in 2025. Can we add 'testthat' expect equals. So we are notified if something changes? Would have write/save SAS number in dataset to compare electronically with R. For tables with no numbers (listing defaults), we could check default as well. Could also not reproduce entire repo each time... but then run risk of things breaking (only running at snapshots of different versions). Printing of the Versions are now visible on the templates so please use the template so the versions appear In the run content\n\n## Goals\n\n45 issues closed by end of year (surpassed already as 70 closed now!) NOTE: a lot are open because we opened as examples for the diversity alliance hackathon- will be closed in next few weeks after that event. Would like 1 page by end 2024. 200 pull requests closed out!!\n\nMMRM content: Stephen Waugh assigned to work on this until June 2025 as his dissertation project\n\nWebpage for listing dissertation projects? 
TBC if this is needed or doing OK assigning through volunteers.\n\n## New 2025 Goals (and some carried forward!)\n\n- expand our influence (particular through representation in USA)\n\n- Advance our MMRM pages\n\n- Advance our survival pages\n\n- Reconsider our infrastructure with respect to\n\n - renv\n\n - live running of repo and knowing when a version changes our content\n\n - making it easier for people to contribute\n", - "supporting": [ - "10Oct2024_files" - ], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/minutes/posts/11Aug2025/execute-results/html.json b/_freeze/minutes/posts/11Aug2025/execute-results/html.json deleted file mode 100644 index 56b96630c..000000000 --- a/_freeze/minutes/posts/11Aug2025/execute-results/html.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "hash": "cceb72cf4529a7cd5a501c630824c9bc", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"Tipping point, ISBC Poster\"\ndate: \"11 Aug 2025\"\noutput:\n html_document: \n toc: true\n toc_float: false\n toc_level: 5\n df_print: paged\n---\n\n\n\n# Attendees\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n`````{=html}\n\n\n \n \n \n\n
\n\n\n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n
attendees 11_Aug_25
Lyn Taylor Yes
Christina Fillmore No
Chi Zhang Yes
Molly MacDiarmid No
Benjamin Arancibia No
Michael Kane No
Martin Brown No
Stephen McCawille No
Miriam Amor Yes
Peilin Zhou No
Samrit Pramanik No
Brian Varney Yes
Vikrant Vijay No
Yannick Vandendijck Yes
Vikash Jain No
Michael Walshe No
Anwesha Roy No
Min-Hua Jen No
Jaskaran Saini Yes
Mariusz Zieba Yes
Chelsea Dickens No
Tejas Pandit No
Ashwath Gadapa Yes
Sarah Brosens No
Kirsten Findlay Yes
\n\n
\n\n`````\n:::\n:::\n\n\n\n# Agenda & Minutes\n\n**New Content This Month**\n\n- Christina's guidance for how to select an appropriate R package to use for your work is now available [here](https://psiaims.github.io/CAMIS/contribution/Package-Review.html). You can also access it from [contribution](Get%20Involved) page, under the 'Asking for help' section using the short guidance link.\n\n- Sarah talked us through her Tipping point analysis [R page](https://psiaims.github.io/CAMIS/R/tipping_point.html) using {rbmi}. This page now contains a description of what tipping point analysis is, variations you can use and a case study example of how to investigate various delta to find the tipping point. The page demonstrates really helpful plots and easy to write code (so easy compared to SAS)!. Keep watch of the repo table of contents for arrival of the SAS page & Comparison page in the near future. With enough simulations R & SAS were found to match to 3 decimal places.\n\n- Lyn raised that Christina and her are still finding interesting issues with even simple analyses such as ANOVA. More information will be added shortly to the [ANOVA](ANOVA) page with regards to contr.treatment (the default which should not be used due to non-orthogonal contrasts), and the rationale of why we use contr.sum and contr.poly instead !\n\n- Miriam is writing GLMM pages including the methods using Laplace, GHQ & PQL, and will present this work at the PHUSE EU conference in Hamburg in November. Watch the repo for these pages in the near future.\\\n\n**Conferences.**\n\n- Miriam & Yannick are both presenting at PHUSE EU, so reach out to them if you are going and would like to meet up.\n\n- Yannick showed the team his ISBC CAMIS summary poster which will soon be available on the repo. 
Remember if you are attending any conferences or need to present a poster there is content you can use on the repo under [Conferences](Conferences).\n", - "supporting": [ - "11Aug2025_files" - ], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/minutes/posts/11Mar2024/execute-results/html.json b/_freeze/minutes/posts/11Mar2024/execute-results/html.json deleted file mode 100644 index a0223b4d1..000000000 --- a/_freeze/minutes/posts/11Mar2024/execute-results/html.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "hash": "b833f95ada53647171d17e8f4e3db9b4", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"How to select packages, Content & Conferences\"\ndate: \"11 Mar 2024\"\noutput:\n html_document: \n toc: true\n toc_float: false\n toc_level: 5\n df_print: paged\n---\n\n\n\n# Attendees\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n`````{=html}\n\n\n \n \n \n\n
\n\n\n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n
attendees 11_Mar_24
Christina Fillmore Yes
Lyn Taylor Yes
Molly MacDiarmid Yes
Brian Varney Yes
Chi Zhang No
Orla Doyle No
Harshal Khanolkar No
Lily Hseih Yes
Filip Kabaj No
Martin Brown Yes
Min-Hua Jen No
Sarah Rathwell No
Kasa Andras Yes
Aditee Dani No
Keaven Anderson Yes
Benjamin Arancibia Yes
Wilmar Igl No
Vikash Jain Yes
Mia Qi No
Leon Shi Yes
Vandaya Yadav No
Stephen McCawille Yes
Vikrant Vijay No
Vidya Gopal No
Dhvani Patel Yes
Kyle Lee No
Chelsea Dickens Yes
David Bosak Yes
Michael Kane Yes
Lukas Brausch Yes
Michael Walshe Yes
Seemani Abhilipsa No
Aiming Yang No
Cuifeng Yin No
Todd Coffey No
Jayashree Vedanayagam No
Ashwath Gadapa No
Miriam Amor No
Anwesha Roy No
Samrit Pramanik No
Agnieszka Tomczyk No
Prem Kant Shekhar No
Sunil No
Kate Booth No
Peilin Zhou No
\n\n
\n\n`````\n:::\n:::\n\n\n\n# Agenda & Minutes\n\n- Thank you to all those who submitted content this month, especially Chi, David and Filip who all helped to complete new sections. The top section is now almost complete and the first python content will be loaded in the next few weeks which is a great milestone for the project. Watch out for the new column appearing in the repository Table of contents!\\\n Please remember even if you dont want to contribute to a section on your own, you can still review current content and propose improvements.\n\n- Conference planning. Reminder that if you are attending a conference to represent CAMIS to add the detail [here](https://psiaims.github.io/CAMIS/Conferences.html). We need to ensure we continue to advertise the project to encourage people to use the repo and add content. So far in 2024, only 3 conferences being attended, so if you are interested in attending a conference just reach out to Lyn & Christina who can help you with an abstract if needed.\\\n \\\n Vikash fed back about the PHUSE US Connect conference. CAMIS was mentioned by Michael Rimler in the keynote speech and Soma/Vikash presented a poster so we received great publicity. Brian also attended meeting Soma & Vikash face to face. Thank you to all of you. The abstract for PHUSE FDA CSS has been written by Soma and submitted so all on track.\n\n- Reminder to complete CAMIS membership form\\\n \n\n NOTE:  We will only collect: team members name, email address, organization, software used, interested in oncology, key interests and affiliations to stats organizations.  The email address is solely for the CAMIS leadership team, to make sure you are included in CAMIS emails.\\\n We ask on the form: \"Are you happy for your Name and company and interests to be visible on the CAMIS website. Note that email addresses will not be visible\". \n\n If you do not give permission then your name will not appear on the CAMIS repo as a CAMIS team member. 
If you do give permission, your name and company and interests will appear but your email addresses will be hidden from public view. At any time you can ask to be removed from the website team list by emailing me.\n\n- Selection of packages: As we continue to grow the number of packages stored in the repository is growing. We realized that this may lead to conflicts and issues for the repo running. We also dont really want packages installed that are no longer used, known to have issues. Therefore if you are writing up an analysis and there are two packages doing similar things, we would like to request that you select the one that is the most commonly used and best quality (i.e. lowest risk). Risk can be assessed using the {riskmetric} package and {riskassessment} application, using the default scoring, but packages risk should also be considered in context of the individual components such as being actively maintained, bug fixes, code coverage, with references, with a github repo or website, by a trusted author and with results being correct vs stats method.\n\n It can be very useful to test multiple packages if they are able to do slightly different analysis (i.e. with different options), in these cases it's useful to include a Table at the top of the comparison summary qmd, to show which package does which analysis, see [Comparison of 1 sample t-test](https://psiaims.github.io/CAMIS/Comp/r-sas_ttest_Paired.html) as an example.\n\n Packages that are inferior to others, should not be loaded to the repo, but instead you can add a textual summary of your findings. 
For example, \"Package X also gives the same results\" or \"Package X can be used but doesn't have options to do X and Y\" or \"We do not recommend Package X as during testing, the results are not in line with the statistical methodology\".\\\n NOTE that we agreed not to have a library of packages \"approved\" for CAMIS, RENV stores the lock file of the packages in our repo and we do not want to be seen to giving recommendations for/against packages, other than factual evidence based on the analysis they produce.\n\n It was noted that when you load the RENV.lock file, it may give a \"error downloading\" bioconductor warning, this can be ignored, and should not cause issue if you aren't using these packages. In the future, these packages will be removed as dependancies from mmrm and the issue will resolve.\n\n- AOB\n\n - We had a discussion surrounding communication methods currently used on the project. RE: using teams vs emails, feedback was mixed, Argument for Teams was that it keeps all discussion in one place, and doesn't fill you in box, but arguments against were that as you often have to log out of your company teams, to log into the PHUSE one, messages are often missed / ignored.\n\n We agreed to perhaps send 2 emails a month, the agenda, but also any other important updates that occurr during the month & minutes. This will be supported by also posting on social media. Although small sample, we assessed how many people observed the recent post RE: Soma's poster (only Lyn & Christina of those on the call saw the post), however when asked re: Other PHUSE posts 7 were getting them. 
Leadership team to discuss and see if we need to post using PHUSE admin?\n", - "supporting": [ - "11Mar2024_files" - ], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/minutes/posts/11Sept2023/execute-results/html.json b/_freeze/minutes/posts/11Sept2023/execute-results/html.json deleted file mode 100644 index 0b0f85877..000000000 --- a/_freeze/minutes/posts/11Sept2023/execute-results/html.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "hash": "f972e065ad912875abf950005650c281", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"Conference updates & feedback, FDA quartely meeting, CAMIS-ONCO workshop\"\ndate: \"11 Sep 2023\"\noutput:\n html_document: \n toc: true\n toc_float: false\n toc_level: 5\n df_print: paged\n---\n\n\n\n# Attendees\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n`````{=html}\n\n\n \n \n \n\n
\n\n\n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n
attendees 11_sep_2023
Aiming Yang No
Ben Arancibia No
Brian Varney Yes
Christina Fillmore No
Chelsea Dickens Yes
Chi Zhang No
Clara Beck No
Aditee Dani Yes
Doug Kelkoff No
Dhvani Patel Yes
Filip Kabaj No
Harshal Khanolkar Yes
Iris Wu No
Jayashree Vedanayagam No
Joe Rickert No
Kyle Lee No
Leon Shi No
Lily Hsieh Yes
Lyn Taylor Yes
Martin Brown Yes
Mia Qi No
Michael Kane No
Michael Rimler No
Mike Stackhouse No
Min-Hua Jen No
Molly MacDiarmid Yes
Mona Mehraj Yes
Paula Rowley No
Soma Sekhar Sriadibhatla Yes
Vandana Yadav No
Vidya Gopal Yes
Vikash Jain Yes
Wilmar Igl No
Orla Doyle No
\n\n
\n\n`````\n:::\n:::\n\n\n\n# Agenda\n\n**PHUSE SDE: Missisauga:** June 8th feedback: Jayashreee not available today so see if any feedback at next meeting\n\n**PHUSE FDA CSS Poster acceptance, white paper planning, PHUSE US Connect planning & CAMIS ONCO update:** Soma Sekhar / Vikash Jain\n\nPoster presentation is next week. Harshil will advertise on social media.\\\nSubmitted a abstract for poster & presentation/workshop for US Connect 2024. White paper ongoing. Python code for CAMIS-ONCO being created. Workshop for PHUSE CSS 2024 will need planning. DVOST subgroup ½ day workshop **ACTION** Soma/Vikash work on the CSS workshop For 2024 .\n\nVikash was at the PHUSE SDE Boston last week. Met Michael Rimler and he's introduced CAMIS to the board so they are aware of it.\n\n**RSS conference feedback** - Lyn Suggestions were to get more academics, university representatives involved with the project as it also makes for nice disseration projects for BSc/MScs.\n\n**IBS CEN2023 conference** - Feedback from Chi IBS CEN2023 conference (biometric society central Europe network held in Basel), CAMIS was mentioned along with other R working groups in a talk. The talk was about software engineering working group, the one that developed MMRM. Quite encouraging!\n\n**Social Media update** : Harshal\\\nCall for volunteers.\n\n1) **Software Engineering Working Group** --\n 1. WG goals are:\n\n 1. Collaborate to engineer selected R-packages which will fill in gaps in the open-source statistical software landscape, and to promote software tools designed by the working group through publications, conference presentations, workshops, training courses, and others.\n\n 2. Develop best practices for engineering high-quality statistical software, and promote the use of best practices in the broader Biostatistics community via public training materials.\n\n 3. Communicate and collaborate with other R software initiatives.\\\n\n 2. 
Working group HomePage - https://rconsortium.github.io/asa-biop-swe-wg/\n\n 3. Co-chairs - Daniel Sabanes Bove and Ya Wang.\n\n\n\n2) **Statistical Methods in Oncology Scientific Working Group** -- WG goals are:\n 1. Encourage increased use of systematic oncology assessment approaches and selection of best methods through training and education\n\n 1. Gain clear understanding of current regulatory environments in oncology\n\n 2. Prepare a library of recommended methods including innovative methods\n\n 3. Understand commonly used and innovative methods\n\n 4. Collect and share experiences on using innovative designs\n\n 5. Put together points to consider for oncology innovative designs' implementation\n\n 6. Develop new methods if needed\n\n 2. Educate the broader statistical community to understand and contribute to this important area\n\n 3. Increase statisticians' leadership roles in cross-functional collaboration\n\n 4. Communicate statistical perspectives to larger clinical trial community\n\n 5. Co-chairs - qjiang\\@seagen.com and olga.marchenko\\@bayer.com\n\nAdittee volunteered to join the Stats methods in oncology Scientific working group to represent us. ACTION: Aditee to reach out to co-chairs and ask to join, then feedback at our meetings on if there are opportunities to collaborate.\\\nAlso looking for volunteers to work on the **Software Engineering WG &** **CAMIS ONCO- white paper**, Harshil will work with Vikash & Soma to request that PHUSE share to advertise what we are looking for in a linkedIn post.\n\n**Preparation for PHUSE FDA Quarterly meeting** 27th sept: Slides / Survey questions : Lyn / harshil Tomorrow we can get the data back from questionnaire.   ACTION : Lyn/Harshil to meet, summarize survey & prepare slides.   Friday.\n\n**Website** Christina/ Chi / Jayashree\n\n- Currently, needs fixing pending RENV issue. 
So will be slight delay on getting content on the website\n\n- Content curation lead items - Chi / Jayashree Made a great start to close discussions & address actions which have questions on them.\n\n- MMRM update - No update ACTION: Lyn to follow up.\n\n- github training plan (R/Pharma workshop & PSI training course) - ongoing prep for workshop & course through PSI AIMS team.\n\n- Agnieszka (PAREXEL) and Chi -- working on Wilcoxon test content for paired & unpaired data.\n\n**Upcoming conference planning**\n\n- PHUSE SDE New York: Oct 16th : Aiming\n\n- PHUSE US Connect: Soma/Vikash\n\n- R/Pharma -- Christina?\n\n- SESUG (South East SAS user group) late october 2023, Brian will present on CAMIS.\n\n- North Carolina - SDE if anyone wants to volunteer to attend let us know.\n\n**AOB** - None.\n", - "supporting": [ - "11Sept2023_files" - ], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/minutes/posts/12Aug2024/execute-results/html.json b/_freeze/minutes/posts/12Aug2024/execute-results/html.json deleted file mode 100644 index fd13671e7..000000000 --- a/_freeze/minutes/posts/12Aug2024/execute-results/html.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "hash": "05977e45043abc3564235d49c7178ff3", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"Novartis Hackathon & Content growth\"\ndate: \"12 Aug 2024\"\noutput:\n html_document: \n toc: true\n toc_float: false\n toc_level: 5\n df_print: paged\n---\n\n\n\n# Attendees\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n`````{=html}\n\n\n \n \n \n\n
\n\n\n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n
attendees 12_Aug_24
Christina Fillmore Yes
Lyn Taylor Yes
Molly MacDiarmid Yes
Brian Varney Yes
Chi Zhang Yes
Orla Doyle No
Harshal Khanolkar Yes
Lily Hseih No
Filip Kabaj No
Martin Brown No
Min-Hua Jen No
Sarah Rathwell No
Kasa Andras No
Aditee Dani No
Keaven Anderson Yes
Benjamin Arancibia No
Wilmar Igl No
Vikash Jain No
Mia Qi No
Leon Shi No
Vandaya Yadav No
Stephen McCawille Yes
Vikrant Vijay No
Vidya Gopal No
Dhvani Patel No
Kyle Lee No
Chelsea Dickens No
David Bosak Yes
Michael Kane No
Lukas Brausch No
Michael Walshe Yes
Seemani Abhilipsa Yes
Aiming Yang No
Cuifeng Yin Yes
Todd Coffey No
Jayashree Vedanayagam Yes
Ashwath Gadapa Yes
Miriam Amor Yes
Anwesha Roy Yes
Samrit Pramanik Yes
Agnieszka Tomczyk Yes
Prem Kant Shekhar No
Sunil No
Kate Booth No
Peilin Zhou No
\n\n
\n\n`````\n:::\n:::\n\n\n\n# Agenda & Minutes\n\n**Welcome**\n\nWelcome to our new team members on the call.  We have an agenda which is sent prior to the meeting, but please do please ask if you have questions or need clarification regarding what we are talking about.\n\n### Repository Content\n\n**Novartis Hackathon**\n\nThe Hackathon was a great success, with CAMIS receiving it's largest amount of pull requests in any one month to date ! Christina fed back that it was not as much work as you might think, as although they had 'office hours' to help people and discuss things, actually not many people used them. Instead, they gave a brief introduction to everyone attending, explaining git / github / pull requests etc, then assigned everyone a topic / mini projects to investigate and let them do the research in their own time.\n\nOrla & team reviewed each others pull requests prior to submission to CAMIS repo. Christina worked alongside the team approving the content to go live.\n\nAs it went so well, Christina is planning another hackathon style event in October, but it will be on wider concept, focused on people who haven't done pull requests to get more involved with open source projects. We can submit CAMIS topics for them to complete.\n\nIf you or your company interested in hackathon ask christina.\n\n**Survival**\n\nWill continue to meet and discuss survival needs, some Accelerated failure time content going in as part of hackathon.\n\n**Assignment table / Raising issues**\n\n**ACTION:** Lyn to remove and we will use issues instead, If you have been on a issue for \\>6 months and have not provided us with any feedback on your progress, then you will be unassigned. 
Obviously if you need more time and are still working on it, just let Christina or another one of the project leads know.\n\nIf you want to research a topic, please add an issue or issue comment to tell us what you are working on.\n\n**Ensuring content quality & cross page cohesiveness**\n\nTo date, focus was on population of the pages we were missing, however as we move towards having content available, we will need people to review that content to ensure it is of high quality and makes sense. Ideally, the same dataset would be run through R and SAS showing options, then the comparison would discuss the discrepancies in the results obtained and perhaps the differences in default options / available options.\n\nWe now have a template for the SAS, and R pages.. and a different one for the comparison pages. This may help us with consistency. Please use the templates if you are starting the work now, it's Ok to do a PR for anything you are already working on, even if the template wasn't used.\n\n### **Conferences**\n\n- PHUSE EU Brussels 23rd Sept - Qian Wang (Merck) will attend.\n\n- R/Pharma APAC track -- Samrit has submitted an abstract so hopefully will be able to represent us.\n\n- SouthEast SAS User Group (SESUG) -- Bethesda: Brian will give us a mention.\n\n- PHUSE EU -- Stephen Mccawille, Anwesha Roy, Agniekska Tomczyk & Christina Fillmore are all attending PHUSE EU -- if you are attending let Christina know and she will arrange for CAMIS team to meet up for a cuppa & chat at the conference.\n\n- Phuse US Connect November: Maryland. Cuifeng Yin may be able to attend. Lyn asked for volunteers to represent us a US connect and maybe CSS? **ACTION :** ask PHUSE re: getting a workshop or seminar for CAMIS if we can find volunteers, TBC if at (US CONNECT / or CSS? Or both? )\n\n- Chi provided feedback from UseR! We may consider using a better title for abstracts, not just CAMIS. 
The talk was put into a community stream instead of a multilingual programming stream, so may have got better attendance, but still it was well received. 1 person attended requested that they would like to write an article on CAMIS, so Chi will work with them on that.\n", - "supporting": [ - "12Aug2024_files" - ], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/minutes/posts/12Dec2022/execute-results/html.json b/_freeze/minutes/posts/12Dec2022/execute-results/html.json deleted file mode 100644 index 5e00fa446..000000000 --- a/_freeze/minutes/posts/12Dec2022/execute-results/html.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "hash": "e3a4c49caccfa3a666c517de24837431", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"Restart Meeting\"\ndate: \"12Dec2022\"\noutput:\n html_document: \n toc: true\n toc_float: false\n df_print: paged\n---\n\n\n\n# Attendees\n\n\n\n::: {.cell}\n::: {.cell-output .cell-output-stderr}\n\n```\nNew names:\nRows: 34 Columns: 14\n── Column specification\n──────────────────────────────────────────────────────── Delimiter: \",\" chr\n(13): attendees, 12_dec_2022, 23_Jan_2023, 13_feb_2023, 13_mar_2023, 17_... lgl\n(1): ...14\nℹ Use `spec()` to retrieve the full column specification for this data. ℹ\nSpecify the column types or set `show_col_types = FALSE` to quiet this message.\n• `` -> `...14`\n```\n\n\n:::\n\n::: {.cell-output-display}\n\n\n\n\n \n \n \n\n
\n\n|attendees |12_dec_2022 |\n|:------------------------|:-----------|\n|Aiming Yang |No |\n|Ben Arancibia |Yes |\n|Brian Varney |No |\n|Christina Fillmore |No |\n|Chelsea Dickens |No |\n|Chi Zhang |No |\n|Clara Beck |No |\n|Aditee Dani |No |\n|Doug Kelkoff |No |\n|Dhvani Patel |No |\n|Filip Kabaj |No |\n|Harshal Khanolkar |No |\n|Iris Wu |No |\n|Jayashree Vedanayagam |Yes |\n|Joe Rickert |Yes |\n|Kyle Lee |Yes |\n|Leon Shi |No |\n|Lily Hsieh |Yes |\n|Lyn Taylor |Yes |\n|Martin Brown |Yes |\n|Mia Qi |Yes |\n|Michael Kane |No |\n|Michael Rimler |No |\n|Mike Stackhouse |No |\n|Min-Hua Jen |Yes |\n|Molly MacDiarmid |Yes |\n|Mona Mehraj |No |\n|Paula Rowley |Yes |\n|Soma Sekhar Sriadibhatla |Yes |\n|Vandana Yadav |No |\n|Vidya Gopal |No |\n|Vikash Jain |Yes |\n|Wilmar Igl |No |\n|Orla Doyle |No |\n\n
\n\n\n:::\n:::\n\n\n\n# Welcome and brief CAMIS project update: Lyn\n\n``` \nPlease consider which areas of the project you would like to be involved with: \n * Repository reviewers/framework reviewers\n * Content creators (Comparing analysis method implementations in software)\n * Github - content review / approval\n * Marketing, i.e. blogs and sharing with wider community (PSI, ASA, PHUSE etc) to encourage contributions \n * Long term plan - Extend reach beyond Europe/USA.\n```\n\n# Repository roadmap : Lyn\n\n``` \n * Sample website & templates – mid January 2022\n * Feedback on website/templates – EOB Feb 2022\n * Revisions – March 2022\n * Launch – April 2022\n```\n\n# White paper status update: Min-Hua\n\n``` \nNOTE: we would like to put the URL of new website and mention CAMIS in paper if possible?\n```\n\n# Other stream updates: All\n\n``` \nNeed to identify who were the previous stream leads to check with them we can put content into new template formats.\n * CMH\n * Mixed models\n * Linear models\n```\n\n# Questions/ AOB - All\n\n``` \n * Future meeting plan – Lyn set up directly so can be quickly adjust/ add more meetings if necessary?\n * Name change: CAMIS: Comparing analysis method implementations in software\n * Do we need our own logo. CAMIS. Volunteers?\n * Supported by PHUSE & PSI & ASA. Assign rep (or reps) for each organization. 
\n * Extend membership given many previous members no longer on project\n * Volunteer needed – can someone create a comparison using any method (but comparing SAS to Python/Julia or R to Python/Julia) – so we can test up with not just R Vs SAS.\n * AOB.\n```\n", - "supporting": [ - "12Dec2022_files" - ], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/minutes/posts/12Feb2024/execute-results/html.json b/_freeze/minutes/posts/12Feb2024/execute-results/html.json deleted file mode 100644 index b212d802d..000000000 --- a/_freeze/minutes/posts/12Feb2024/execute-results/html.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "hash": "8405b851a50d17465369e7b4732e1cae", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"Website structure update, Team list, Conferences\"\ndate: \"12 Feb 2024\"\noutput:\n html_document: \n toc: true\n toc_float: false\n toc_level: 5\n df_print: paged\n---\n\n\n\n# Attendees\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n`````{=html}\n\n\n \n \n \n\n
\n\n\n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n
attendees 12_Feb_24
Christina Fillmore Yes
Lyn Taylor Yes
Molly MacDiarmid Yes
Brian Varney Yes
Chi Zhang Yes
Orla Doyle No
Harshal Khanolkar No
Lily Hseih No
Filip Kabaj No
Martin Brown Yes
Min-Hua Jen No
Sarah Rathwell Yes
Kasa Andras No
Aditee Dani Yes
Keaven Anderson Yes
Benjamin Arancibia Yes
Wilmar Igl Yes
Vikash Jain Yes
Mia Qi Yes
Leon Shi Yes
Vandaya Yadav Yes
Stephen McCawille Yes
Vikrant Vijay Yes
Vidya Gopal Yes
Dhvani Patel Yes
Kyle Lee Yes
Chelsea Dickens No
David Bosak No
Michael Kane No
Lukas Brausch No
Michael Walshe No
Seemani Abhilipsa No
Aiming Yang No
Cuifeng Yin No
Todd Coffey No
Jayashree Vedanayagam No
Ashwath Gadapa No
Miriam Amor No
Anwesha Roy No
Samrit Pramanik No
Agnieszka Tomczyk No
Prem Kant Shekhar No
Sunil No
Kate Booth No
Peilin Zhou No
\n\n
\n\n`````\n:::\n:::\n\n\n\n# Agenda & Minutes\n\n- Updated website demo -- Chi & Christina\n\n- CAMIS membership form / data collection -- Lyn\\\n \n\n NOTE:  We will only collect: team members name, email address, organization, software used, interested in oncology, key interests and affiliations to stats organizations.  The email address is solely for the CAMIS leadership team, to make sure you are included in CAMIS emails.\\\n We ask on the form: \"Are you happy for your Name and company and interests to be visible on the CAMIS website. Note that email addresses will not be visible\". \n\n If you do not give permission then your name will not appear on the CAMIS repo as a CAMIS team member. If you do give permission, your name and company and interests will appear but your email addresses will be hidden from public view. At any time you can ask to be removed from the website team list by emailing me.\n\n- Phuse css workshop for 2024 / CAMIS ONCO-- Soma/Vikash/Harshil\n\n - Filip, Lyn & Chrstina met re: Python content\n\n - Next steps for workshop & white paper\n\n- Other conferences\n\n - Keaven - attending JSM & ISBN - and will mention CAMIS.\n\n - Chi attending use R conference, Lyn/Christina/All: please review abstract if you wish.\n\n - Regulatory stats workshop: Leon Shih (poster)\n\n- Volunteers requested for:\n\n - Openstatsware Bayesian MMRM  {brms.mmrm} package input :  Christine/Orla\n\n - MMRM - Volunteer please to look at Proc Mixed vs Proc GLIMMIX for the SAS/mmrm.qmd file (Stephen McCawille & Leon Shi may be able to look at this in future ).\n\n- AOB\n\n - Christina: Create a 1 slide - This is CAMIS.\n\n - Lyn: Load up Dec2023 blog post\n\n - Chi: Add links to blogs & add blog tab when we have content\n", - "supporting": [ - "12Feb2024_files" - ], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git 
a/_freeze/minutes/posts/12May2025/execute-results/html.json b/_freeze/minutes/posts/12May2025/execute-results/html.json deleted file mode 100644 index 9e8155687..000000000 --- a/_freeze/minutes/posts/12May2025/execute-results/html.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "hash": "089258be2823d2752935032466590407", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"Blogs, New content, Conferences (2)\"\ndate: \"12 May 2025\"\noutput:\n html_document: \n toc: true\n toc_float: false\n toc_level: 5\n df_print: paged\n---\n\n\n\n# Attendees\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n`````{=html}\n\n\n \n \n \n\n
\n\n\n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n
attendees 12_May_25
Lyn Taylor Yes
Christina Fillmore Yes
Chi Zhang No
Molly MacDiarmid Yes
Benjamin Arancibia No
Michael Kane No
Martin Brown No
Stephen McCawille No
Miriam Amor No
Peilin Zhou No
Samrit Pramanik Yes
Brian Varney Yes
Vikrant Vijay No
Yannick Vandendijck Yes
Vikash Jain No
Michael Walshe Yes
Anwesha Roy No
Min-Hua Jen No
Jaskaran Saini Yes
Mariusz Zieba Yes
Chelsea Dickens Yes
Tejas Pandit No
Ashwath Gadapa No
Sarah Brosens No
Kirsten Findlay No
\n\n
\n\n`````\n:::\n:::\n\n\n\n# Agenda & Minutes\n\nBlogs:\n\n- Vikash blog on PHUSE US\n\n- Molly/Lyn : Meeting with PHUSE admin team tomorrow re: size & frequency of blogs\n\nConferences:\n\n- PSI 8th June. Stickers now printed & will be handed out including QR code to repo\n\n- R in medicine: 11th June. Only \\$40 or so to attend so if anyone at your companies want to learn more it's a good one to attend.\n\n- PharmaSUG -- Brian attending Yannick also applying to attend\n\n- PHUSE EU. R/Pharma Nov 3-7th info TBC. call for abstracts not yet open.\n\n- R in HTA workshop - conference. Stephen attending in June.\n\n- Jaskaran is presenting in PHUSE CSS: Synthetic data.\n\n- PSI event in Cambridge England re: moving to R: 3rd July. Yannick attending & can mention CAMIS.\n\nNew content:\n\n- Binomial test in SAS -- noted comp page missing, so Jaskaran volunteered to do comparison page & Cochran mantel haenzel test in python.\n\n- Sample size Cochran Armitage test for trend in R and SAS/StatXact\n\n- Added to the Friedman Test\n\n- Sample size Equivalence R & SAS.\n\n- Working group on confidence intervals & new package development (GSK Pfizer & roche doing CI's for proportions). Cardx ones will move into citools (and come out of cardx as that should be manipulation only), and will add other methods such as CI stratified MN. DescTools is a wide ranging package so harder to validate for a GxP validated environment, so taking the CI's from this package into a separate citools package. 
One stop place for all CIs for proportions and will have some odds ratios too.\n\n- Poisson / negative binomial regression still needed - Lyn to do in July if poss\n", - "supporting": [ - "12May2025_files" - ], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/minutes/posts/13Feb2023/execute-results/html.json b/_freeze/minutes/posts/13Feb2023/execute-results/html.json deleted file mode 100644 index b07672624..000000000 --- a/_freeze/minutes/posts/13Feb2023/execute-results/html.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "hash": "e3fe8f293d383a514a21478ef5364ece", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"White Paper and Demo of connecting Rstudio with Github repo\"\ndate: \"13Feb2023\"\noutput:\n html_document: \n toc: true\n toc_float: false\n toc_level: 5\n df_print: paged\n---\n\n\n\n# Attendees\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n`````{=html}\n\n\n \n \n \n\n
\n\n\n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n
attendees 13_feb_2023
Aiming Yang Yes
Ben Arancibia Yes
Brian Varney Yes
Christina Fillmore Yes
Chelsea Dickens No
Chi Zhang No
Clara Beck No
Aditee Dani No
Doug Kelkoff No
Dhvani Patel No
Filip Kabaj No
Harshal Khanolkar Yes
Iris Wu No
Jayashree Vedanayagam Yes
Joe Rickert No
Kyle Lee No
Leon Shi Yes
Lily Hsieh Yes
Lyn Taylor Yes
Martin Brown Yes
Mia Qi Yes
Michael Kane Yes
Michael Rimler Yes
Mike Stackhouse No
Min-Hua Jen Yes
Molly MacDiarmid Yes
Mona Mehraj No
Paula Rowley No
Soma Sekhar Sriadibhatla No
Vandana Yadav No
Vidya Gopal No
Vikash Jain No
Wilmar Igl No
Orla Doyle No
\n\n
\n\n`````\n:::\n:::\n\n\n\n# Meeting minutes\n\nMin-Hua went through outstanding comments on the white paper.\n\nChristina did a demo of how to set up R studio to link through via git project to the CAMIS github repo. See \"13Feb2023_Contributing to the CAMIS project_Setting up communication between github and R studio\" for more information\n\n# Next meeting: 13th March 2023: 4:30 UTC, 11:30 EST.\n", - "supporting": [], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/minutes/posts/13May2024/execute-results/html.json b/_freeze/minutes/posts/13May2024/execute-results/html.json deleted file mode 100644 index 3bef886f0..000000000 --- a/_freeze/minutes/posts/13May2024/execute-results/html.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "hash": "f8052bdc7cfcae8e0e0d1a770c962eb4", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"2024 Goals\"\ndate: \"13 May 2024\"\noutput:\n html_document: \n toc: true\n toc_float: false\n toc_level: 5\n df_print: paged\n---\n\n\n\n# Attendees\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n`````{=html}\n\n\n \n \n \n\n
\n\n\n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n
attendees 13_May_24
Christina Fillmore Yes
Lyn Taylor Yes
Molly MacDiarmid Yes
Brian Varney No
Chi Zhang Yes
Orla Doyle No
Harshal Khanolkar Yes
Lily Hseih No
Filip Kabaj No
Martin Brown Yes
Min-Hua Jen Yes
Sarah Rathwell No
Kasa Andras No
Aditee Dani No
Keaven Anderson Yes
Benjamin Arancibia Yes
Wilmar Igl Yes
Vikash Jain No
Mia Qi Yes
Leon Shi No
Vandaya Yadav No
Stephen McCawille Yes
Vikrant Vijay No
Vidya Gopal Yes
Dhvani Patel No
Kyle Lee No
Chelsea Dickens No
David Bosak Yes
Michael Kane No
Lukas Brausch Yes
Michael Walshe Yes
Seemani Abhilipsa No
Aiming Yang No
Cuifeng Yin Yes
Todd Coffey Yes
Jayashree Vedanayagam No
Ashwath Gadapa No
Miriam Amor No
Anwesha Roy No
Samrit Pramanik No
Agnieszka Tomczyk No
Prem Kant Shekhar No
Sunil No
Kate Booth No
Peilin Zhou No
\n\n
\n\n`````\n:::\n:::\n\n\n\n# Agenda & Minutes\n\n## 2024 Goals\n\n- Our open issues are [here](https://github.com/PSIAIMS/CAMIS/issues)\\\n Currently 32 are closed and we would like to increase this to 45 closed by the end of the year\n- In addition, we'd like to improve some of the incomplete content such as MMRM\n- Create a webpage for listings dissertation projects\n- Have Soma & Vikash represent us at the CSS workshop.\n\n## CAMIS - Onco\n\nTo date there has not been much progress. Given the enthusiasm in the meeting to get things kicked off and starting to increase our survival content on the repository, Christina will set up a kick off meeting for those interested. To look at: weighted logrank, MaxCombo, RMST\n\nThis SAS link may also be [useful](https://support.sas.com/documentation/onlinedoc/stat/151/kaplan.pdf)\n\n## Other content updates\n\nWe now have a new table of assignments saved in the following [readme](https://github.com/PSIAIMS/CAMIS/tree/main/non_website_content) Welcome to edit to update anything you would like to work on, so we keep track of whose working on what. Special shout out to David who has now completed chi-square for SAS and to Lukas for all the python content. Awesome work !\n\nAs SAS Viya is becoming more used now, we agreed OK to add Viya specific code such as proc freqtab as long as it's clear that this isn't a SAS Base procedure.\n\n## Conference planning\n\nReminder that if you are attending a conference to represent CAMIS to add the detail [here](https://psiaims.github.io/CAMIS/Conferences.html). We need to ensure we continue to advertise the project to encourage people to use the repo and add content.\n\n- PHUSE CSS update (Soma/Vikash)\n- R/Pharma -- anyone like to present (openstatsware -- collab, python): Waiting to be open.\n- USER! 
-- Chi (Accepted)\n- Stephen phuse EU.\n- EU Connect -- Agnieska TBC\n- R/Medicine 2024- Agnieska accepted for Thurs 13th/14th 11am-6pm EST (20 min talk)\n\n## AOB\n\nOur Blog page dosn't appear in date order or aligned! If anyone can fix let us know ! Saved under News [here](https://psiaims.github.io/CAMIS/blogs/)\n", - "supporting": [ - "13May2024_files" - ], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/minutes/posts/13mar2023/execute-results/html.json b/_freeze/minutes/posts/13mar2023/execute-results/html.json deleted file mode 100644 index b7627a73d..000000000 --- a/_freeze/minutes/posts/13mar2023/execute-results/html.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "hash": "43e5653eb8bd8ced5ebf2fe3f183ded6", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"White Paper, Website, ONCO, Volunteers, Conferences\"\ndate: \"13mar2023\"\noutput:\n html_document: \n toc: true\n toc_float: false\n toc_level: 5\n df_print: paged\n---\n\n\n\n# Attendees\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n`````{=html}\n\n\n \n \n \n\n
\n\n\n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n
attendees 13_mar_2023
Aiming Yang Yes
Ben Arancibia Yes
Brian Varney Yes
Christina Fillmore Yes
Chelsea Dickens No
Chi Zhang No
Clara Beck No
Aditee Dani Yes
Doug Kelkoff No
Dhvani Patel No
Filip Kabaj No
Harshal Khanolkar Yes
Iris Wu No
Jayashree Vedanayagam Yes
Joe Rickert No
Kyle Lee Yes
Leon Shi Yes
Lily Hsieh Yes
Lyn Taylor Yes
Martin Brown Yes
Mia Qi No
Michael Kane No
Michael Rimler Yes
Mike Stackhouse No
Min-Hua Jen No
Molly MacDiarmid Yes
Mona Mehraj No
Paula Rowley No
Soma Sekhar Sriadibhatla Yes
Vandana Yadav No
Vidya Gopal No
Vikash Jain Yes
Wilmar Igl No
Orla Doyle No
\n\n
\n\n`````\n:::\n:::\n\n\n\n# Agenda\n\n- White paper - Lyn\n- Website progress - Christina\n- CAMIS-ONCO - Soma Sekhar\n- Volunteers Roles\n- Conference Reps\n- AOB/ PHUSE feedback\n\n# Meeting minutes\n\n**White Paper Update: Lyn** Min-Hua is sending the white paper to the PHUSE group lead for review soon just a bit more tidying to do following comments.\n\n**Website progress: Christina\\\n**The home page now has the list of stats methods we are looking to collect data on.\n\n**ACTION :**Christina to put actions for each stats method which we need help to complete into github. We will assign those already selected to the people below. This will enable people that want to help to be able to see which are available for people to select.\n\n**CAMIS- ONCO: Soma Sekhar**\n\n- plan to launch later this week.\n\n- possibly white paper/ conference presentation.\n\n**Review of volunteer roles**\n\n- General Co-ordination -Lyn\n\n- Website Co-ordination / Home page table - Christina\n\n- CAMIS-ONCO - Soma Sekhar\n\n- Copying CSRMLW material to CAMIS\n\n - CMH: Aiming Yang\n\n - Linear Models: Brian Varney (ACTION: Set up call with Dani, Lyn, Vikash, christina, + anyone else whose interested in helping to please volunteer)\n\n - MMRM: Ben Arancibia\n\n - Survival: Min-Hua Jen, Mia Qi\n\nACTION: Christina to also add \"actions\" for people to pick up the following duties.\n\n- Co-ordinator for conference material - share standard slides/ content /abstracts\n\n- Volunteer to design a CAMIS Logo\n\n- Social media rep - to co-ordinate posts (linkedIn/Twitter)\n\n- Conference reps/ attendees needed\n\nWe will also add a page which lists the conferences so we can collate and coordinate whose going with the hope of advertising the project more widely once we have content on the website. 
Include a column for timelines/ abstract deadlines.\n\n+----------------------------------------------------------------------------------+----------------------------------+--------------------+---------------------+----------------------+------------------------------------+\n| Conference | Location | Date | Main Contact | Volunteers to attend | Details |\n+==================================================================================+==================================+====================+=====================+======================+====================================+\n| PHUSE US Connect | Orlando, Florida | 5-8 March 2023 | Soma Sekhar | | Presentation |\n+----------------------------------------------------------------------------------+----------------------------------+--------------------+---------------------+----------------------+------------------------------------+\n| DISS (Duke industry statistics symposium) | Virtual | 29-31st March 2023 | Lyn Taylor | Molly MacDiarmid | Poster |\n+----------------------------------------------------------------------------------+----------------------------------+--------------------+---------------------+----------------------+------------------------------------+\n| PSDM(Pharmaceutical statistics and data management) | Netherlands | 19 Apr 2023 | | | |\n+----------------------------------------------------------------------------------+----------------------------------+--------------------+---------------------+----------------------+------------------------------------+\n| IASCT (ConSPIC - conference for statistics and programming in clinical research) | Bengaluru, India | 4-6 May 2023 | Harshal Khanolkar | Harshal Khanolkar | |\n+----------------------------------------------------------------------------------+----------------------------------+--------------------+---------------------+----------------------+------------------------------------+\n| PSI 2023 Conference | Hammersmith London West, England 
| 11-14 June 2023 | Martin Brown | Christina Fillmore | Oral & poster submission completed |\n| | | | | | |\n| | | | | Lyn Taylor | |\n| | | | | | |\n| | | | | Molly Macdiarmid | |\n| | | | | | |\n| | | | | Martin Brown | |\n+----------------------------------------------------------------------------------+----------------------------------+--------------------+---------------------+----------------------+------------------------------------+\n| DIA 2023 Global Annual Meeting | Boston MA, USA | 25-29 June 2023 | | | |\n+----------------------------------------------------------------------------------+----------------------------------+--------------------+---------------------+----------------------+------------------------------------+\n| Joint statistical meeting (JSM) | Toronto, Ontario, Canada | 5-10 Aug 2023 | | | |\n+----------------------------------------------------------------------------------+----------------------------------+--------------------+---------------------+----------------------+------------------------------------+\n| ISCB Conference | Milan-Italy | 27-31 Aug 2023 | | | |\n+----------------------------------------------------------------------------------+----------------------------------+--------------------+---------------------+----------------------+------------------------------------+\n| RSS conference | Harrogate, England | 4-7 sept 2023 | | | |\n+----------------------------------------------------------------------------------+----------------------------------+--------------------+---------------------+----------------------+------------------------------------+\n| ASA Bio pharmaceutical Section Regulatory-industry Statistics Workshop | Rockville, Maryland, USA | 27-29 Sept 2023 | | | |\n+----------------------------------------------------------------------------------+----------------------------------+--------------------+---------------------+----------------------+------------------------------------+\n| EASD 2023 - 
European Association for study of diabetes | Hamberg Germany | 02-06 Oct 2023 | | | |\n+----------------------------------------------------------------------------------+----------------------------------+--------------------+---------------------+----------------------+------------------------------------+\n| PHUSE EU Connect 2023 | ICC Birmingham, England | 5-8 November 2023 | Christina Fillmore? | | |\n+----------------------------------------------------------------------------------+----------------------------------+--------------------+---------------------+----------------------+------------------------------------+\n| R in Pharma /\\ | Virtual/ In person | Nov? | Christina Fillmore? | | |\n| POSIT conf. | | | | | |\n+----------------------------------------------------------------------------------+----------------------------------+--------------------+---------------------+----------------------+------------------------------------+\n\n------------------------------------------------------------------------\n", - "supporting": [ - "13mar2023_files" - ], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/minutes/posts/14Apr2025/execute-results/html.json b/_freeze/minutes/posts/14Apr2025/execute-results/html.json deleted file mode 100644 index fea1d9e7b..000000000 --- a/_freeze/minutes/posts/14Apr2025/execute-results/html.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "hash": "1912e7ac79ddf6a2f876178918514ac5", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"Blogs, New content, Conferences\"\ndate: \"14 Apr 2025\"\noutput:\n html_document: \n toc: true\n toc_float: false\n toc_level: 5\n df_print: paged\n---\n\n\n\n# Attendees\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n`````{=html}\n\n\n \n \n \n\n
\n\n\n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n
attendees 14_Apr_25
Lyn Taylor Yes
Christina Fillmore Yes
Chi Zhang No
Molly MacDiarmid Yes
Benjamin Arancibia No
Michael Kane Yes
Martin Brown No
Stephen McCawille Yes
Miriam Amor Yes
Peilin Zhou No
Samrit Pramanik Yes
Brian Varney Yes
Vikrant Vijay No
Yannick Vandendijck Yes
Vikash Jain No
Michael Walshe Yes
Anwesha Roy No
Min-Hua Jen No
Jaskaran Saini Yes
Mariusz Zieba Yes
Chelsea Dickens Yes
Tejas Pandit No
Ashwath Gadapa No
Sarah Brosens No
Kirsten Findlay No
\n\n
\n\n`````\n:::\n:::\n\n\n\n# Agenda & Minutes\n\n- Welcome new members !\n\n - Mariusz Zięba (AZ)\n\n - NOTE: for any questions/ discussion best to add into github issues or discussion tab.\\\n We dont routinely look at teams so may miss your questions if loaded there.\\\n Alternatively email: Email: [Lyn.taylor\\@parexel.com](Lyn.taylor@parexel.com) or [Christina.e.fillmore\\@gsk.com](mailto:Christina.e.fillmore@gsk.com){.email}\n\n - ACTION: Lyn to check SAS PR from Jaskaran in Teams. Also go onto PHUSE TEAMS channel and add comment to ask people to contact us over github or email as we dont check teams.\n\n- Blog Update\n\n - Yannick's Tobit regression blog was sent to PSI enews, and is on the CAMIS blog page.\n - ACTION: Vikash to work on Blog for PHUSE US.\n - Format for blogs\n - PSI want short blog & cross reference to our repo for longer version.\n - PHUSE want longer blogs.\\\n ACTION: May be worth discussing with PHUSE the blogs, our plan to have monthly short blog pointing to recently content that has been added, but they requested the below?!\n - Add a summary describing CAMIS\n\n - Perhaps expand on when Tobit regression (for example) would be used\n\n - Provide an example of use, input, output, explanation of results\n\n - And to increase the length of this and make it more in the style of a blog (usual blogs limit is 1000 words)\n\n- Content updates in the last month ! 
Thank you All !\n\n - Introduction to Machine learning - Andrey\\\n ACTION : Christina to play with where it fits best & update TOC.\n\n - Tobit Regression Yannick - updated to 1 sided p-values\n\n - Sample size general summary and cochran-armitage trend test.\n\n - CMH (to include risk differences) & RMST (to include more methods) - Lyn\n\n - Soon - Propensity score matching will be loaded to repo today !\n\n- Repo Tech\n\n - Repo now only re-builds when changes - Reduction in time from \\>30 min now \\<6 mins\n\n - PR Previews - coming soon :) Thank you Michael Walshe!!\n\n- Conferences\n\n - Yannick was accepted for Poster at ISBC46 24-28th Aug\n\n - Fedor Logvin (PXL), is applying for PHUSE EU Connect 16-19 Nov.\n\n - Yannick & Michael & Miriam (GLMM) may also submit & go\n\n - R/Medicine - Final call for abstracts is 18th April\\\n **ACTION:** If anyone wants to present let Lyn know by Wednesday this week, if no volunteers Christina and/or Lyn would go. Let Michael Kane know if we need 1-2 days extra to submit.\\\n Schedule announcment 9th May, Pre-recorded video submission due 2nd June & Conference is 12th-13th June (via TC)\n\n - R/Pharma Nov 3-7th info TBC. call for abstracts not yet open.\n\n - R in HTA workshop - conference. Stephen attending in June.\\\n \n\n - Jaskaran is presenting in PHUSE CSS: Synthetic data.\n\n - PSI event in Cambridge England re: moving to R: 3rd July. 
Yannick attending & can mention CAMIS.\n", - "supporting": [ - "14Apr2025_files" - ], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/minutes/posts/14Jul2025/execute-results/html.json b/_freeze/minutes/posts/14Jul2025/execute-results/html.json deleted file mode 100644 index cdec9ca2c..000000000 --- a/_freeze/minutes/posts/14Jul2025/execute-results/html.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "hash": "4329e6d8c8644dab202f31f8f685f0a6", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"Package selection\"\ndate: \"14 July 2025\"\noutput:\n html_document: \n toc: true\n toc_float: false\n toc_level: 5\n df_print: paged\n---\n\n\n\n# Attendees\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n`````{=html}\n\n\n \n \n \n\n
\n\n\n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n
attendees 14_Jul_25
Lyn Taylor Yes
Christina Fillmore Yes
Chi Zhang Yes
Molly MacDiarmid Yes
Benjamin Arancibia No
Michael Kane No
Martin Brown No
Stephen McCawille No
Miriam Amor No
Peilin Zhou Yes
Samrit Pramanik No
Brian Varney Yes
Vikrant Vijay No
Yannick Vandendijck Yes
Vikash Jain No
Michael Walshe Yes
Anwesha Roy No
Min-Hua Jen Yes
Jaskaran Saini Yes
Mariusz Zieba No
Chelsea Dickens No
Tejas Pandit Yes
Ashwath Gadapa Yes
Sarah Brosens Yes
Kirsten Findlay No
\n\n
\n\n`````\n:::\n:::\n\n\n\n# Agenda & Minutes\n\n**Conferences.**\n\n- Brian attending SAS users in the fall,\n\n- Yannick got a poster accepted for ISBC & has a PHUSE EU presentation. \n\n- Lyn & christina attended PSI - lots of interest in CAMIS, many had heard of us, but lots of people still using SAS as their main programing language. Hence, we expect usage of our site to increase as more companies transition to R. Currently, we have a steady 10-30 unique people viewing the website each day.\n\n- All to update the Conferences.qmd with any confences you are attending.\n\n**Next steps for CAMIS.**\n\n- Over next few months, leadership team to do a review of current content & identify where improvements are needed. Identify new issues - & assign.\n\n- We plan to follow up with people assigned issues, but that we haven't heard from for a while. Chase up missing content.\n\n**How to select packages to use in the comparison.**\n\nChristina presented slides from PSI Conferenece with respect to how to check packages for best use see [CAMIS](../non_website_content/conferences/2025/202506_PSI_Christina_Lyn.pptx)\n\nBrian suggested another useful package to run is pkgdiff written by david bossock, this checks through packages for changes that may break prevoius code and gives a stability score. 
\n\nMichael also suggested you can use to look at the differences between packages.\n", - "supporting": [ - "14Jul2025_files" - ], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/minutes/posts/15July2024/execute-results/html.json b/_freeze/minutes/posts/15July2024/execute-results/html.json deleted file mode 100644 index c1323c1c0..000000000 --- a/_freeze/minutes/posts/15July2024/execute-results/html.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "hash": "0cfe2c79cad4a8d4f1fa56085773b0c9", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"Repo Content Growth, Conferences 2024\"\ndate: \"14 July 2024\"\noutput:\n html_document: \n toc: true\n toc_float: false\n toc_level: 5\n df_print: paged\n---\n\n\n\n# Attendees\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n`````{=html}\n\n\n \n \n \n\n
\n\n\n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n
attendees 15_Jul_24
Christina Fillmore Yes
Lyn Taylor Yes
Molly MacDiarmid Yes
Brian Varney Yes
Chi Zhang No
Orla Doyle Yes
Harshal Khanolkar Yes
Lily Hseih No
Filip Kabaj No
Martin Brown Yes
Min-Hua Jen Yes
Sarah Rathwell Yes
Kasa Andras No
Aditee Dani No
Keaven Anderson Yes
Benjamin Arancibia No
Wilmar Igl No
Vikash Jain No
Mia Qi No
Leon Shi No
Vandaya Yadav No
Stephen McCawille Yes
Vikrant Vijay No
Vidya Gopal No
Dhvani Patel No
Kyle Lee No
Chelsea Dickens No
David Bosak No
Michael Kane Yes
Lukas Brausch No
Michael Walshe Yes
Seemani Abhilipsa No
Aiming Yang No
Cuifeng Yin Yes
Todd Coffey No
Jayashree Vedanayagam Yes
Ashwath Gadapa No
Miriam Amor No
Anwesha Roy No
Samrit Pramanik No
Agnieszka Tomczyk No
Prem Kant Shekhar No
Sunil No
Kate Booth No
Peilin Zhou No
\n\n
\n\n`````\n:::\n:::\n\n\n\n# Agenda & Minutes\n\n**Monthly Contributions**\n\n- Not much content this month, but per the below, lots on the way !\n\n**Content updates / assignments**\n\nWe checked the assignments readme (under non-website content)\n\n- Survival -- perhaps split into: non parametric, parametric (rather than AFT & non-proportional hazards), Update readme.md table to assign these rows to the survival team.\\\n Team will meet again in 2 weeks.\n\n- Negative binomial content has been posted, it's a comparison, but would now be quick to write the SAS page. **ACTION:** Orla to ask the Novartis team to add that too.\n\n- Keaven/Yulia -- Group sequential designs: pull request internally east vs R -- not doing comparison yet, but will do in future.\n\nQUESTIONS: Do we need to use SAS enterprise or SAS studio and should we state the version that gave the output? It's probably good practice to do this in case something change. For R, we should use renv, any problem ask Christina. The R/Python code runs each time so output will be current per version being used. Only sas is static.\n\n**22nd July -- Novartis hackathon** 150 signed up. Need to provide Orla with ideas of what we want them to look at. Stats SMEs will also review before doing pull requests. Possible topics could be:\n\n- Negative binomial -- comparison & SAS code Beecer -- covariate adjustment for logistic regression\n\n- Logistic regression page update -- improve content & investigate why p value different.\n\n- MANOVA -- why R different to SAS\n\n- Friedman test, Jonckheere test, bionomial, R / sas /comp both needed\n\n- Correlation in SAS\n\n- Christina may need help to review pull request. 
Both Pfizer, Novartis & merck will have internal reviews prior to pull requests so review can be reduced.\n\n**Conferences**\n\n- Chi's UseR slides are now on the repository [under](https://github.com/PSIAIMS/CAMIS/tree/main/non_website_content/conferences/2024)\n- PHUSE EU Brussels 23rd Sept - Qian Wang (Merck) will attend.\n- POSIT conf, R in pharma...R open now https://github.com/rinpharma/rinpharma-summit-2024?tab=readme-ov-file\n- Harshal working with Daniel Sabanes Bove on the organizing committee for R in pharma: Asia pacific track -- woudl be good to have a repo from Asia represent us\n- 25th July: Americas single day event pennylvania anyone going?\n- JSM -- 1st week august -- Keaven Anderson going, lyn to send 1 slide.\n\nQUESTIONS: RE: funding to attend conferences, usually provided by your company, but in special circumstances we could request funding from: R consortium or PHUSE.\n\n**Brainstorming session**\n\nHow can we engage wider to increase content creation?\n\n- PSI : enews,\n\n- PHUSE bi-weekly news: Let Alexandra Peace (mailto:workinggroups\\@phuse.global) know of any events we are attending or new content we worked on and she will share in a weekly summary of progress.\n\n- Could volunteer to host hackathons for conferences. Christina & orla volunteering to host a hackathon for R in pharma this week.\n\n- Advertise to PSI/PHUSE RE: if you have Findings please add an issue (even if you can't look into it yourself).\n\n- Special media post to Thank you Merck, Pfizer, Novartis for your contributions. Also companies can post what they've contributed. New content blogs. 
-\n\n- Blog of Novartis hackathon.\n\n**Dissertations** --still looking for new ideas for projects & widen engagement with universities.\n", - "supporting": [ - "15July2024_files" - ], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/minutes/posts/15May2023/execute-results/html.json b/_freeze/minutes/posts/15May2023/execute-results/html.json deleted file mode 100644 index 70d02b071..000000000 --- a/_freeze/minutes/posts/15May2023/execute-results/html.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "hash": "b87fd7a5fca6f41df40eac1afb755623", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"White Paper, Website, Launch Plan\"\ndate: \"15 May 2023\"\noutput:\n html_document: \n toc: true\n toc_float: false\n toc_level: 5\n df_print: paged\n---\n\n\n\n# Attendees\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n`````{=html}\n\n\n \n \n \n\n
\n\n\n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n
attendees 15_may_2023
Aiming Yang Yes
Ben Arancibia Yes
Brian Varney No
Christina Fillmore Yes
Chelsea Dickens No
Chi Zhang No
Clara Beck No
Aditee Dani Yes
Doug Kelkoff No
Dhvani Patel No
Filip Kabaj No
Harshal Khanolkar No
Iris Wu No
Jayashree Vedanayagam Yes
Joe Rickert No
Kyle Lee No
Leon Shi Yes
Lily Hsieh No
Lyn Taylor Yes
Martin Brown Yes
Mia Qi Yes
Michael Kane No
Michael Rimler No
Mike Stackhouse No
Min-Hua Jen No
Molly MacDiarmid Yes
Mona Mehraj No
Paula Rowley No
Soma Sekhar Sriadibhatla Yes
Vandana Yadav No
Vidya Gopal No
Vikash Jain Yes
Wilmar Igl Yes
Orla Doyle No
\n\n
\n\n`````\n:::\n:::\n\n\n\n# Agenda\n\n- White paper - Lyn/Min-Hua\n- Website progress - Christina\n- Update on Launch - Lyn\n- CAMIS-ONCO - Soma Sekhar\n- Volunteer Open Roles\n- Conference Attendance\n- AOB\n\n# Meeting minutes\n\n**White Paper Update: Min-Hua** Paula at PHUSE will distribute for public review. Over the next days we'll get a link to the official review. ACTION :Christina will put the white paper onto the website as draft open for public review\n\n**Website progress: Christina** Website content progressing well. Ben Arancibia - progressing MMRMs + other areas.\\\nSurvival - With Christina to fix importing. ANCOVA - Aditee in progress - change to CSV not SAV (SPSS file). Update to call it linear regression, Lyn to help find ANCOVA (testing treatments) Independant Two-Sample t-test in SAS - Vikash got a few changes then will load ok.\n\nLyn: To create a FAQ doc for the website. Make sure it references available material elsewhere so it doesn't become out of date quickly.\n\nNOTE: When you do a pull request, check your action to see if the checks pass/fail & reach out to Lyn/CHristina if you have problems. Remember to do snapshot::renv, so that any packages you install were snapshot to the central repo. Else it will fail when you do the pull request as the repo wont have the packages in it that your code needs.\n\n**Launch Update**\n\n- Blog video now available [here](https://github.com/PSIAIMS/CAMIS/blob/main/non_website_content/20230505%20Blog%20Recording.mp4)\n\n- Blog text to use with various lengths also available [here](https://github.com/PSIAIMS/CAMIS/blob/main/non_website_content/20230505%20Blog%20Text%20Introduction%20to%20CAMIS.docx)\n\n- Conference slides & abstract available [here](https://github.com/PSIAIMS/CAMIS/tree/main/non_website_content/conferences)\n\nRather than a standard set, plan is to have abstracts & slides/posters put into this folder (inc. 
name & date of conference) then people can use the contact that they have preference to use.\n\nHarshal has loaded IASCT slides to same location.\n\n- Contacts for Societies\n\n - PSI /EFSPI (Martin) - Content sent.\n\n - R Consortium / PHUSE / RSS (Lyn) - Content sent.\n\n - IASCT (Harshal) - Conference went well and lots of interest from IASCT.\n\n - ASA (Leon) - TBC who are ASA to reach out to? If Ben has any contacts that Leon could use please let him know.\n\n - SAS - we may reach out to SAS directly through PHUSE. TBC if they would be Ok with us including their data, copyright. Hopefully they'd give approval as not or project & advertising what you can do in SAS. ACTION: to find contact who may be interested in update/review of SAS. Does PHUSE have a contact already that we can use. Lyn to ask Paula. Aiming/ Martin to let Lyn know if she has a contact. ACTION : Lyn/Christina to Add a disclaimer that we are volunteers adding open source content, but if you see anything that infringes copyright please let us know and we'll remove it immediately.\n\n**CAMIS- ONCO: Soma Sekhar**\n\n- Validation of endpoints (primary/secondary oncology endpoints). Propose to do poster at PHUSE CSS. Once Mia's survival section is loaded. Sema Sekhar to review. Then highlight what's missing - what else you want to add. Max combo. BICR vs RECIST? In future we can discuss how these fit with current CAMIS structure. Focus on the Stats method ideally. ACTION :Christina to email Semar Sekhar once Survival is live on Website.\n\n**Conferences** Let's review\n\nVikesh- plan for CSS. Abstract deadline 12th June, 30th june registration opens.\\\nPosters only - only invited people can be speakers. Somar Sekhar, Aditee Dani would be happy to do posters. 
Suggest all 3 meet to discuss contribution to poster or doing separate ones but not duplicating the same content.\n\nAsk PHUSE CSS working group (Data visualization and open source technology) DVOST - if we can have a presentation next year at the CSS.\n\nJSM - ASA conference. Leon attending. Abstract due Feb 2024 - so try have a rep there next year.\n\nPHUSE Single day event (SDE- Toronto Mississauga), PHUSE EU got a poster abstract: Jayashree Vendanayagam PHUSE Single day event (New york - regeneron hosting Oct 16, check check if Aiming can do any poster/presentation/advert)\n\nAZ R pharm conference 7th June. LYn & Martin presenting.\n", - "supporting": [ - "15May2023_files" - ], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/minutes/posts/17apr2023/execute-results/html.json b/_freeze/minutes/posts/17apr2023/execute-results/html.json deleted file mode 100644 index 11a7ed05b..000000000 --- a/_freeze/minutes/posts/17apr2023/execute-results/html.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "hash": "3af740cfd6365863ace4174fc8432935", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"White Paper, Website, Launch Plan\"\ndate: \"17Apr2023\"\noutput:\n html_document: \n toc: true\n toc_float: false\n toc_level: 5\n df_print: paged\n---\n\n\n\n# Attendees\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n`````{=html}\n\n\n \n \n \n\n
\n\n\n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n
attendees 17_apr_2023
Aiming Yang Yes
Ben Arancibia No
Brian Varney Yes
Christina Fillmore Yes
Chelsea Dickens No
Chi Zhang No
Clara Beck No
Aditee Dani Yes
Doug Kelkoff Yes
Dhvani Patel No
Filip Kabaj No
Harshal Khanolkar Yes
Iris Wu No
Jayashree Vedanayagam Yes
Joe Rickert Yes
Kyle Lee Yes
Leon Shi No
Lily Hsieh Yes
Lyn Taylor Yes
Martin Brown Yes
Mia Qi Yes
Michael Kane No
Michael Rimler Yes
Mike Stackhouse No
Min-Hua Jen Yes
Molly MacDiarmid Yes
Mona Mehraj No
Paula Rowley No
Soma Sekhar Sriadibhatla No
Vandana Yadav No
Vidya Gopal No
Vikash Jain No
Wilmar Igl No
Orla Doyle No
\n\n
\n\n`````\n:::\n:::\n\n\n\n# Agenda\n\n- White paper - Lyn/Min-Hua\n- Logo - All - voting!\n- Website progress - Christina\n- Launch Plan - Lyn\n- CAMIS-ONCO - Soma Sekhar\n- Volunteer Open Roles\n- Conference Attendance\n- AOB\n\n# Meeting minutes\n\n**White Paper Update: Min-Hua** PHUSE are doing technical review so hopefully will come back shortly with any comments. I reminded them last week. Has been reviewed by leads team, now with steering committee ( Final review team), so hopefully not much longer.\n\n**LOGO: Lyn** - By a small majority the preferred option was the calculator without the P\\<0.05 in it. This will now be redrawn & finalized. ACTION: Lyn will update website when image available. Will save under CAMIS/images so you can use for any posters/ presentations.\n\n**Website progress: Christina** \\*\\*All - review of progress & answer any questions\n\nSurvival - Mia has made great progress on survival, Christina and Lyn to help fix branch issue & then will get it pushed to the live site.\n\n**ACTION :** Lyn to Create a video of creating a branch / doing updates. push/pull - github pull request. Create a FAQ doc for the website.\n\n**Launch Plan**\n\n- Align launch of website with release of white paper. Blog writing & \"Video\" launch - Lyn to write & distribute for review\n\n- Once content created reach out to the following to help advertise\n\n - PSI /EFSPI (Martin),\n\n - R Consortium / PHUSE / RSS (Lyn)\n\n - IASCT (Harshal)\n\n - ASA (Min-hua may have contact or See if Ben has a contact- ACTION christina to check with ben then get back to Min-hua. Lily Hsieh to ask Leon as he's part of ASA. 
Aiming can also reach out to a contact to see she has a contact )\n\n - Others : TBC\n\n**CAMIS- ONCO: Soma Sekhar**\n\nPlans are in progress\n\n**Review of volunteer open roles** Still looking for volunteers to do: - Co-ordinator for conference material - share standard slides/ content /abstracts /posters - Social media rep - to co-ordinate posts (linkedIn/Twitter) - Volunteers to represent CAMIS at various conferences\n\n**Conferences** All to let Lyn know or update the conferences qmd if you want to attend and represent/advertise camis\n", - "supporting": [ - "17apr2023_files" - ], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/minutes/posts/19June2023/execute-results/html.json b/_freeze/minutes/posts/19June2023/execute-results/html.json deleted file mode 100644 index d06149b9a..000000000 --- a/_freeze/minutes/posts/19June2023/execute-results/html.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "hash": "b0460954b7c550bdeed8c94dc5310c3f", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"White Paper Finalization, Advertising CAMIS\"\ndate: \"19 June 2023\"\noutput:\n html_document: \n toc: true\n toc_float: false\n toc_level: 5\n df_print: paged\n---\n\n\n\n# Attendees\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n`````{=html}\n\n\n \n \n \n\n
\n\n\n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n
attendees 19_june_2023
Aiming Yang Yes
Ben Arancibia No
Brian Varney Yes
Christina Fillmore Yes
Chelsea Dickens No
Chi Zhang No
Clara Beck No
Aditee Dani No
Doug Kelkoff No
Dhvani Patel No
Filip Kabaj No
Harshal Khanolkar Yes
Iris Wu No
Jayashree Vedanayagam No
Joe Rickert No
Kyle Lee No
Leon Shi No
Lily Hsieh No
Lyn Taylor Yes
Martin Brown Yes
Mia Qi No
Michael Kane Yes
Michael Rimler No
Mike Stackhouse No
Min-Hua Jen No
Molly MacDiarmid Yes
Mona Mehraj No
Paula Rowley No
Soma Sekhar Sriadibhatla No
Vandana Yadav No
Vidya Gopal No
Vikash Jain Yes
Wilmar Igl Yes
Orla Doyle No
\n\n
\n\n`````\n:::\n:::\n\n\n\n# Agenda\n\n- Team announcements - Lyn\n- White paper - Lyn\n- Website progress - Christina\n- Conferences feedback (PSI/IASCT)- Lyn/Christina/Martin/Molly/Aiming/Harshal\n- Conference planning - All\n- CAMIS-ONCO - Soma Sekhar\n- AOB\n\n# Meeting minutes\n\n**Team Announcements**\n\nMin-Hua Jen, Gave birth to twins -everyone is doing well !\n\nWelcome to: Iris Wu (Incyte)\n\nHarshal volunteered to be our Social Media Rep / co-ordinate our social media posts.\n\nRole consists of:\n\n1) Helps to come up for ideas for blogs/posts. Examples could include, if someone writes content on MRMM, you ask them to write a short description on what they've done including link to their work on the website, or for example, once the white paper is final, one of us will write a blog post to advertise it. Molly's poster prize at PSI conference etc !\n\n2) Chases people up who said they'd write a blog to ensure we are marketing our work in a timely manner.\n\n3) Ensures that when you receive content (blog/posts), that it's sent to all the key contacts for organizations/Societies (See end of minutes for list)\n\n**Social media post list of upcoming posts**\n\n1\\) Write a short message for Molly winning Poster prize. Just something in third person like \"Congrats to Molly for winning a PSI Conference 2023 Poster Prize. If you want to see the poster it's here.. (Molly to write, Lyn review, then send to Harshil/Christine to review - then Harshil to forward to contacts for advertising).\n\n2\\) Once White paper released, PHUSE will advertize but we should write something to share with our wider contacts\n\n**White Paper Update** No comments from Public review. Paper is [here](https://phuse.s3.eu-central-1.amazonaws.com/Deliverables/Under+Review/White_Paper_V6_11May23+(1).pdf) We checked for any last comments from the group. 
ACTION: Lyn to send confirmation that we are good to proceed to PHUSE tomorrow if no further comments.\n\n**Website progress: Christina**\n\nSubstantial content been added in the last month.\n\nOnce key area we'd like to progress on though is MMRM. ACTION: Christina to ask Kevin Kunzmann if he can write up something.\n\nLyn - To create a FAQ doc for the website. Make sure it references available material elsewhere so it doesn't become out of date quickly.\n\nPSI AIMS will create github training which we can utilize to onboard statisticians.\n\n**Conference feedback:**\n\nPSI Conference: Lyn/Christina/Martin/Molly/Aiming. Molly's conference poster won a Poster Prize ! Need to blog/advertise the award.\n\nIASCT: Harshil. Spoken to board members, currently board going through election so will restart discussion after that.\n\n**Conference planning**\n\nVikash- plan for CSS. Abstract has been submitted by Soma Sekhar (Co-author Vikash & Adittee), 30th june registration opens.\n\nACTION: Lyn: Ask PHUSE CSS working group (Data visualization and open source technology) DVOST - if we can have a presentation next year at the CSS.\n\nJSM - ASA conference. Leon. Abstract due Feb 2024 - so try have a rep there next year.\n\nPHUSE Single day event (SDE- Toronto Mississauga),\n\nSCT - society of clinical trials - Michael kane? (ACTION : Lyn to update conf website)\n\nSESUG - South Eastern SAS user group: Brian Varney (ACTION : Lyn to update conf website)\n\nPHUSE EU got a poster abstract: Jayashree Vendanayagam\n\nPHUSE Single day event (New york - regeneron hosting Oct 16, Aiming emailed host to see if she can do a poster/presentation/advert - Lyn to add to conf page)\n\nR in Pharma - Brian or Christina to possibly submit something. Nov virtual. POSIT CONF - September in chicago. (Lyn update website - Christina wont be at POSIT, split into 2 )\n\nPhuse EU connect : the CAMIS abstract was selected as back up talk only. 
However there are some companies who have company talks - which take priority so limited independent speakers to accept talks from. TBC if anyone on the list of speakers - could include a slide to advertise us at PHUSE EU!\n\nLessons learnt for conferences:\n\n1\\) Put PHUSE CAMIS on abstract (part of PHUSE DVOST).\n\n2\\) submit abstract for Poster & Talk - then you have the back up of a poster if talk is rejected.\n\n**CAMIS- ONCO: Soma Sekhar**\n\n- No update this month, carry to next month. Validation of endpoints (primary/secondary oncology endpoints). Propose to do poster at PHUSE CSS. Once Mia's survival section is loaded. Sema Sekhar to review. Then highlight what's missing - what else you want to add. Max combo. BICR vs RECIST? In future we can discuss how these fit with current CAMIS structure. Focus on the Stats method ideally. ACTION :Christina to email Semar Sekhar once Survival is live on Website.\n\n**Previous meeting notes/ Key Information**\n\n- Contacts for Organizations/ Societies\n\n - PSI /EFSPI (Martin)\n\n - R Consortium / PHUSE / RSS (Lyn)\n\n - IASCT (Harshal)\n\n - ASA (Leon) - TBC who are ASA to reach out to? 
If Ben has any contacts that Leon could use please let him know.\n\n - SAS - Contact TBC.\n\n- Roles\n\n - Lyn Taylor - Lead\n\n - Christina Fillmore - Website/ co-lead\n\n - Soma Sekhar Sriadibhatla- CAMIS ONCO\n\n - Harshal Khanolkar - Social media rep\n\n - Linear models team - Brian Varney, Vikash Jain,\n\n - MMRM - Ben Arancibia / Kevin Kunzmann\n", - "supporting": [ - "19June2023_files" - ], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/minutes/posts/20Nov2023/execute-results/html.json b/_freeze/minutes/posts/20Nov2023/execute-results/html.json deleted file mode 100644 index 14db7429a..000000000 --- a/_freeze/minutes/posts/20Nov2023/execute-results/html.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "hash": "5848d842a9422a6f8a3f4e5b99ddb336", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"End of year summary, plan for 2024\"\ndate: \"20 Nov 2023\"\noutput:\n html_document: \n toc: true\n toc_float: false\n toc_level: 5\n df_print: paged\n---\n\n\n\n# Attendees\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n`````{=html}\n\n\n \n \n \n\n
\n\n\n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n
attendees 20_nov_2023
Aiming Yang No
Ben Arancibia No
Brian Varney Yes
Christina Fillmore Yes
Chelsea Dickens Yes
Chi Zhang Yes
Clara Beck No
Aditee Dani No
Doug Kelkoff No
Dhvani Patel Yes
Filip Kabaj Yes
Harshal Khanolkar Yes
Iris Wu No
Jayashree Vedanayagam Yes
Joe Rickert No
Kyle Lee No
Leon Shi No
Lily Hsieh No
Lyn Taylor Yes
Martin Brown Yes
Mia Qi Yes
Michael Kane No
Michael Rimler Yes
Mike Stackhouse No
Min-Hua Jen No
Molly MacDiarmid Yes
Mona Mehraj No
Paula Rowley No
Soma Sekhar Sriadibhatla Yes
Vandana Yadav No
Vidya Gopal No
Vikash Jain Yes
Wilmar Igl Yes
Orla Doyle Yes
\n\n
\n\n`````\n:::\n:::\n\n\n\n# Agenda & Minutes\n\n- **End of year summary**\n\n Post & summary diagram of 2023 will go onto linkedIn soon, please like/share\\\n We would like to welcome **Harshal Khanolkar to become a co-lead of the PHUSE CAMIS Repo**. Harshal has been instrumental through 2023 helping Christina and I stay on track and making suggestions for improving the social media and knowledge sharing within the group.\n\n MMRM now updated & Complete for now.\n\n Github training will be on youtube following R/pharma conference soon\\\n \\\n **2023 : A Year of Progress for PHUSE CAMIS Working Group**\n\n As we draw towards the end of 2023, the PHUSE CAMIS Working Group reflect on their key progress and successes this year.\n\n The CAMIS repository went live in January 2023, drawing on the content from the PHUSE CSRMLW project.  This searchable repo compares analysis method implementations in software (CAMIS) such as SAS, R and python. \n\n The White Paper was published in June, which highlighted the importance of clearly specifying your analysis, such that it can be replicated in different software, and isn't relying on default options which can be different.\n\n For more complex analyses, it can still be hard to understand what defaults and algorithms your software is using, so the team focused 2023 on expanding our repo content, comparing SAS vs R methods.  By August, we had covered the following topics in the repo: quartiles, rounding, anova, mmrm, cmh, log-rank, cox-ph, mcnemar's test, kruskal-wallis test and logistic. October saw the launch of the sub-working group: CAMIS-Oncology, led by Somasekhar Sriadibhatla (AstraZeneca).  This team will focus specifically on oncology endpoints and analyzing them in SAS, R and Python.  The CAMIS team have expanded in membership during 2023 presenting at numerous conferences around the world. 
In November, we welcomed Harshal Khanolkar (NovoNordisk), to join the leadership team alongside Christina Fillmore (GSK) and Lyn Taylor (PAREXEL).  Our focus for 2024, will be on the creation of additional content for the repo, and sharing awareness of the project across the medical research and wider community.  We'd like to take this opportunity to thank all of our team members and contributors, and encourage everyone to check out the repository and help us to grow our content [CAMIS (psiaims.github.io)](https://psiaims.github.io/CAMIS/).  If you would like to join the team please get in touch through the repo.\n\n- **2023: lessons learnt: What we did well?**\n\n - Adverts to industry & linkedin posts. To be Continued into 2024 - engage more unis, internship projects, academia, posit conf, r users conf, target key conferences\n\n - Good sharing of conference content through the repo & improving the slides in an ongoing way.\n\n - Leadership & project progress with plans. Transparency of the work. Nice to get Agendas pre-meeting & minutes after meeting in timely manner.\n\n - Large range of individual contributions helping to grow repo. 1/2 contributors within the phuse group, but 1/2 outside of the group. So spreading the word is really helping us to get external contributions.\n\n - **ACTION:** Christina & Chi: Please can you improve the 2023 conference tab, create a 2024 tab which contains link to presentations within the github repo.\n\n- **2023: lessons learnt: What we didn't do so well?**\n\n - Time to get pull requests approved. Aim for 2024 to reduce the time so it's a maximum of 2 weeks. The delay was often caused by issues with renv. 
Christina is working with posit directly to improve renv issues & has already updated contributions guidance to help instruct people on how to contribute such as using Forks rather than needing github username access.\n\n - python - Delayed discussion in how to design the repo to store python content.\\\n **ACTION:** Vikash/ Soma / Filip - to meet with Lyn / Christina to agree format going forward.\\\n More discussion on CAMIS ONCO below.\\\n\n- **2023: lessons learnt: What is our focus for next year?**\n\n - More content\n\n - engage more unis, internship projects, academia, posit conf, r users conf, target key conferences\n\n - CAMIS ONCO white paper, workshop & python/sas/r comparison (See below)\n\n - CSS 2024 workshop, interaction with audience. 3-4 hrs hopefully. TBD at separate meeting, agenda workshop. Vikash, Harshal, Soma. 3-5 June.\n\n - Idea for 2024: Set up a method such that people with no git / github skills can still contribute to the project. Perhaps set up a CAMIS email address. Assign volunteers for someone to email, then the github experts can load it in. Decide best process to non-R, non-github people.\n\n- **CAMIS-ONCO**\\\n Plan to create cheat sheet for phuse 2024 - can go on CAMIS.\\\n Need more volunteers in order to address all the endpoints. Oncology / survival team members needed to join Soma & Team. AZ investing in ChatGPT AZ version, it can create python code from SAS.\\\n If AI can convert SAS code to python, we will then need people to test it. Volunteers needed to run in python. Can use the CAMIS repo data to test on hopefully but may need more detailed data? To see what data we currently have in the repo: see \"data-info\" and \"data\" folders.\n\n **Action:** Chi to have a look at the data folders, and decide better way to control/document data. Chi volunteered to help with Soma's test to test Python. Harshal may also be able to find volunteers at novonordisk. 
Starting point for python would be the default options vs R.\\\n \\\n **ACTION:** Lyn to Add to members list, who can run which languages & specialist areas (CAMIS-ONCO). **ACTION:** Soma to put poster into non_website_content/conferences.\n\n CAMIS: ONCO White paper: Needs to be progressed. invite all members to see if they can contribute. Set up regular meetings in 2024.\n\n- **Plan for 2024 : Project board in github: 5 categories**\n\n - CAMIS : Generic Method Implementation Team: More content\n - CAMIS-ONCO: items as above\n - User Experience/Demo Team\n - Social media & Engagement: Advertise/ Universities\n - bi-monthly post re: new content (newsletter: form to subscribe to newsletter so that when we post out they get informed). **ACTION:** lyn to check with PHUSE if we can do this, or if we want to ask R consortium to help similar to R validation hub email list.\\\n Also would be good to have blog post tab on repository. **ACTION:** Chi to help Christina with design. Idea would be to have 1 post which goes out on social media, to the emails subscription & on the website.\n\n - more relationship with ASA OpenStatsware - Orla Doyle.\\\n [openstatsware (rconsortium.github.io)](https://rconsortium.github.io/asa-biop-swe-wg/) **ACTION:** Lyn/Christina/Orla to set up call to discuss collaboration.\\\n - General Tasks:\n - Plan to review & accept content within 2 weeks of pull requests.\n - POSIT help with RENV situation\n - Search Engine Optimization: CAMIS full name on website? how do we become top hit ? Any volunteers to help with this let us know.\n\n- **ACTION:** Lyn to Cancel Dec Meeting 11^th^ Dec. 
next meeting 8th Jan 2024\n", - "supporting": [ - "20Nov2023_files" - ], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/minutes/posts/21Aug2023/execute-results/html.json b/_freeze/minutes/posts/21Aug2023/execute-results/html.json deleted file mode 100644 index 07eb60b2c..000000000 --- a/_freeze/minutes/posts/21Aug2023/execute-results/html.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "hash": "752069ee5b655f3e0ee8f1b91b13f53f", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"FDA quartely meeting, FDA CSS, SDEs, website & conference plans\"\ndate: \"21 Aug 2023\"\noutput:\n html_document: \n toc: true\n toc_float: false\n toc_level: 5\n df_print: paged\n---\n\n\n\n# Attendees\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n`````{=html}\n\n\n \n \n \n\n
\n\n\n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n
attendees 21_aug_2023
Aiming Yang No
Ben Arancibia No
Brian Varney Yes
Christina Fillmore Yes
Chelsea Dickens Yes
Chi Zhang Yes
Clara Beck No
Aditee Dani Yes
Doug Kelkoff No
Dhvani Patel No
Filip Kabaj Yes
Harshal Khanolkar Yes
Iris Wu No
Jayashree Vedanayagam Yes
Joe Rickert No
Kyle Lee No
Leon Shi No
Lily Hsieh Yes
Lyn Taylor Yes
Martin Brown Yes
Mia Qi Yes
Michael Kane No
Michael Rimler No
Mike Stackhouse No
Min-Hua Jen No
Molly MacDiarmid Yes
Mona Mehraj No
Paula Rowley No
Soma Sekhar Sriadibhatla Yes
Vandana Yadav Yes
Vidya Gopal No
Vikash Jain Yes
Wilmar Igl Yes
Orla Doyle No
\n\n
\n\n`````\n:::\n:::\n\n\n\n# Agenda\n\n**PHUSE FDA CSS Poster acceptance & white paper planning:** Soma Sekhar\n\n**Social Media update :** Harshal\n\n**PHUSE SDEs: Missisauga: June 8^th^ feedback:** Jayashreee\n\n**Preparation for PHUSE FDA Quarterly meeting 13^th^ sept:**  **Questions/Slides feedback:** Lyn\n\n**Website** Christina/ All\n\n- New Role: \"Content curation lead\"\n\n - We have been missing some posts when added to issues/discussion pages on website\n\n - This role will Monitor the  \"Discussion\" and \"Issues\" pages of the repo, and help to raise at each meeting where we need volunteers to answer questions/ add to discussion.\\\n We will add a Standard agenda item lead by the \"Content curation lead\" to go through issues, & assign to people / close down issues/ discussions.\\\n If you would like to volunteer please let Lyn / Christina know.\n\n- How can we encourage creation of more content?\n\n\n\n- What areas are key for us to focus on\n\n- MMRM update\n\n- github training plan (R/Pharma workshop & PSI training course)\n\n**Upcoming conference planning**.\n\n- RSS 7^th^ Sept: Lyn presentation\n\n\n\n- POSIT Conf: Lyn to reach out to Juliane / Doug to ask to include slide for CAMIS\n\n- PHUSE SDE New York: Oct 16^th^ :  Aiming\n\n# Meeting minutes\n\n**Review of Action log**\n\n| Action | Assigned to | Status |\n|--------|-------------|--------|\n| | | |\n| | | |\n| | | |\n\n**Welcome to new members**: Chi Zhang & Filip Kabaj\n\n**PHUSE SDEs: Missisauga: June 8^th^ feedback:** Jayashreee\n\nGPT chat & machine learning for oncology presentations, needs for guidelines on how to use next level tools were discussed. Jayashree presented on behalf of CAMIS but on a Shiny App & got good questions including highlighting that for Endpoint /efficacy analysis they may require very specific standards so not easy to be generic/default. 
Chris Hurley (PHUSE SDE) also mentioned CAMIS which is great exposure for us.\n\n**Preparation for PHUSE FDA Quarterly meeting 27^th^ sept:**  **Questions/Slides feedback:** Lyn/ Harshal\n\nMeeting was postponed by 4 weeks so we have time to prepare a short survey and send out on social media. Harshal went through the proposed questionnaire. Filip suggested Q2 to refer to frequency more specifically. Harshal to update & distribute\n\n**Social Media update :** Harshal\n\nHarshal has posted the PSI poster post to social media. Going forward the proposal is to run a series of posts to focus on the content on the website - perhaps a short post just to say have you seen this new content and provide links.\n\nRE: Workshop - **FDA CSS event (5-7 June):** could run a comparison of SAS vs R workshop. Could focus on a set of issues & work though them make content & resolve issues. Could turn to linkedIn to ask wider community to vote for the biggest issue outstanding that they'd like to look into and select these for resolving at the workshop. See item below, as can discuss with Soma following this years FDA CSS event.\n\n**Website** Christina/ All\n\n- Thank you ALL!! Much content has been recenly pushed to the website.\n\n- New Role needed for a \"Content curation lead\"\n\n - We have been missing some posts when added to issues/discussion pages on website\n\n This role will Monitor the  \"Discussion\" and \"Issues\" pages of the repo, and help to raise at each meeting where we need volunteers to answer questions/ add to discussion.\\\n We will add a Standard agenda item lead by the \"Content curation lead\" to go through issues, & assign to people / close down issues/ discussions.\n\n Jayashree & Chi volunteered to take on the role & to help monitor the repo activity. Lyn & Christina can put together guidance of what's needed. 
Currently it's just the issues & discussion, as pull requests are currently Ok being approved by Lyn & Christina as it's a bit tricky to make sure it fits in with the repo and doens't break anything!\n\n Chi suggested that Christina check out projects to see if that would help to monitor whose doing what - may not work if can only be accessed by those already with granted access as we want anyone to be able to assign themselves. ACTION Christina to: Change the readme to say how to assign yourselves to content tasks.\n\n- How can we encourage creation of more content? / What areas are key for us to focus on\n\n - Great increase in content pre-meeting so Ok to grow organically for now.\n\n - MMRM update : Christina to add link to MMRM website to cross reference.\n\n- github training plan (R/Pharma workshop - free to attend in end October & PSI training course - series of session each week for x weeks, & PSI Conference Amsterdam - workshop 1.5 hrs.)\n\n**PHUSE FDA CSS Poster acceptance (sept 20th) & white paper planning/ CAMIS ONCO:** Soma Sekhar\n\nSoma demonstrated the poster which he'll present with Vikash at CSS. Focused on Solid tumors OS/PFS but could broaden CAMIS ONCO with time to include other cancer.\n\nLonger term plan to create a white paper and to load survival analysis in python to repo.\n\nSoma to work with Harshil & others, to plan a sub team to work on CSS workshop for June 2023.\n\nLyn asked if there is demand for packages to be written that do standard stats analysis? The difficulty with this is how to standardise the programming and what it adds in addition to existing packages. 
It may not be worthwhile as options need to be considered so can't automate.\n\n**Upcoming conference planning**.\n\n- RSS 7^th^ Sept: Lyn presentation\n\n\n\n- POSIT Conf: Lyn to reach out to Juliane / Doug to ask to include slide for CAMIS\n\n- PHUSE SDE New York: Oct 16^th^ :  Aiming\n\n- R/Pharma - Christina submitted presentation\n\n**AOQ/AOB - None.**\n", - "supporting": [ - "21Aug2023_files" - ], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/minutes/posts/23Jan2023/execute-results/html.json b/_freeze/minutes/posts/23Jan2023/execute-results/html.json deleted file mode 100644 index 5b6ab88c6..000000000 --- a/_freeze/minutes/posts/23Jan2023/execute-results/html.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "hash": "674a7439d033b66117008bd722a3499d", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"New Website Discussion\"\ndate: \"23Jan2023\"\noutput:\n html_document: \n toc: true\n toc_float: false\n toc_level: 5\n df_print: paged\n---\n\n\n\n# Attendees\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n`````{=html}\n\n\n \n \n \n\n
\n\n\n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n
attendees 23_Jan_2023
Aiming Yang Yes
Ben Arancibia Yes
Brian Varney Yes
Christina Fillmore Yes
Chelsea Dickens No
Chi Zhang No
Clara Beck No
Aditee Dani No
Doug Kelkoff No
Dhvani Patel No
Filip Kabaj No
Harshal Khanolkar No
Iris Wu No
Jayashree Vedanayagam Yes
Joe Rickert Yes
Kyle Lee Yes
Leon Shi Yes
Lily Hsieh Yes
Lyn Taylor Yes
Martin Brown Yes
Mia Qi Yes
Michael Kane No
Michael Rimler Yes
Mike Stackhouse No
Min-Hua Jen Yes
Molly MacDiarmid Yes
Mona Mehraj No
Paula Rowley No
Soma Sekhar Sriadibhatla Yes
Vandana Yadav No
Vidya Gopal No
Vikash Jain No
Wilmar Igl No
Orla Doyle No
\n\n
\n\n`````\n:::\n:::\n\n\n\n# Meeting minutes\n\n## Christina provided a summary of work to date on the website\n\nRepo now live: $$https://psiaims.github.io/CAMIS/$$\n\nPrimary mode of navigation will be the table of contents..\n\nComprehensive Search function is available to supplement the use of the TOC.\n\nThe website is build from 3 folders in github:\\\nR SAS Comp\n\nThese folders, map to the columns of the table, I.e. everything about R is in Quarto files under R.\n\nComp folder: for the Comparison -- name sure you name the two software you are using r-sas - so we can use this when dynamically selecting.\n\nIn future we can add Python / Julia directories.\n\nThe idea would be for people to use the: $$CAMIS/templates/R_template.qmd$$ - A template of how to write documentation for the R part of the site. They'd Edit template & save it back into the R folder naming it clearly for what it is. Template should also contain name packages being used at start of each method comparison. It'd be difficult to be exhaustive with all the survival analysis packages i.e. accelerated failure time packages, etc.., but as long as stated hopefully can grow over time.\n\nThe Data-info folder -- contains description of all data being used for the comparisons. Going forward if different data used, the information about the data would be put into this folder. This allows the data description to sit outside of the comparison folders & where possible same data be used across comparisons.\n\n### Questions & Discussion\n\nJoe & Michael raised that the About tab which has information about the project is out of date, so should be updated. We also have no detail on the driving mechanism... I.e. what we would like from collaborators. Add \"How to collaborate\" button.\n\nItems to be discussed further which may need to be included in the site:\n\n1. update Methods: needs to make it more robust to future uploads - i.e. topics within linear models? 
(Sub categories) focus on methods, but how sort the methods for inclusion of all in future\n\n2. Rating the software discrepancies. I..e How severe the difference is?\n\n3. Need to create a template for comparisons. Discuss if we would have a purpose/highlight of comparison/ summary/conclusions at the top first. Also if we put List of R packages this comparison uses (use Tags?) - Need to consider if package superseeded/ multiple packages whether they go in 1 document or multiple.\n\n4. How to expand to sort by: therapeutic area relevance (would be good to link from methods to Oncology somehow\n\n5. What if a different package.., does same analysis... have to make it clear which package is being used & include multiple packages. It was agreed that as long as we are clear on what we have compared then Its ok to not be all inclusive. That can be added by other collaborators later. It was noted by Kyle that for survival (I.e. accelerated failure time packages), it may be hard to include all. The recommendation is to start with 1 and can expand further as it grows. We may have to re-think website design as it grows to accommodate. 
Hence why we want everything written in smaller parts to can easily manipulate going forward.\n\n## Min-Hua provided an update on the white paper:\n\nIn its final stages of review by team, and will now be sent for wider review.\n\n# Summary of actions: Due early march.\n\n### Lyn: review/update the \"About\" tab on website to ensure it's accuracy.\n\nShow who it's sponsored by / who the group are that are supporting it and ensure that it's legally ok: PHUSE, PSI and ASA -- Trancelerate?\n\n### Christina: Add \"How to collaborate\" tab & info.\n\n### Convert the CSRMLW content into the new format CAMIS site.\n\nDo let Lyn know if you need help, advice and we can also ask for other volunteers to assist:\n\n### Mia Qi/ Min-Hua Jen: Survival\n\n### Brian Varney: Linear models\n\n### Ben Arancibia: Mixed models: -- NOTE: new MMRM package to also be added if possible.\n\n### Aiming Yang: CMH: -- set up a separate meeting with Lyn/Christina to discuss how.\n\n### All: Continue to document any differences you find ready to load to new website. 
Follow R template so we can get them loaded as soon as we have the website ready\n\n### Soma Sekhar: Consider starting to set up CAMIS-ONCO group to focus on Oncology analyses differences\n\n# Next meeting: 13th Feb 2023: 4:30 UTC, 11:30 EST.\n", - "supporting": [], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/minutes/posts/8Apr2024/execute-results/html.json b/_freeze/minutes/posts/8Apr2024/execute-results/html.json deleted file mode 100644 index 6181db569..000000000 --- a/_freeze/minutes/posts/8Apr2024/execute-results/html.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "hash": "11441d4d69362522d7027f2237054954", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"General linear models is complete\"\ndate: \"8 Apr 2024\"\noutput:\n html_document: \n toc: true\n toc_float: false\n toc_level: 5\n df_print: paged\n---\n\n\n\n# Attendees\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n`````{=html}\n\n\n \n \n \n\n
\n\n\n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n
attendees 08_Apr_24
Christina Fillmore Yes
Lyn Taylor Yes
Molly MacDiarmid Yes
Brian Varney Yes
Chi Zhang Yes
Orla Doyle No
Harshal Khanolkar No
Lily Hseih No
Filip Kabaj No
Martin Brown Yes
Min-Hua Jen No
Sarah Rathwell Yes
Kasa Andras No
Aditee Dani No
Keaven Anderson Yes
Benjamin Arancibia Yes
Wilmar Igl No
Vikash Jain Yes
Mia Qi No
Leon Shi Yes
Vandaya Yadav Yes
Stephen McCawille Yes
Vikrant Vijay No
Vidya Gopal No
Dhvani Patel No
Kyle Lee No
Chelsea Dickens No
David Bosak Yes
Michael Kane Yes
Lukas Brausch Yes
Michael Walshe Yes
Seemani Abhilipsa Yes
Aiming Yang Yes
Cuifeng Yin Yes
Todd Coffey No
Jayashree Vedanayagam No
Ashwath Gadapa No
Miriam Amor No
Anwesha Roy No
Samrit Pramanik No
Agnieszka Tomczyk No
Prem Kant Shekhar No
Sunil No
Kate Booth No
Peilin Zhou No
\n\n
\n\n`````\n:::\n:::\n\n\n\n# Agenda & Minutes\n\n## This months achievements\n\nGeneral Linear Models is Complete !\n\nThanks David & others for great contributions this month.\n\nWe are seeing more python examples coming through too.\n\n## Checklist for pull requests\n\nChristina will add something to help ensure a smooth process\n\n## CSS Connect progress update -- Soma/ Vikash\n\nVikash to reach out to Soma. Harshil unlikely to be available in person, so we can look for further support to attend & help you if needed.\n\n## Any other Conferences that people have applied for?\n\n- Lukus & Stephen are attending PHUSE EU Connect with other topics, but if others going CAMIS members could meet up in person, TBC nearer the date\n\n## Current assignments\n\nLyn to put together Table so we can easily see whose doing what.\n\n- Sarah Rathwell & Christina volunteered to work on Kolmogorov-Smirnov test\n\n- Lukas Brausch to pick up Python one sample t-test, paired & 2 sample t-test\n\n- Chi to reach out to open stats ware to see if they can improve MMRM & add any bayesian MMRM\n\n- Leon to look at Reference-based MI (using either SAS macro, or procedures directly).\n\n- Keaven/Martin to look at group sequential design\n\n- CAMIS-ONCO no kick off yet, so Stephen McCawille will start to look at SAS Accelerated failure time models, Volunteer needed to run the same in R.\n\n- Todd/Cuifeng will be looking at Non linear models\n\n- ALL - if you are looking for an assignment reach out to Christina & Lyn and we can group you together to collaborate.\n", - "supporting": [ - "8Apr2024_files" - ], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/minutes/posts/8Jan2024/execute-results/html.json b/_freeze/minutes/posts/8Jan2024/execute-results/html.json deleted file mode 100644 index f5f6a8a68..000000000 --- 
a/_freeze/minutes/posts/8Jan2024/execute-results/html.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "hash": "07462205e674787b72e92aa1ca287fff", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"CAMIS-ONCO, Conferences, Academic & regulatory input plans\"\ndate: \"8 Jan 2024\"\noutput:\n html_document: \n toc: true\n toc_float: false\n toc_level: 5\n df_print: paged\n---\n\n\n\n# Attendees\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n`````{=html}\n\n\n \n \n \n\n
\n\n\n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n
attendees 08_Jan_24
Christina Fillmore Yes
Lyn Taylor Yes
Molly MacDiarmid Yes
Brian Varney Yes
Chi Zhang Yes
Orla Doyle Yes
Harshal Khanolkar Yes
Lily Hseih Yes
Filip Kabaj Yes
Martin Brown Yes
Min-Hua Jen Yes
Sarah Rathwell Yes
Kasa Andras Yes
Aditee Dani Yes
Keaven Anderson Yes
Benjamin Arancibia No
Wilmar Igl No
Vikash Jain No
Mia Qi No
Leon Shi No
Vandaya Yadav No
Stephen McCawille No
Vikrant Vijay No
Vidya Gopal No
Dhvani Patel No
Kyle Lee No
Chelsea Dickens No
David Bosak No
Michael Kane No
Lukas Brausch No
Michael Walshe No
Seemani Abhilipsa No
Aiming Yang No
Cuifeng Yin No
Todd Coffey No
Jayashree Vedanayagam No
Ashwath Gadapa No
Miriam Amor No
Anwesha Roy No
Samrit Pramanik No
Agnieszka Tomczyk No
Prem Kant Shekhar No
Sunil No
Kate Booth No
Peilin Zhou No
\n\n
\n\n`````\n:::\n:::\n\n\n\n# Agenda & Minutes\n\n- **CAMIS- ONCO: Update on progress & next steps to include:**\\\n Regular meetings Cheat sheet for PHUSE 2024 PHUSE CSS planning (workshop in June). Python volunteers & code creation. White paper.\\\n ACTION: Lyn to follow up with Soma/Vikesh to assess status of CAMIS-ONCO. Also set up meeting with team to discuss python content going into website\n\n- **Other Conference planning**\\\n Lyn will update the conference tab on the repo.\\\n PHUSE US Connect (Soma/ Vikesh) and Brian are attending.\n\n UseR is now open for abstract submission (deadline mid-march). Any volunteers to submit /attend. Salzburg (Europe) 8-11 July. Chi will be going, and volunteers to submit an abstract for us.\n\n PharmaSUG - Abstracts due 15th January. Conference is: May 19th-22nd Baltimore. Volunteers required to submit abstract if possible.\n\n- **Content updates**\\\n Anyone with any questions about what they are working on or how to assign themselves?\n\n - MMRM - Volunteer please to look at Proc Mixed vs Proc GLIMMIX and use this to expand the SAS/mmrm.qmd file.\n\n - Keaven Anderson (Merck) - will start to look at SAS vs R for sample size / group sequential design / power. They use EAST, gsDesign, but others use rpact. Does anyone have experience of this (& using SAS for sample size)? Lyn & Martin & Keaven will meet to discuss on Friday.\n\n - Christina: will add sales pitch to Website - Why CAMIS !? + re-arrangment of some of the content.\n\n- **Objective to get more regulatory input**\\\n Work with PSI AIMS as they plan a EMA regulatory panel discussion on R Any other ideas?\\\n FDA/ Other regulators input/discussion.\n\n- **Git training plan for 2024** PSI conference abstract rejected. Creation of a short training session (like the R/pharma workshop) or 6 week 2 hr/ week course. ACTION: Lyn/Christina/Martin to follow up with PSI re: delivery of training. 
Restart GIT training meetings (Christina/Alex/ Irene)\n\n- **Interaction with more Academics & Universities**\\\n Please can you present/advertise to your universities contacts. Anyone got contacts they can utilize? Ideas for spreading the word? Lyn doing Presentation at University of Sheffield on 28th Feb for RSS local group.\n\n- **Academia Projects** ALL: to think about possible dissertation projects. Plan to list available projects in repo & write descriptions of what the project would entail such that universities students can use them at dissertation projects Prof Richard Stevens (Oxford) is open to projects if we have any. Also Novonordisk : working with Alberg Denmark university to have a proposal for project.\n\n- **Raising awareness within companies to flag issues to CAMIS**\\\n ALL: brainstorm how we can spread awareness within our organization & wider community\n\n EFSPI - PSI strategy day / heads meeting\\\n ASA OpenStatsware - Orla Doyle: Focus is more on package development. If a gap comes up we could make them aware package is needed. Can also look to sassy r package to see if that replicates SAS (if it's right to do so) David Bosak. Lyn meeting with David next later this week.\\\n \\\n Plan for next Blogs:\n\n1) add blog tab to repo, then when we post we can link through.\n2) Ideas for next blogs? - perhaps pick a topic we have content already for & post blog on it.\n\n- **Funding requirements** NOTE: We can apply for a grant for any funding if anyone sees an opportunity to progress our work quicker through this method. NOTE: if any university project or individuals need funding to do this CAMIS work (creation of content), then we do have an option to apply to the R Consortium for funding.\n\n- **AOB**\n\n - Linear Regression SAS & R, text are now live on website. Results match, but would be good to add a COMP file which just says what we checked & what matched... 
for example incase something comes up in future that does not match.\n", - "supporting": [ - "8Jan2024_files" - ], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/minutes/posts/9Dec2024/execute-results/html.json b/_freeze/minutes/posts/9Dec2024/execute-results/html.json deleted file mode 100644 index ad9c2f516..000000000 --- a/_freeze/minutes/posts/9Dec2024/execute-results/html.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "hash": "6795495b062b0e4d88b920fb597a5b27", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"CAMIS End of Year Thank you\"\ndate: \"09 December 2024\"\noutput:\n html_document: \n toc: true\n toc_float: false\n toc_level: 5\n df_print: paged\n---\n\n\n\n# Attendees\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n`````{=html}\n\n\n \n \n \n\n
\n\n\n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n
attendees 09_Dec_24
Christina Fillmore Yes
Lyn Taylor Yes
Molly MacDiarmid Yes
Brian Varney Yes
Chi Zhang No
Orla Doyle No
Harshal Khanolkar Yes
Lily Hseih No
Filip Kabaj No
Martin Brown No
Min-Hua Jen No
Sarah Rathwell No
Kasa Andras No
Aditee Dani No
Keaven Anderson No
Benjamin Arancibia Yes
Wilmar Igl No
Vikash Jain No
Mia Qi No
Leon Shi No
Vandaya Yadav No
Stephen McCawille Yes
Vikrant Vijay No
Vidya Gopal No
Dhvani Patel No
Kyle Lee No
Chelsea Dickens No
David Bosak No
Michael Kane Yes
Lukas Brausch No
Michael Walshe Yes
Seemani Abhilipsa No
Aiming Yang No
Cuifeng Yin No
Todd Coffey No
Jayashree Vedanayagam Yes
Ashwath Gadapa No
Miriam Amor Yes
Anwesha Roy Yes
Samrit Pramanik Yes
Agnieszka Tomczyk No
Prem Kant Shekhar No
Sunil No
Kate Booth Yes
Peilin Zhou Yes
\n\n
\n\n`````\n:::\n:::\n\n\n\n# Agenda & Minutes\n\n- 2024 resulted in 160 pull requests and 64 pages of new content\n\n- CAMIS 2024 Awards\n\n - Longest Serving Active members (from CSRMLW to CAMIS!): **Brian Varney, Min-Hua & Mia Qi**\n\n - Most Pull Requests Reviewed: **Orla Doyle**\n\n - Most Contributions (especially for python content): **Seemani Abhilipsa & Lukas Brausch**\n\n - Section Closer (for general linear model section): D**avid Bosak**\n\n - Expanding Project Remit: **Yuli Sidi & Nan Xiao** (EAST), and **Michael Walshe** (Survey stats)\n\n - Best Written (SAS and R cumulative incidence functions): **Lillian Yau**\n\n - Most Shocking Finding (epibasix package undocumented CI method): **Molly Mcdiarmid**\n\n - Rookie of the Year (From first PR to completing Wilcoxon signed rank section, presenting at PHUSE EU & winning Best Presentation award in the Analytics and Statistics Stream: **Agnieszka Tomczyk**\n\n**2025 Objectives**\n\nWorking group re: Improving Technical back end (CICD, tech team)\\\nVolunteers needed - So far Christina & Michael Walshe\n\nTo explore: - Posit workbench to improve rendering\n\n- When PRs come in, be able to render view before pulling in\n- Strategy for how to make Comparison pages more stable with respect to R version changes\n- Possibility to include checks which run the code & check for change\n\nExpand our influence (particular through representation in USA)\n\nKey Content\n\n- MMRM -- Stephen Waugh\n\n- Sample size - Agnieszka & Andisheh & Molly\n\n- Tobit regression - Yannick\n\n- CIs for Props & Logistic regression & RTSM- Lyn\n\nReview conference attendance at January 2025 meeting.\n\nAll to let us know any feedback or suggestions for 2025.\n", - "supporting": [ - "9Dec2024_files" - ], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/minutes/posts/9oct2023/execute-results/html.json 
b/_freeze/minutes/posts/9oct2023/execute-results/html.json deleted file mode 100644 index 8ea0876e3..000000000 --- a/_freeze/minutes/posts/9oct2023/execute-results/html.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "hash": "8cdfc68ff69e90c18695f8a689067dff", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"FDA quartely meeting, 1st survey feedback - general updates\"\ndate: \"09 Oct 2023\"\noutput:\n html_document: \n toc: true\n toc_float: false\n toc_level: 5\n df_print: paged\n---\n\n\n\n# Attendees\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n`````{=html}\n\n\n \n \n \n\n
\n\n\n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n
attendees 09_oct_2023
Aiming Yang No
Ben Arancibia No
Brian Varney Yes
Christina Fillmore No
Chelsea Dickens Yes
Chi Zhang Yes
Clara Beck No
Aditee Dani No
Doug Kelkoff No
Dhvani Patel Yes
Filip Kabaj No
Harshal Khanolkar Yes
Iris Wu No
Jayashree Vedanayagam No
Joe Rickert No
Kyle Lee No
Leon Shi No
Lily Hsieh Yes
Lyn Taylor Yes
Martin Brown Yes
Mia Qi Yes
Michael Kane No
Michael Rimler No
Mike Stackhouse No
Min-Hua Jen No
Molly MacDiarmid Yes
Mona Mehraj No
Paula Rowley No
Soma Sekhar Sriadibhatla No
Vandana Yadav Yes
Vidya Gopal Yes
Vikash Jain No
Wilmar Igl Yes
Orla Doyle No
\n\n
\n\n`````\n:::\n:::\n\n\n\n# Agenda\n\n**Welcome New Members:** Vikrant Vijay FDA, Ismael Rodriguez (Appsilon)\n\n**PHUSE SDE: Missisauga:** June 8th feedback: Jayashreee\n\n**CAMIS-ONCO update:** Soma Sekhar / Vikash Jain\n\nPHUSE US Connect 2024 planning (Poster accepted, will find out re: workshop oct 20th)\n\nWhite paper planning\n\n**FDA meeting update** **/survey results**.: presented to around 15 FDA representatives, but didn't get any questions/ comments on the call. However, follow the meeting, our slides were passed to others at the FDA and Vikrant Vijay got in touch to join the group. He's not available today, but will attend in future.\n\nQuestionnaire only got 16 responses, 9/16 had heard of CAMIS, 13/16 used R for GXP, 4/15 used python for GXP, 1/16 used Julia for GXP. 9/16 had experienced discrepancies whilst trying to replicate analysis between languages.\n\n**ACTION**: To re-run maybe just before CSS 2024 / or each year to assess progress and use for conference presentations.\n\nCommon programming challenges\n\n- Whilst executing Analysis in R, I come across challenge in Numerical Differences in Statistical results. Would it be great if Industry & regulatory work together to build Standard R package for Statistical Methods & details in CAMIS repository would be highly appreciated.\n\n- Ensuring reproducible environments and having people accept that different results for different implementations of an algorithm should perhaps be interpreted as a hint towards the accuracy, rather than one of the methods being wrong.\n\n- Standard deviation initially did not match between and SAS. 
Later resolved by using the type option\n\n- Challenges to figure out array of methods to replicate the same results across different software platforms/ Finding why resutls differ / unclear documentation/ discrepancy in values\n\n- LS means contrasts from GLMs or MMRMs between SAS and R (vis lme4/mmrm + emmeans)\n\n- Parsing issues, scalability issues and network crash.\n\n- Implementation of median seem to differ between R and SAS.... Sometimes joins in dplyr can also behave differently than i would expect with raw SQL\n\n- Different methodologies (e.g for sample size calculation0 and lack of non-standard methods in SAS (e.g sample size for adaptive design).\n\n**Social Media update** : Harshal\\\nNewsletter, (quarterly, or monthly) to advertise progress (ie. content we created) & conferences we are attending. Can we advertize ourself more to EMA, PMDA etc.. Can send to Frank Petavy (methodological working party). ACTION: Harshal/Lyn put togther summary for newsletter & send to Wilmar to reach out to Frank & others. Lyn to email David to see if any wider participation.\n\nhttps://www.ema.europa.eu/en/committees/working-parties-other-groups/chmp/methodology-working-party\n\n**Website** Christina/ Chi / Jayashree\n\n- All pull requests accepted & everything up to date\n\n- MMRM update - No update. ACTION: Chi Zhang: will follow up to see if we can get someone to add in MMRM package to our existing content.\n\n- github training plan (R/Pharma workshop & PSI training course) - ongoing prep for workshop & course through PSI AIMS team.\n\n- Agnieszka (PAREXEL) and Chi -- working on Wilcoxon test content for paired & unpaired data.\n\n- ACTION: Chi & Christina to talk about local rendering & renv issue with not having the packages to be able to render... 
once we know a fix, can write up and put on the website.\n\n**Upcoming conference planning**\n\n- PHUSE SDE New York: Oct 16th : Aiming\n\n- PHUSE US Connect: Soma/Vikash\n\n- R/Pharma -- Christina?\n\n- SESUG (South East SAS user group) late october 2023, Brian will present on CAMIS.\n\n- North Carolina - SDE if anyone wants to volunteer to attend let us know.\n\n**AOB** - None.\n", - "supporting": [ - "9oct2023_files" - ], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/minutes/posts/9sept2024/execute-results/html.json b/_freeze/minutes/posts/9sept2024/execute-results/html.json deleted file mode 100644 index 58dffad87..000000000 --- a/_freeze/minutes/posts/9sept2024/execute-results/html.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "hash": "df04c914405c12323ecba08b23982b45", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"Lessons learnt- Novartis Hackathon, Diversity Alliance, OSTCDA\"\ndate: \"9 Sept 2024\"\noutput:\n html_document: \n toc: true\n toc_float: false\n toc_level: 5\n df_print: paged\n---\n\n\n\n# Attendees\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n`````{=html}\n\n\n \n \n \n\n
\n\n\n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n
attendees 09_Sep_24
Christina Fillmore Yes
Lyn Taylor Yes
Molly MacDiarmid Yes
Brian Varney Yes
Chi Zhang No
Orla Doyle Yes
Harshal Khanolkar No
Lily Hseih No
Filip Kabaj No
Martin Brown Yes
Min-Hua Jen Yes
Sarah Rathwell No
Kasa Andras No
Aditee Dani No
Keaven Anderson Yes
Benjamin Arancibia No
Wilmar Igl No
Vikash Jain Yes
Mia Qi No
Leon Shi No
Vandaya Yadav No
Stephen McCawille Yes
Vikrant Vijay No
Vidya Gopal No
Dhvani Patel No
Kyle Lee No
Chelsea Dickens No
David Bosak No
Michael Kane Yes
Lukas Brausch No
Michael Walshe No
Seemani Abhilipsa Yes
Aiming Yang No
Cuifeng Yin No
Todd Coffey No
Jayashree Vedanayagam No
Ashwath Gadapa No
Miriam Amor Yes
Anwesha Roy Yes
Samrit Pramanik Yes
Agnieszka Tomczyk No
Prem Kant Shekhar Yes
Sunil No
Kate Booth No
Peilin Zhou No
\n\n
\n\n`````\n:::\n:::\n\n\n\n# Agenda & Minutes\n\n### Novartis Hackathon\n\nOrla presented back to the group on Novartis's Open-Source in Action: Hackathon. Key points were:\n\n- Aim: to encourage more people to be confident to work in open source and break down barriers in their contributing (such as through git training). To give people exposure to open-source resources that are applicable to their daily work as well as building their network with external experts.\n\n- How: Novartis open-source enablement team will hold hackathons on a regular basis selecting topics that have the potential to impact day-to-day work. External experts to guide Novartis employees on key initiatives and packages. This time CAMIS was selected with Christina providing support.\n\n- When: Prep session 16th July 2024, Intro to git training 17th July, then 2 weeks of hackathon w/c 22nd July and 29th July with support during daily office hours.\n\n- Who: 158 signed up from Advanced quantitative science (AQS), 100+ attended git training, 25+ submitted contributions. 8 SAS, 7 R, 1 Python, 3 SAS vs R and 1 template\n\n- Feedback: Awards for First PR (Quick draw), most closed PRs (Busy bees), Most complex methodology (trailblazing) and Above and beyond (thinking beyond the methods).\n\n- Learnings:\n\n - Timing: aligned to 'summer rejuvenation' period where Novartis get 2 weeks to catch up with reduced meeting loads\n\n - Training: git and renv were a steep learning curve for newcomers, but daily office hours and teams channels helped. Little direction was needed to write content in quarto.\n\n - CAMIS: the natural structure of CAMIS minimized prep work as the gaps in the table show what content is missing. It provided a nice culture to work in, focusing on good quality content over perfection. It was inclusive as it's a multi-language project so could include people who only work in SAS or in R.\n\n - CAMIS repo cloning often hangs if network is busy. 
Suggestion to reduce size of repo by removing the powerpoint presentations which would improve cloning.\n\n### Content update\n\n- Suggest to add page on how to run/ conduct a hackathon for CAMIS\n\n- Only 4 current open pull requests which all require changes by author so we are up to date\n\n- We still have a lot of open issues, but are making progress. Aim to get issues to 1 page by End of year\n\n### Diversity Alliance Hackathon\n\nThe R in Pharma diversity alliance aspire to be an inclusive R community for developers who wok in the pharma space. Their goal is to provide a welcoming, equitable and supportive space for people to upskill, share knowledge and build a community of diverse voices.\n\nThey are holding an upcoming hackathon as part of the R in Pharma conference, where anyone who considers themselves as under-represented in the R in Pharma space, can participate. The event requires volunteers experienced in open source collaboration to lead attendees in small groups helping them to contribute open source collaborations. If you would like more information, to volunteer or attend, please contact Christina @statasaurus\n\nSee [here](https://opensourceinpharma.github.io/RinPharmaDiversityAlliance/) for more information\n\n### Conferences\n\nThe conferences tab is up to date, we didn't get any volunteers to represent us at PHUSE US connect.\n\n### OSTCDA numeric matching page\n\nMichael Rimler is putting together a repo containing information about \"Open Source Technology in Clinical Data Analysis (OSTCDA) for PHUSE. We now have a 'numerical matching' page [here](https://phuse-org.github.io/OSTCDA/match.html).\\\nPlease review and feel free to suggest changes to the content. Contact Lyn @drlyntaylor for any further information.\n\n**AOB**\n\n- Sarah raised an issue regarding retrieval of the documentation associated with 'old' versions of the R 'stats' package. 
For contributed packages, the documentation is present, but she's struggling to find the same for the 'stats' package. ACTION: Christina to help investigate.\\\n The issue highlighted that we may have 1 version of a package which mis-matched with SAS, but that later versions would have different functionality, and may match. Keeping the repo up to date will be a challenge, but hopefully if people are using it, issues will be identified and corrected.\\\n It's a reminder to ensure the code runs, from the data wherever possible. An issue for the SASvsR comparison pages is the comparison table is often typed in, such that if numbers change it wont be automatically updated. This is something we could consider in future. Perhaps running the code to populate the comparison table, and putting out a FAIL if conclusion changes from previous run, highlighting we need to update our written text.\n", - "supporting": [ - "9sept2024_files" - ], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/python/MANOVA/execute-results/html.json b/_freeze/python/MANOVA/execute-results/html.json index 51dfae259..83c01bfd1 100644 --- a/_freeze/python/MANOVA/execute-results/html.json +++ b/_freeze/python/MANOVA/execute-results/html.json @@ -2,9 +2,9 @@ "hash": "6a277c7a8f909e3b187f3c0264aa3ece", "result": { "engine": "jupyter", - "markdown": "---\ntitle: \"MANOVA\"\n---\n\n\n### MANOVA in Python\n\nMultivariate analysis of variance **(MANOVA)** is a statistical technique used to examine group mean difference of several dependent variables at once while accounting for correlations between the variables.By considering multiple dependent variables simultaneously, MANOVA provides a more comprehensive understanding of group differences and patterns. 
In context of python, statsmodels library can be used to implement MANOVA.\n\nThe **from_formula()** function is the recommended method to specify a model and simplifies testing without needing to manually configure the contrast matrices.\n\n**Example 39.6 Multivariate Analysis of Variance** from [SAS MANOVA User Guide](https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_glm_sect051.htm)\n\nThis example employs multivariate analysis of variance (MANOVA) to measure differences in the chemical characteristics of ancient pottery found at four kiln sites in Great Britain. The data are from Tubb, Parker, and Nickless (1980), as reported in Hand et al. (1994).\n\nFor each of 26 samples of pottery, the percentages of oxides of five metals are measured. The following statements create the data set and invoke the GLM procedure to perform a one-way MANOVA. Additionally, it is of interest to know whether the pottery from one site in Wales (Llanederyn) differs from the samples from other sites; a CONTRAST statement is used to test this hypothesis.\n\n::: {#fb7e7c84 .cell execution_count=1}\n``` {.python .cell-code}\nimport pandas as pd\nfrom statsmodels.multivariate.manova import MANOVA\n\ndf= pd.read_csv(\"../data/manova1.csv\")\ndf.rename(columns={'al':'Al','fe':'Fe','mg':'Mg','ca ':'Ca','na':'Na'},inplace=True)\n\nmanova = MANOVA.from_formula('Al + Fe + Mg + Ca + Na ~ site', data=df)\nresult = manova.mv_test()\nprint(result)\n```\n\n::: {.cell-output .cell-output-stdout}\n```\n Multivariate linear model\n===============================================================\n \n---------------------------------------------------------------\n Intercept Value Num DF Den DF F Value Pr > F\n---------------------------------------------------------------\n Wilks' lambda 0.0300 5.0000 18.0000 116.5838 0.0000\n Pillai's trace 0.9700 5.0000 18.0000 116.5838 0.0000\n Hotelling-Lawley trace 32.3844 5.0000 18.0000 116.5838 0.0000\n Roy's greatest root 
32.3844 5.0000 18.0000 116.5838 0.0000\n---------------------------------------------------------------\n \n---------------------------------------------------------------\n site Value Num DF Den DF F Value Pr > F\n---------------------------------------------------------------\n Wilks' lambda 0.0123 15.0000 50.0915 13.0885 0.0000\n Pillai's trace 1.5539 15.0000 60.0000 4.2984 0.0000\n Hotelling-Lawley trace 35.4388 15.0000 29.1304 40.5880 0.0000\n Roy's greatest root 34.1611 5.0000 20.0000 136.6445 0.0000\n===============================================================\n\n```\n:::\n:::\n\n\nThe Wilki's lambda test evaluates the significance of group difference across several dependent variables. A lower Wilk's Lambda value suggest more evidence of group difference.\n\nThe Pillai’s Trace test statistics is statistically significant \\[Pillai’s Trace = 1.55, F(6, 72) = 4.29, p \\< 0.001\\] and indicates that sites has a statistically significant association with all the listed elements.\n\n**NOTE**: if you feel you can help with the above discrepancy please contribute to the CAMIS repo by following the instructions on the [contributions page](../contribution/contribution.qmd).\n\n", + "markdown": "---\ntitle: \"MANOVA\"\n---\n\n### MANOVA in Python\n\nMultivariate analysis of variance **(MANOVA)** is a statistical technique used to examine group mean difference of several dependent variables at once while accounting for correlations between the variables.By considering multiple dependent variables simultaneously, MANOVA provides a more comprehensive understanding of group differences and patterns. 
In context of python, statsmodels library can be used to implement MANOVA.\n\nThe **from_formula()** function is the recommended method to specify a model and simplifies testing without needing to manually configure the contrast matrices.\n\n**Example 39.6 Multivariate Analysis of Variance** from [SAS MANOVA User Guide](https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_glm_sect051.htm)\n\nThis example employs multivariate analysis of variance (MANOVA) to measure differences in the chemical characteristics of ancient pottery found at four kiln sites in Great Britain. The data are from Tubb, Parker, and Nickless (1980), as reported in Hand et al. (1994).\n\nFor each of 26 samples of pottery, the percentages of oxides of five metals are measured. The following statements create the data set and invoke the GLM procedure to perform a one-way MANOVA. Additionally, it is of interest to know whether the pottery from one site in Wales (Llanederyn) differs from the samples from other sites; a CONTRAST statement is used to test this hypothesis.\n\n::: {#7d6b1a80 .cell execution_count=1}\n``` {.python .cell-code}\nimport pandas as pd\nfrom statsmodels.multivariate.manova import MANOVA\n\ndf= pd.read_csv(\"../data/manova1.csv\")\ndf.rename(columns={'al':'Al','fe':'Fe','mg':'Mg','ca ':'Ca','na':'Na'},inplace=True)\n\nmanova = MANOVA.from_formula('Al + Fe + Mg + Ca + Na ~ site', data=df)\nresult = manova.mv_test()\nprint(result)\n```\n\n::: {.cell-output .cell-output-stdout}\n```\n Multivariate linear model\n===============================================================\n \n---------------------------------------------------------------\n Intercept Value Num DF Den DF F Value Pr > F\n---------------------------------------------------------------\n Wilks' lambda 0.0300 5.0000 18.0000 116.5838 0.0000\n Pillai's trace 0.9700 5.0000 18.0000 116.5838 0.0000\n Hotelling-Lawley trace 32.3844 5.0000 18.0000 116.5838 0.0000\n Roy's greatest root 
32.3844 5.0000 18.0000 116.5838 0.0000\n---------------------------------------------------------------\n \n---------------------------------------------------------------\n site Value Num DF Den DF F Value Pr > F\n---------------------------------------------------------------\n Wilks' lambda 0.0123 15.0000 50.0915 13.0885 0.0000\n Pillai's trace 1.5539 15.0000 60.0000 4.2984 0.0000\n Hotelling-Lawley trace 35.4388 15.0000 29.1304 40.5880 0.0000\n Roy's greatest root 34.1611 5.0000 20.0000 136.6445 0.0000\n===============================================================\n\n```\n:::\n:::\n\n\nThe Wilki's lambda test evaluates the significance of group difference across several dependent variables. A lower Wilk's Lambda value suggest more evidence of group difference.\n\nThe Pillai’s Trace test statistics is statistically significant \\[Pillai’s Trace = 1.55, F(6, 72) = 4.29, p \\< 0.001\\] and indicates that sites has a statistically significant association with all the listed elements.\n\n**NOTE**: if you feel you can help with the above discrepancy please contribute to the CAMIS repo by following the instructions on the [contributions page](../contribution/contribution.qmd).\n\n", "supporting": [ - "MANOVA_files/figure-html" + "MANOVA_files" ], "filters": [], "includes": {} diff --git a/_freeze/python/Rounding/execute-results/html.json b/_freeze/python/Rounding/execute-results/html.json index 080c76b89..a32805558 100644 --- a/_freeze/python/Rounding/execute-results/html.json +++ b/_freeze/python/Rounding/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "9515c302620831a81785e24c3558e448", "result": { "engine": "jupyter", - "markdown": "---\ntitle: \"Rounding in Python\"\noutput: html_document\n---\n\n\nPython has a built-in **round()** function that takes two numeric arguments, *number* and *ndigits*, and returns a floating point number that is a rounded version of the number up to the specified number of decimals.\n\nThe default number of decimal is 0, meaning 
that the function will return the nearest integer.\n\nThe round() function in Python will round to the nearest whole number and 'rounding to the even number' when equidistant, meaning that exactly 12.5 rounds to the integer 12.\n\n::: {#2e3a432a .cell execution_count=1}\n``` {.python .cell-code}\n# For integers\nx= 12\nprint(round(x))\n \n# For floating point\nx= 12.3\nprint(round(22.7)) \n \n# if the second parameter is present\n \n# when the (ndigit+1)th digit is =5 \nx=4.465\nprint(round(x, 2)) \n \n# when the (ndigit+1)th digit is >=5 \nx=4.476\nprint(round(x, 2)) \n \n# when the (ndigit+1)th digit is <5 \nx=4.473\nprint(round(x, 2))\n\n```\n\n::: {.cell-output .cell-output-stdout}\n```\n12\n23\n4.46\n4.48\n4.47\n```\n:::\n:::\n\n\n", + "markdown": "---\ntitle: \"Rounding in Python\"\noutput: html_document\n---\n\nPython has a built-in **round()** function that takes two numeric arguments, *number* and *ndigits*, and returns a floating point number that is a rounded version of the number up to the specified number of decimals.\n\nThe default number of decimal is 0, meaning that the function will return the nearest integer.\n\nThe round() function in Python will round to the nearest whole number and 'rounding to the even number' when equidistant, meaning that exactly 12.5 rounds to the integer 12.\n\n::: {#ca7e56cd .cell execution_count=1}\n``` {.python .cell-code}\n# For integers\nx= 12\nprint(round(x))\n \n# For floating point\nx= 12.3\nprint(round(22.7)) \n \n# if the second parameter is present\n \n# when the (ndigit+1)th digit is =5 \nx=4.465\nprint(round(x, 2)) \n \n# when the (ndigit+1)th digit is >=5 \nx=4.476\nprint(round(x, 2)) \n \n# when the (ndigit+1)th digit is <5 \nx=4.473\nprint(round(x, 2))\n\n```\n\n::: {.cell-output .cell-output-stdout}\n```\n12\n23\n4.46\n4.48\n4.47\n```\n:::\n:::\n\n\n", "supporting": [ "Rounding_files" ], diff --git a/_freeze/python/Summary_statistics/execute-results/html.json 
b/_freeze/python/Summary_statistics/execute-results/html.json index 8fe33366a..aa4a6b552 100644 --- a/_freeze/python/Summary_statistics/execute-results/html.json +++ b/_freeze/python/Summary_statistics/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "e036a0b4d4e2ea0b8dfbd9e2dde34fad", "result": { "engine": "jupyter", - "markdown": "---\ntitle: \"Summary statistics\"\noutput: html_document\n---\n\n\nThe **numpy.percentile()** function is useful to determine the *n^th^-percentile* for the given array of data. It returns an array with percentile values or a scalar along the specified axis. The function accepts the following parameters:\n\n1.*array*: The array of data whose percentile needs to be calculated.\n\n2.*percentile*: Denotes the percentile that needs to be computed.\n\n3.*axis* (optional): Denotes the axis along which the percentile is calculated. By default, a flattened array is used.\n\n4.*out* (optional): An alternate output array where we can place the result.\n\n5.*overwrite_input* (optional): Used to modify the input array.\n\n6.*keepdims* (optional): Creates reduced axes with dimensions of one size.\n\n::: {#1d418585 .cell execution_count=1}\n``` {.python .cell-code}\nimport numpy as np\n\nsample_data=[12, 25, 16, 50, 34, 29, 60, 86, 52, 39, 41]\n\npercentile = np.percentile(sample_data,75)\n\nprint(percentile)\n```\n\n::: {.cell-output .cell-output-stdout}\n```\n51.0\n```\n:::\n:::\n\n\n", + "markdown": "---\ntitle: \"Summary statistics\"\noutput: html_document\n---\n\nThe **numpy.percentile()** function is useful to determine the *n^th^-percentile* for the given array of data. It returns an array with percentile values or a scalar along the specified axis. The function accepts the following parameters:\n\n1.*array*: The array of data whose percentile needs to be calculated.\n\n2.*percentile*: Denotes the percentile that needs to be computed.\n\n3.*axis* (optional): Denotes the axis along which the percentile is calculated. 
By default, a flattened array is used.\n\n4.*out* (optional): An alternate output array where we can place the result.\n\n5.*overwrite_input* (optional): Used to modify the input array.\n\n6.*keepdims* (optional): Creates reduced axes with dimensions of one size.\n\n::: {#ab349748 .cell execution_count=1}\n``` {.python .cell-code}\nimport numpy as np\n\nsample_data=[12, 25, 16, 50, 34, 29, 60, 86, 52, 39, 41]\n\npercentile = np.percentile(sample_data,75)\n\nprint(percentile)\n```\n\n::: {.cell-output .cell-output-stdout}\n```\n51.0\n```\n:::\n:::\n\n\n", "supporting": [ "Summary_statistics_files" ], diff --git a/_freeze/python/ancova/execute-results/html.json b/_freeze/python/ancova/execute-results/html.json index a79301123..d9c8011cf 100644 --- a/_freeze/python/ancova/execute-results/html.json +++ b/_freeze/python/ancova/execute-results/html.json @@ -2,9 +2,9 @@ "hash": "8b3a61ff28608e9874519d06a7000bc6", "result": { "engine": "jupyter", - "markdown": "---\ntitle: \"Ancova\"\noutput: html_document\n---\n\n\n## Introduction\n\nIn this example, we're looking at [Analysis of Covariance](https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.4/statug/statug_glm_examples04.htm). 
ANCOVA is typically used to analyse treatment differences, to see examples of prediction models go to the [simple linear regression page](linear_regression.qmd).\n\n## Data Summary\n\n::: {#32849ca1 .cell execution_count=1}\n``` {.python .cell-code}\nimport pandas as pd\n\n# Input data\ndata = {\n 'drug': [\"A\", \"A\", \"A\", \"A\", \"A\", \"A\", \"A\", \"A\", \"A\", \"A\",\n \"D\", \"D\", \"D\", \"D\", \"D\", \"D\", \"D\", \"D\", \"D\", \"D\",\n \"F\", \"F\", \"F\", \"F\", \"F\", \"F\", \"F\", \"F\", \"F\", \"F\"],\n 'pre': [11, 8, 5, 14, 19, 6, 10, 6, 11, 3,\n 6, 6, 7, 8, 18, 8, 19, 8, 5, 15,\n 16, 13, 11, 9, 21, 16, 12, 12, 7, 12],\n 'post': [6, 0, 2, 8, 11, 4, 13, 1, 8, 0,\n 0, 2, 3, 1, 18, 4, 14, 9, 1, 9,\n 13, 10, 18, 5, 23, 12, 5, 16, 1, 20]\n}\n\ndf = pd.DataFrame(data)\n```\n:::\n\n\n::: {#7e644f6c .cell execution_count=2}\n``` {.python .cell-code}\n# Descriptive statistics\nsummary_stats = df.describe()\n\n# Calculate median\nmedian_pre = df['pre'].median()\nmedian_post = df['post'].median()\n\n# Add median to the summary statistics\nsummary_stats.loc['median'] = [median_pre, median_post]\n\nprint(summary_stats)\n```\n\n::: {.cell-output .cell-output-stdout}\n```\n pre post\ncount 30.000000 30.000000\nmean 10.733333 7.900000\nstd 4.791755 6.666178\nmin 3.000000 0.000000\n25% 7.000000 2.000000\n50% 10.500000 7.000000\n75% 13.750000 12.750000\nmax 21.000000 23.000000\nmedian 10.500000 7.000000\n```\n:::\n:::\n\n\n## Ancova in Python\n\nIn Python, Ancova can be performed using the [statsmodels](https://www.statsmodels.org/stable/index.html) library from the scipy package.\n\n::: {#7e77f8b3 .cell execution_count=3}\n``` {.python .cell-code}\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\nfrom tabulate import tabulate\n\n# Fit the ANCOVA model\nmodel_ancova = smf.ols('post ~ drug + pre', data=df).fit()\n\n# Summary of the model\nmodel_summary = model_ancova.summary()\nprint(model_summary)\n\n# Extracting glance (summary) 
information\nmodel_glance = {\n 'r_squared': model_ancova.rsquared,\n 'adj_r_squared': model_ancova.rsquared_adj,\n 'f_statistic': model_ancova.fvalue,\n 'f_pvalue': model_ancova.f_pvalue,\n 'aic': model_ancova.aic,\n 'bic': model_ancova.bic\n}\nmodel_glance_df = pd.DataFrame([model_glance])\nprint(tabulate(model_glance_df, headers='keys', tablefmt='grid'))\n\n# Extracting tidy (coefficients) information\nmodel_tidy = model_ancova.summary2().tables[1]\nprint(tabulate(model_tidy, headers='keys', tablefmt='grid'))\n```\n\n::: {.cell-output .cell-output-stdout}\n```\n OLS Regression Results \n==============================================================================\nDep. Variable: post R-squared: 0.676\nModel: OLS Adj. R-squared: 0.639\nMethod: Least Squares F-statistic: 18.10\nDate: Wed, 20 Aug 2025 Prob (F-statistic): 1.50e-06\nTime: 14:01:49 Log-Likelihood: -82.054\nNo. Observations: 30 AIC: 172.1\nDf Residuals: 26 BIC: 177.7\nDf Model: 3 \nCovariance Type: nonrobust \n==============================================================================\n coef std err t P>|t| [0.025 0.975]\n------------------------------------------------------------------------------\nIntercept -3.8808 1.986 -1.954 0.062 -7.964 0.202\ndrug[T.D] 0.1090 1.795 0.061 0.952 -3.581 3.799\ndrug[T.F] 3.4461 1.887 1.826 0.079 -0.432 7.324\npre 0.9872 0.164 6.001 0.000 0.649 1.325\n==============================================================================\nOmnibus: 2.609 Durbin-Watson: 2.526\nProb(Omnibus): 0.271 Jarque-Bera (JB): 2.148\nSkew: 0.645 Prob(JB): 0.342\nKurtosis: 2.765 Cond. No. 
39.8\n==============================================================================\n\nNotes:\n[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n+----+-------------+-----------------+---------------+-------------+---------+---------+\n| | r_squared | adj_r_squared | f_statistic | f_pvalue | aic | bic |\n+====+=============+=================+===============+=============+=========+=========+\n| 0 | 0.676261 | 0.638906 | 18.1039 | 1.50137e-06 | 172.108 | 177.712 |\n+----+-------------+-----------------+---------------+-------------+---------+---------+\n+-----------+-----------+------------+------------+-------------+-----------+----------+\n| | Coef. | Std.Err. | t | P>|t| | [0.025 | 0.975] |\n+===========+===========+============+============+=============+===========+==========+\n| Intercept | -3.88081 | 1.9862 | -1.95388 | 0.0615519 | -7.96351 | 0.201887 |\n+-----------+-----------+------------+------------+-------------+-----------+----------+\n| drug[T.D] | 0.108971 | 1.79514 | 0.0607037 | 0.952059 | -3.58098 | 3.79892 |\n+-----------+-----------+------------+------------+-------------+-----------+----------+\n| drug[T.F] | 3.44614 | 1.88678 | 1.82646 | 0.0792846 | -0.432195 | 7.32447 |\n+-----------+-----------+------------+------------+-------------+-----------+----------+\n| pre | 0.987184 | 0.164498 | 6.00121 | 2.45433e-06 | 0.649054 | 1.32531 |\n+-----------+-----------+------------+------------+-------------+-----------+----------+\n```\n:::\n:::\n\n\nPlease note that all values match with the corresponding [R version](https://psiaims.github.io/CAMIS/R/ancova.html), except for the AIC and BIC values, which differ slightly. This should be acceptable for most practical purposes in statistical analysis. 
Currently, there are [ongoing discussions](https://github.com/statsmodels/statsmodels/issues/1802) in the statsmodels community regarding the computational details of AIC and BIC.\n\nThe following code can be used to enforce complete consistency of AIC and BIC values with R outputs by adding 1 to the number of parameters:\n\n::: {#98748e99 .cell execution_count=4}\n``` {.python .cell-code}\nimport numpy as np\n\n# Manual calculation of AIC and BIC to ensure consistency with R\nn = df.shape[0] # number of observations\nk = model_ancova.df_model + 1 # number of parameters (including intercept)\nlog_lik = model_ancova.llf # log-likelihood\n\n# Adjusted number of parameters (including scale parameter)\nk_adjusted = k + 1\n\n# Manually calculate AIC and BIC to match R's behavior\naic_adjusted = 2 * k_adjusted - 2 * log_lik\nbic_adjusted = np.log(n) * k_adjusted - 2 * log_lik\n\nprint(f\"Number of observations (n): {n}\")\nprint(f\"Number of parameters (k_adjusted): {k_adjusted}\")\nprint(f\"Log-likelihood: {log_lik}\")\nprint(f\"AIC (adjusted): {aic_adjusted}\")\nprint(f\"BIC (adjusted): {bic_adjusted}\")\n```\n\n::: {.cell-output .cell-output-stdout}\n```\nNumber of observations (n): 30\nNumber of parameters (k_adjusted): 5.0\nLog-likelihood: -82.0537744890265\nAIC (adjusted): 174.107548978053\nBIC (adjusted): 181.11353588636376\n```\n:::\n:::\n\n\nThere are different types of anova computations. The [statsmodels.stats.anova.anova_lm](https://www.statsmodels.org/stable/generated/statsmodels.stats.anova.anova_lm.html) function allows the types 1, 2 and 3. 
The code to compute these types is depicted below:\n\n::: {#d7fdccf9 .cell execution_count=5}\n``` {.python .cell-code}\nimport statsmodels.formula.api as smf\nimport statsmodels.stats.anova as ssa\n\n# Center the predictor for Type III anova\n#df['pre_centered'] = df['pre'] - df['pre'].mean()\n\n# Fit the model for types I and II anova\nmodel = smf.ols('post ~ C(drug) + pre', data=df).fit()\n\n# Perform anova for types I and II\nancova_table_type_1 = ssa.anova_lm(model, typ=1)\nancova_table_type_2 = ssa.anova_lm(model, typ=2)\n\n# Fit the model for Type III anova with centered predictors\nmodel_type_3 = smf.ols('post ~ C(drug) + pre', data=df).fit()\nancova_table_type_3 = ssa.anova_lm(model_type_3, typ=3)\n\n# Calculate SSd (sum of squares for residuals)\nssd_type1 = ancova_table_type_1['sum_sq'].loc['Residual']\nssd_type2 = ancova_table_type_2['sum_sq'].loc['Residual']\nssd_type3 = ancova_table_type_3['sum_sq'].loc['Residual']\n\n# Calculate ges\nancova_table_type_1['ges'] = ancova_table_type_1['sum_sq'] / (ancova_table_type_1['sum_sq'] + ssd_type1)\nancova_table_type_2['ges'] = ancova_table_type_2['sum_sq'] / (ancova_table_type_2['sum_sq'] + ssd_type2)\nancova_table_type_3['ges'] = ancova_table_type_3['sum_sq'] / (ancova_table_type_3['sum_sq'] + ssd_type3)\n\n# Add SSd column\nancova_table_type_1['SSd'] = ssd_type1\nancova_table_type_2['SSd'] = ssd_type2\nancova_table_type_3['SSd'] = ssd_type3\n\n# Add significance column\nancova_table_type_1['p<0.05'] = ancova_table_type_1['PR(>F)'] < 0.05\nancova_table_type_2['p<0.05'] = ancova_table_type_2['PR(>F)'] < 0.05\nancova_table_type_3['p<0.05'] = ancova_table_type_3['PR(>F)'] < 0.05\n\n# Rename columns to match the R output\nancova_table_type_1.rename(columns={'sum_sq': 'SSn', 'df': 'DFn', 'F': 'F', 'PR(>F)': 'p'}, inplace=True)\nancova_table_type_1.reset_index(inplace=True)\nancova_table_type_1.rename(columns={'index': 'Effect'}, inplace=True)\n\nancova_table_type_2.rename(columns={'sum_sq': 'SSn', 'df': 'DFn', 'F': 
'F', 'PR(>F)': 'p'}, inplace=True)\nancova_table_type_2.reset_index(inplace=True)\nancova_table_type_2.rename(columns={'index': 'Effect'}, inplace=True)\n\nancova_table_type_3.rename(columns={'sum_sq': 'SSn', 'df': 'DFn', 'F': 'F', 'PR(>F)': 'p'}, inplace=True)\nancova_table_type_3.reset_index(inplace=True)\nancova_table_type_3.rename(columns={'index': 'Effect'}, inplace=True)\n\n# Calculate DFd (degrees of freedom for residuals)\ndfd_type1 = ancova_table_type_1.loc[ancova_table_type_1['Effect'] == 'Residual', 'DFn'].values[0]\ndfd_type2 = ancova_table_type_2.loc[ancova_table_type_2['Effect'] == 'Residual', 'DFn'].values[0]\ndfd_type3 = ancova_table_type_3.loc[ancova_table_type_3['Effect'] == 'Residual', 'DFn'].values[0]\nancova_table_type_1['DFd'] = dfd_type1\nancova_table_type_2['DFd'] = dfd_type2\nancova_table_type_3['DFd'] = dfd_type3\n\n# Filter out the Residual row\nancova_table_type_1 = ancova_table_type_1[ancova_table_type_1['Effect'] != 'Residual']\nancova_table_type_2 = ancova_table_type_2[ancova_table_type_2['Effect'] != 'Residual']\nancova_table_type_3 = ancova_table_type_3[ancova_table_type_3['Effect'] != 'Residual']\n\n# Select and reorder columns to match the R output\nancova_table_type_1 = ancova_table_type_1[['Effect', 'DFn', 'DFd', 'SSn', 'SSd', 'F', 'p', 'p<0.05', 'ges']]\nancova_table_type_2 = ancova_table_type_2[['Effect', 'DFn', 'DFd', 'SSn', 'SSd', 'F', 'p', 'p<0.05', 'ges']]\nancova_table_type_3 = ancova_table_type_3[['Effect', 'DFn', 'DFd', 'SSn', 'SSd', 'F', 'p', 'p<0.05', 'ges']]\n```\n:::\n\n\n## Type 1 Ancova in Python\n\n::: {#4f2cb73e .cell execution_count=6}\n``` {.python .cell-code}\nprint(tabulate(ancova_table_type_1, headers='keys', tablefmt='grid'))\n```\n\n::: {.cell-output .cell-output-stdout}\n```\n+----+----------+-------+-------+---------+---------+----------+-------------+----------+----------+\n| | Effect | DFn | DFd | SSn | SSd | F | p | p<0.05 | ges 
|\n+====+==========+=======+=======+=========+=========+==========+=============+==========+==========+\n| 0 | C(drug) | 2 | 26 | 293.6 | 417.203 | 9.14855 | 0.000981237 | True | 0.413054 |\n+----+----------+-------+-------+---------+---------+----------+-------------+----------+----------+\n| 1 | pre | 1 | 26 | 577.897 | 417.203 | 36.0145 | 2.45433e-06 | True | 0.580743 |\n+----+----------+-------+-------+---------+---------+----------+-------------+----------+----------+\n```\n:::\n:::\n\n\n## Type 2 Ancova in Python\n\n::: {#66b6a51a .cell execution_count=7}\n``` {.python .cell-code}\nprint(tabulate(ancova_table_type_2, headers='keys', tablefmt='grid'))\n```\n\n::: {.cell-output .cell-output-stdout}\n```\n+----+----------+-------+-------+----------+---------+----------+-------------+----------+----------+\n| | Effect | DFn | DFd | SSn | SSd | F | p | p<0.05 | ges |\n+====+==========+=======+=======+==========+=========+==========+=============+==========+==========+\n| 0 | C(drug) | 2 | 26 | 68.5537 | 417.203 | 2.13613 | 0.138379 | False | 0.141128 |\n+----+----------+-------+-------+----------+---------+----------+-------------+----------+----------+\n| 1 | pre | 1 | 26 | 577.897 | 417.203 | 36.0145 | 2.45433e-06 | True | 0.580743 |\n+----+----------+-------+-------+----------+---------+----------+-------------+----------+----------+\n```\n:::\n:::\n\n\n## Type 3 Ancova in Python\n\n::: {#c88d8305 .cell execution_count=8}\n``` {.python .cell-code}\nprint(tabulate(ancova_table_type_3, headers='keys', tablefmt='grid'))\n```\n\n::: {.cell-output .cell-output-stdout}\n```\n+----+-----------+-------+-------+----------+---------+----------+-------------+----------+----------+\n| | Effect | DFn | DFd | SSn | SSd | F | p | p<0.05 | ges |\n+====+===========+=======+=======+==========+=========+==========+=============+==========+==========+\n| 0 | Intercept | 1 | 26 | 61.2592 | 417.203 | 3.81767 | 0.0615519 | False | 0.128034 
|\n+----+-----------+-------+-------+----------+---------+----------+-------------+----------+----------+\n| 1 | C(drug) | 2 | 26 | 68.5537 | 417.203 | 2.13613 | 0.138379 | False | 0.141128 |\n+----+-----------+-------+-------+----------+---------+----------+-------------+----------+----------+\n| 2 | pre | 1 | 26 | 577.897 | 417.203 | 36.0145 | 2.45433e-06 | True | 0.580743 |\n+----+-----------+-------+-------+----------+---------+----------+-------------+----------+----------+\n```\n:::\n:::\n\n\nPlease note that the results are consistent with the results achieved with [R](https://psiaims.github.io/CAMIS/R/ancova.html), except for the first row of the type 3 table featuring the intercept.\n\n", + "markdown": "---\ntitle: \"Ancova\"\noutput: html_document\n---\n\n## Introduction\n\nIn this example, we're looking at [Analysis of Covariance](https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.4/statug/statug_glm_examples04.htm). ANCOVA is typically used to analyse treatment differences, to see examples of prediction models go to the [simple linear regression page](linear_regression.qmd).\n\n## Data Summary\n\n::: {#c7ce9a89 .cell execution_count=1}\n``` {.python .cell-code}\nimport pandas as pd\n\n# Input data\ndata = {\n 'drug': [\"A\", \"A\", \"A\", \"A\", \"A\", \"A\", \"A\", \"A\", \"A\", \"A\",\n \"D\", \"D\", \"D\", \"D\", \"D\", \"D\", \"D\", \"D\", \"D\", \"D\",\n \"F\", \"F\", \"F\", \"F\", \"F\", \"F\", \"F\", \"F\", \"F\", \"F\"],\n 'pre': [11, 8, 5, 14, 19, 6, 10, 6, 11, 3,\n 6, 6, 7, 8, 18, 8, 19, 8, 5, 15,\n 16, 13, 11, 9, 21, 16, 12, 12, 7, 12],\n 'post': [6, 0, 2, 8, 11, 4, 13, 1, 8, 0,\n 0, 2, 3, 1, 18, 4, 14, 9, 1, 9,\n 13, 10, 18, 5, 23, 12, 5, 16, 1, 20]\n}\n\ndf = pd.DataFrame(data)\n```\n:::\n\n\n::: {#93347a16 .cell execution_count=2}\n``` {.python .cell-code}\n# Descriptive statistics\nsummary_stats = df.describe()\n\n# Calculate median\nmedian_pre = df['pre'].median()\nmedian_post = df['post'].median()\n\n# Add median to the summary 
statistics\nsummary_stats.loc['median'] = [median_pre, median_post]\n\nprint(summary_stats)\n```\n\n::: {.cell-output .cell-output-stdout}\n```\n pre post\ncount 30.000000 30.000000\nmean 10.733333 7.900000\nstd 4.791755 6.666178\nmin 3.000000 0.000000\n25% 7.000000 2.000000\n50% 10.500000 7.000000\n75% 13.750000 12.750000\nmax 21.000000 23.000000\nmedian 10.500000 7.000000\n```\n:::\n:::\n\n\n## Ancova in Python\n\nIn Python, Ancova can be performed using the [statsmodels](https://www.statsmodels.org/stable/index.html) library from the scipy package.\n\n::: {#12056283 .cell execution_count=3}\n``` {.python .cell-code}\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\nfrom tabulate import tabulate\n\n# Fit the ANCOVA model\nmodel_ancova = smf.ols('post ~ drug + pre', data=df).fit()\n\n# Summary of the model\nmodel_summary = model_ancova.summary()\nprint(model_summary)\n\n# Extracting glance (summary) information\nmodel_glance = {\n 'r_squared': model_ancova.rsquared,\n 'adj_r_squared': model_ancova.rsquared_adj,\n 'f_statistic': model_ancova.fvalue,\n 'f_pvalue': model_ancova.f_pvalue,\n 'aic': model_ancova.aic,\n 'bic': model_ancova.bic\n}\nmodel_glance_df = pd.DataFrame([model_glance])\nprint(tabulate(model_glance_df, headers='keys', tablefmt='grid'))\n\n# Extracting tidy (coefficients) information\nmodel_tidy = model_ancova.summary2().tables[1]\nprint(tabulate(model_tidy, headers='keys', tablefmt='grid'))\n```\n\n::: {.cell-output .cell-output-stdout}\n```\n OLS Regression Results \n==============================================================================\nDep. Variable: post R-squared: 0.676\nModel: OLS Adj. R-squared: 0.639\nMethod: Least Squares F-statistic: 18.10\nDate: Tue, 17 Mar 2026 Prob (F-statistic): 1.50e-06\nTime: 16:37:44 Log-Likelihood: -82.054\nNo. 
Observations: 30 AIC: 172.1\nDf Residuals: 26 BIC: 177.7\nDf Model: 3 \nCovariance Type: nonrobust \n==============================================================================\n coef std err t P>|t| [0.025 0.975]\n------------------------------------------------------------------------------\nIntercept -3.8808 1.986 -1.954 0.062 -7.964 0.202\ndrug[T.D] 0.1090 1.795 0.061 0.952 -3.581 3.799\ndrug[T.F] 3.4461 1.887 1.826 0.079 -0.432 7.324\npre 0.9872 0.164 6.001 0.000 0.649 1.325\n==============================================================================\nOmnibus: 2.609 Durbin-Watson: 2.526\nProb(Omnibus): 0.271 Jarque-Bera (JB): 2.148\nSkew: 0.645 Prob(JB): 0.342\nKurtosis: 2.765 Cond. No. 39.8\n==============================================================================\n\nNotes:\n[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n+----+-------------+-----------------+---------------+-------------+---------+---------+\n| | r_squared | adj_r_squared | f_statistic | f_pvalue | aic | bic |\n+====+=============+=================+===============+=============+=========+=========+\n| 0 | 0.676261 | 0.638906 | 18.1039 | 1.50137e-06 | 172.108 | 177.712 |\n+----+-------------+-----------------+---------------+-------------+---------+---------+\n+-----------+-----------+------------+------------+-------------+-----------+----------+\n| | Coef. | Std.Err. 
| t | P>|t| | [0.025 | 0.975] |\n+===========+===========+============+============+=============+===========+==========+\n| Intercept | -3.88081 | 1.9862 | -1.95388 | 0.0615519 | -7.96351 | 0.201887 |\n+-----------+-----------+------------+------------+-------------+-----------+----------+\n| drug[T.D] | 0.108971 | 1.79514 | 0.0607037 | 0.952059 | -3.58098 | 3.79892 |\n+-----------+-----------+------------+------------+-------------+-----------+----------+\n| drug[T.F] | 3.44614 | 1.88678 | 1.82646 | 0.0792846 | -0.432195 | 7.32447 |\n+-----------+-----------+------------+------------+-------------+-----------+----------+\n| pre | 0.987184 | 0.164498 | 6.00121 | 2.45433e-06 | 0.649054 | 1.32531 |\n+-----------+-----------+------------+------------+-------------+-----------+----------+\n```\n:::\n:::\n\n\nPlease note that all values match with the corresponding [R version](https://psiaims.github.io/CAMIS/R/ancova.html), except for the AIC and BIC values, which differ slightly. This should be acceptable for most practical purposes in statistical analysis. 
Currently, there are [ongoing discussions](https://github.com/statsmodels/statsmodels/issues/1802) in the statsmodels community regarding the computational details of AIC and BIC.\n\nThe following code can be used to enforce complete consistency of AIC and BIC values with R outputs by adding 1 to the number of parameters:\n\n::: {#5bb5524e .cell execution_count=4}\n``` {.python .cell-code}\nimport numpy as np\n\n# Manual calculation of AIC and BIC to ensure consistency with R\nn = df.shape[0] # number of observations\nk = model_ancova.df_model + 1 # number of parameters (including intercept)\nlog_lik = model_ancova.llf # log-likelihood\n\n# Adjusted number of parameters (including scale parameter)\nk_adjusted = k + 1\n\n# Manually calculate AIC and BIC to match R's behavior\naic_adjusted = 2 * k_adjusted - 2 * log_lik\nbic_adjusted = np.log(n) * k_adjusted - 2 * log_lik\n\nprint(f\"Number of observations (n): {n}\")\nprint(f\"Number of parameters (k_adjusted): {k_adjusted}\")\nprint(f\"Log-likelihood: {log_lik}\")\nprint(f\"AIC (adjusted): {aic_adjusted}\")\nprint(f\"BIC (adjusted): {bic_adjusted}\")\n```\n\n::: {.cell-output .cell-output-stdout}\n```\nNumber of observations (n): 30\nNumber of parameters (k_adjusted): 5.0\nLog-likelihood: -82.0537744890265\nAIC (adjusted): 174.107548978053\nBIC (adjusted): 181.11353588636376\n```\n:::\n:::\n\n\nThere are different types of anova computations. The [statsmodels.stats.anova.anova_lm](https://www.statsmodels.org/stable/generated/statsmodels.stats.anova.anova_lm.html) function allows the types 1, 2 and 3. 
The code to compute these types is depicted below:\n\n::: {#e3be5a60 .cell execution_count=5}\n``` {.python .cell-code}\nimport statsmodels.formula.api as smf\nimport statsmodels.stats.anova as ssa\n\n# Center the predictor for Type III anova\n#df['pre_centered'] = df['pre'] - df['pre'].mean()\n\n# Fit the model for types I and II anova\nmodel = smf.ols('post ~ C(drug) + pre', data=df).fit()\n\n# Perform anova for types I and II\nancova_table_type_1 = ssa.anova_lm(model, typ=1)\nancova_table_type_2 = ssa.anova_lm(model, typ=2)\n\n# Fit the model for Type III anova with centered predictors\nmodel_type_3 = smf.ols('post ~ C(drug) + pre', data=df).fit()\nancova_table_type_3 = ssa.anova_lm(model_type_3, typ=3)\n\n# Calculate SSd (sum of squares for residuals)\nssd_type1 = ancova_table_type_1['sum_sq'].loc['Residual']\nssd_type2 = ancova_table_type_2['sum_sq'].loc['Residual']\nssd_type3 = ancova_table_type_3['sum_sq'].loc['Residual']\n\n# Calculate ges\nancova_table_type_1['ges'] = ancova_table_type_1['sum_sq'] / (ancova_table_type_1['sum_sq'] + ssd_type1)\nancova_table_type_2['ges'] = ancova_table_type_2['sum_sq'] / (ancova_table_type_2['sum_sq'] + ssd_type2)\nancova_table_type_3['ges'] = ancova_table_type_3['sum_sq'] / (ancova_table_type_3['sum_sq'] + ssd_type3)\n\n# Add SSd column\nancova_table_type_1['SSd'] = ssd_type1\nancova_table_type_2['SSd'] = ssd_type2\nancova_table_type_3['SSd'] = ssd_type3\n\n# Add significance column\nancova_table_type_1['p<0.05'] = ancova_table_type_1['PR(>F)'] < 0.05\nancova_table_type_2['p<0.05'] = ancova_table_type_2['PR(>F)'] < 0.05\nancova_table_type_3['p<0.05'] = ancova_table_type_3['PR(>F)'] < 0.05\n\n# Rename columns to match the R output\nancova_table_type_1.rename(columns={'sum_sq': 'SSn', 'df': 'DFn', 'F': 'F', 'PR(>F)': 'p'}, inplace=True)\nancova_table_type_1.reset_index(inplace=True)\nancova_table_type_1.rename(columns={'index': 'Effect'}, inplace=True)\n\nancova_table_type_2.rename(columns={'sum_sq': 'SSn', 'df': 'DFn', 'F': 
'F', 'PR(>F)': 'p'}, inplace=True)\nancova_table_type_2.reset_index(inplace=True)\nancova_table_type_2.rename(columns={'index': 'Effect'}, inplace=True)\n\nancova_table_type_3.rename(columns={'sum_sq': 'SSn', 'df': 'DFn', 'F': 'F', 'PR(>F)': 'p'}, inplace=True)\nancova_table_type_3.reset_index(inplace=True)\nancova_table_type_3.rename(columns={'index': 'Effect'}, inplace=True)\n\n# Calculate DFd (degrees of freedom for residuals)\ndfd_type1 = ancova_table_type_1.loc[ancova_table_type_1['Effect'] == 'Residual', 'DFn'].values[0]\ndfd_type2 = ancova_table_type_2.loc[ancova_table_type_2['Effect'] == 'Residual', 'DFn'].values[0]\ndfd_type3 = ancova_table_type_3.loc[ancova_table_type_3['Effect'] == 'Residual', 'DFn'].values[0]\nancova_table_type_1['DFd'] = dfd_type1\nancova_table_type_2['DFd'] = dfd_type2\nancova_table_type_3['DFd'] = dfd_type3\n\n# Filter out the Residual row\nancova_table_type_1 = ancova_table_type_1[ancova_table_type_1['Effect'] != 'Residual']\nancova_table_type_2 = ancova_table_type_2[ancova_table_type_2['Effect'] != 'Residual']\nancova_table_type_3 = ancova_table_type_3[ancova_table_type_3['Effect'] != 'Residual']\n\n# Select and reorder columns to match the R output\nancova_table_type_1 = ancova_table_type_1[['Effect', 'DFn', 'DFd', 'SSn', 'SSd', 'F', 'p', 'p<0.05', 'ges']]\nancova_table_type_2 = ancova_table_type_2[['Effect', 'DFn', 'DFd', 'SSn', 'SSd', 'F', 'p', 'p<0.05', 'ges']]\nancova_table_type_3 = ancova_table_type_3[['Effect', 'DFn', 'DFd', 'SSn', 'SSd', 'F', 'p', 'p<0.05', 'ges']]\n```\n:::\n\n\n## Type 1 Ancova in Python\n\n::: {#0567e95b .cell execution_count=6}\n``` {.python .cell-code}\nprint(tabulate(ancova_table_type_1, headers='keys', tablefmt='grid'))\n```\n\n::: {.cell-output .cell-output-stdout}\n```\n+----+----------+-------+-------+---------+---------+----------+-------------+----------+----------+\n| | Effect | DFn | DFd | SSn | SSd | F | p | p<0.05 | ges 
|\n+====+==========+=======+=======+=========+=========+==========+=============+==========+==========+\n| 0 | C(drug) | 2 | 26 | 293.6 | 417.203 | 9.14855 | 0.000981237 | True | 0.413054 |\n+----+----------+-------+-------+---------+---------+----------+-------------+----------+----------+\n| 1 | pre | 1 | 26 | 577.897 | 417.203 | 36.0145 | 2.45433e-06 | True | 0.580743 |\n+----+----------+-------+-------+---------+---------+----------+-------------+----------+----------+\n```\n:::\n:::\n\n\n## Type 2 Ancova in Python\n\n::: {#54c3159d .cell execution_count=7}\n``` {.python .cell-code}\nprint(tabulate(ancova_table_type_2, headers='keys', tablefmt='grid'))\n```\n\n::: {.cell-output .cell-output-stdout}\n```\n+----+----------+-------+-------+----------+---------+----------+-------------+----------+----------+\n| | Effect | DFn | DFd | SSn | SSd | F | p | p<0.05 | ges |\n+====+==========+=======+=======+==========+=========+==========+=============+==========+==========+\n| 0 | C(drug) | 2 | 26 | 68.5537 | 417.203 | 2.13613 | 0.138379 | False | 0.141128 |\n+----+----------+-------+-------+----------+---------+----------+-------------+----------+----------+\n| 1 | pre | 1 | 26 | 577.897 | 417.203 | 36.0145 | 2.45433e-06 | True | 0.580743 |\n+----+----------+-------+-------+----------+---------+----------+-------------+----------+----------+\n```\n:::\n:::\n\n\n## Type 3 Ancova in Python\n\n::: {#109ec9c1 .cell execution_count=8}\n``` {.python .cell-code}\nprint(tabulate(ancova_table_type_3, headers='keys', tablefmt='grid'))\n```\n\n::: {.cell-output .cell-output-stdout}\n```\n+----+-----------+-------+-------+----------+---------+----------+-------------+----------+----------+\n| | Effect | DFn | DFd | SSn | SSd | F | p | p<0.05 | ges |\n+====+===========+=======+=======+==========+=========+==========+=============+==========+==========+\n| 0 | Intercept | 1 | 26 | 61.2592 | 417.203 | 3.81767 | 0.0615519 | False | 0.128034 
|\n+----+-----------+-------+-------+----------+---------+----------+-------------+----------+----------+\n| 1 | C(drug) | 2 | 26 | 68.5537 | 417.203 | 2.13613 | 0.138379 | False | 0.141128 |\n+----+-----------+-------+-------+----------+---------+----------+-------------+----------+----------+\n| 2 | pre | 1 | 26 | 577.897 | 417.203 | 36.0145 | 2.45433e-06 | True | 0.580743 |\n+----+-----------+-------+-------+----------+---------+----------+-------------+----------+----------+\n```\n:::\n:::\n\n\nPlease note that the results are consistent with the results achieved with [R](https://psiaims.github.io/CAMIS/R/ancova.html), except for the first row of the type 3 table featuring the intercept.\n\n", "supporting": [ - "ancova_files/figure-html" + "ancova_files" ], "filters": [], "includes": {} diff --git a/_freeze/python/anova/execute-results/html.json b/_freeze/python/anova/execute-results/html.json index b7ae7650b..0f3e52a72 100644 --- a/_freeze/python/anova/execute-results/html.json +++ b/_freeze/python/anova/execute-results/html.json @@ -2,14 +2,14 @@ "hash": "8bcffc452f7c9678ce131ed30ec76e4e", "result": { "engine": "jupyter", - "markdown": "---\ntitle: \"ANOVA\"\n---\n\n\n## Introduction\n\nAnalysis of VAriance *(ANOVA)* is a statistical test to measure the difference between means of more than two groups.It is best suited when the data is normally distributed. By partitioning total variance into components, ANOVA unravels relationship between variables and identifies the true source of variation. It can handle multiple factors and their interactions, providing a robust way to better understand intricate relationships.\n\n## Anova Test in Python\n\nTo perform a one-way ANOVA test in Python we can use the **f_oneway()** function from SciPy library.\nSimilarly, to perform two-way ANOVA test **anova_lm()** function from the statsmodel library is frequently used.\n\nFor this test, we’ll create a data frame called df_disease taken from the SAS documentation. 
The corresponding data can be found [here](https://github.com/PSIAIMS/CAMIS/blob/main/data/sas_disease.csv). In this experiment, we are trying to find the impact of different drug and disease group on the `stem-length` \n\n::: {#ab4d9f75 .cell execution_count=1}\n``` {.python .cell-code}\nimport pandas as pd\nimport statsmodels.api as sm\nfrom statsmodels.formula.api import ols\n\n# Read the sample data\ndf = pd.read_csv(\"../data/sas_disease.csv\")\n\n\n#perform two-way ANOVA\nmodel = ols('y ~ C(drug) + C(disease) + C(drug):C(disease)', data=df).fit()\nsm.stats.anova_lm(model, typ=2)\n```\n\n::: {.cell-output .cell-output-display execution_count=1}\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
sum_sqdfFPR(>F)
C(drug)3063.4328633.09.2450960.000067
C(disease)418.8337412.01.8959900.161720
C(drug):C(disease)707.2662596.01.0672250.395846
Residual5080.81666746.0NaNNaN
\n
\n```\n:::\n:::\n\n\n## Sum of Squares Tables\n\n### Type I\n\n::: {#49806d87 .cell execution_count=2}\n``` {.python .cell-code}\nmodel = ols('y ~ C(drug) + C(disease) + C(drug):C(disease)', data=df).fit()\nsm.stats.anova_lm(model)\n```\n\n::: {.cell-output .cell-output-display execution_count=2}\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
dfsum_sqmean_sqFPR(>F)
C(drug)3.03133.2385061044.4128359.4557610.000056
C(disease)2.0418.833741209.4168701.8959900.161720
C(drug):C(disease)6.0707.266259117.8777101.0672250.395846
Residual46.05080.816667110.452536NaNNaN
\n
\n```\n:::\n:::\n\n\n### Type II\n\n::: {#b2c9e8c3 .cell execution_count=3}\n``` {.python .cell-code}\nmodel = ols('y ~ C(drug) + C(disease) + C(drug):C(disease)', data=df).fit()\nsm.stats.anova_lm(model, typ=2)\n```\n\n::: {.cell-output .cell-output-display execution_count=3}\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
sum_sqdfFPR(>F)
C(drug)3063.4328633.09.2450960.000067
C(disease)418.8337412.01.8959900.161720
C(drug):C(disease)707.2662596.01.0672250.395846
Residual5080.81666746.0NaNNaN
\n
\n```\n:::\n:::\n\n\n### Type III\n\n::: {#f117dca7 .cell execution_count=4}\n``` {.python .cell-code}\nmodel = ols('y ~ C(drug,Sum) + C(disease,Sum) + C(drug,Sum):C(disease,Sum)', data=df).fit()\nsm.stats.anova_lm(model, typ=3)\n```\n\n::: {.cell-output .cell-output-display execution_count=4}\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
sum_sqdfFPR(>F)
Intercept20037.6130111.0181.4137881.417921e-17
C(drug, Sum)2997.4718603.09.0460338.086388e-05
C(disease, Sum)415.8730462.01.8825871.637355e-01
C(drug, Sum):C(disease, Sum)707.2662596.01.0672253.958458e-01
Residual5080.81666746.0NaNNaN
\n
\n```\n:::\n:::\n\n\n### Type IV\n\nThere is no Type IV sum of squares calculation in Python similiar to SAS.\n\n", + "markdown": "---\ntitle: \"ANOVA\"\n---\n\n## Introduction\n\nAnalysis of VAriance *(ANOVA)* is a statistical test to measure the difference between means of more than two groups.It is best suited when the data is normally distributed. By partitioning total variance into components, ANOVA unravels relationship between variables and identifies the true source of variation. It can handle multiple factors and their interactions, providing a robust way to better understand intricate relationships.\n\n## Anova Test in Python\n\nTo perform a one-way ANOVA test in Python we can use the **f_oneway()** function from SciPy library.\nSimilarly, to perform two-way ANOVA test **anova_lm()** function from the statsmodel library is frequently used.\n\nFor this test, we’ll create a data frame called df_disease taken from the SAS documentation. The corresponding data can be found [here](https://github.com/PSIAIMS/CAMIS/blob/main/data/sas_disease.csv). In this experiment, we are trying to find the impact of different drug and disease group on the `stem-length` \n\n::: {#09cd8844 .cell execution_count=1}\n``` {.python .cell-code}\nimport pandas as pd\nimport statsmodels.api as sm\nfrom statsmodels.formula.api import ols\n\n# Read the sample data\ndf = pd.read_csv(\"../data/sas_disease.csv\")\n\n\n#perform two-way ANOVA\nmodel = ols('y ~ C(drug) + C(disease) + C(drug):C(disease)', data=df).fit()\nsm.stats.anova_lm(model, typ=2)\n```\n\n::: {.cell-output .cell-output-display execution_count=1}\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
sum_sqdfFPR(>F)
C(drug)3063.4328633.09.2450960.000067
C(disease)418.8337412.01.8959900.161720
C(drug):C(disease)707.2662596.01.0672250.395846
Residual5080.81666746.0NaNNaN
\n
\n```\n:::\n:::\n\n\n## Sum of Squares Tables\n\n### Type I\n\n::: {#20021af3 .cell execution_count=2}\n``` {.python .cell-code}\nmodel = ols('y ~ C(drug) + C(disease) + C(drug):C(disease)', data=df).fit()\nsm.stats.anova_lm(model)\n```\n\n::: {.cell-output .cell-output-display execution_count=2}\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
dfsum_sqmean_sqFPR(>F)
C(drug)3.03133.2385061044.4128359.4557610.000056
C(disease)2.0418.833741209.4168701.8959900.161720
C(drug):C(disease)6.0707.266259117.8777101.0672250.395846
Residual46.05080.816667110.452536NaNNaN
\n
\n```\n:::\n:::\n\n\n### Type II\n\n::: {#e9a760ce .cell execution_count=3}\n``` {.python .cell-code}\nmodel = ols('y ~ C(drug) + C(disease) + C(drug):C(disease)', data=df).fit()\nsm.stats.anova_lm(model, typ=2)\n```\n\n::: {.cell-output .cell-output-display execution_count=3}\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
sum_sqdfFPR(>F)
C(drug)3063.4328633.09.2450960.000067
C(disease)418.8337412.01.8959900.161720
C(drug):C(disease)707.2662596.01.0672250.395846
Residual5080.81666746.0NaNNaN
\n
\n```\n:::\n:::\n\n\n### Type III\n\n::: {#3279b32d .cell execution_count=4}\n``` {.python .cell-code}\nmodel = ols('y ~ C(drug,Sum) + C(disease,Sum) + C(drug,Sum):C(disease,Sum)', data=df).fit()\nsm.stats.anova_lm(model, typ=3)\n```\n\n::: {.cell-output .cell-output-display execution_count=4}\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
sum_sqdfFPR(>F)
Intercept20037.6130111.0181.4137881.417921e-17
C(drug, Sum)2997.4718603.09.0460338.086388e-05
C(disease, Sum)415.8730462.01.8825871.637355e-01
C(drug, Sum):C(disease, Sum)707.2662596.01.0672253.958458e-01
Residual5080.81666746.0NaNNaN
\n
\n```\n:::\n:::\n\n\n### Type IV\n\nThere is no Type IV sum of squares calculation in Python similiar to SAS.\n\n", "supporting": [ - "anova_files/figure-html" + "anova_files" ], "filters": [], "includes": { "include-in-header": [ - "\n\n\n" + "\n\n\n" ] } } diff --git a/_freeze/python/binomial_test/execute-results/html.json b/_freeze/python/binomial_test/execute-results/html.json index a8dfe08c8..b1803e05b 100644 --- a/_freeze/python/binomial_test/execute-results/html.json +++ b/_freeze/python/binomial_test/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "c842ce058c30139f4c5029deb81db12c", "result": { "engine": "jupyter", - "markdown": "---\ntitle: \"Binomial Test\"\nformat: html\neditor: visual\n---\n\n\nThe statistical test used to determine whether the proportion in a binary outcome experiment is equal to a specific value. It is appropriate when we have a small sample size and want to test the success probability $p$ against a hypothesized value $p_0$.\n\n## Creating a sample dataset\n\n- We will generate a dataset where we record the outcomes of 1000 coin flips.\n\n- We will use the `binom.test` function to test if the proportion of heads is significantly different from 0.5.\n\n::: {#042d3c01 .cell execution_count=1}\n``` {.python .cell-code}\nimport numpy as np\nfrom scipy.stats import binomtest\n\n# Set seed for reproducibility\nnp.random.seed(19)\ncoin_flips = np.random.choice(['H', 'T'], size=1000, replace=True, p=[0.5, 0.5])\n```\n:::\n\n\nNow, we will count the heads and tails and summarize the data.\n\n::: {#ca08761c .cell execution_count=2}\n``` {.python .cell-code}\n# Count heads and tails\nheads_count = np.sum(coin_flips == 'H')\ntails_count = np.sum(coin_flips == 'T')\ntotal_flips = len(coin_flips)\n\nheads_count, tails_count, total_flips\n```\n\n::: {.cell-output .cell-output-display execution_count=2}\n```\n(np.int64(523), np.int64(477), 1000)\n```\n:::\n:::\n\n\n## Conducting Binomial Test\n\n::: {#148776f0 .cell execution_count=3}\n``` 
{.python .cell-code}\n# Perform the binomial test\nbinom_test_result = binomtest(heads_count, total_flips, p=0.5)\nbinom_test_result\n```\n\n::: {.cell-output .cell-output-display execution_count=3}\n```\nBinomTestResult(k=523, n=1000, alternative='two-sided', statistic=0.523, pvalue=0.15469370647995673)\n```\n:::\n:::\n\n\n### Results:\n\nThe output has a p-value `py binom_test_result` $> 0.05$ (chosen level of significance). Hence, we fail to reject the null hypothesis and conclude that the **coin is fair**.\n\n# Example of Clinical Trial Data\n\nWe load the `lung` dataset from `survival` package. We want to test if the proportion of patients with survival status 1 (dead) is significantly different from a hypothesized proportion (e.g. 50%)\n\nWe will calculate number of deaths and total number of patients.\n\n::: {#1ffd449e .cell execution_count=4}\n``` {.python .cell-code}\nimport pandas as pd\n\n# Load the lung cancer dataset from CSV file\nlung = pd.read_csv('../data/lung_cancer.csv')\n\n# Calculate the number of deaths and total number of patients\nnum_deaths = np.sum(lung['status'] == 1)\ntotal_pat = lung.shape[0]\n\nnum_deaths, total_pat\n```\n\n::: {.cell-output .cell-output-display execution_count=4}\n```\n(np.int64(63), 228)\n```\n:::\n:::\n\n\n## Conduct the Binomial Test\n\nWe will conduct the Binomial test and hypothesize that the proportion of death should be 19%.\n\n::: {#3ab62f62 .cell execution_count=5}\n``` {.python .cell-code}\n# Perform the binomial test\nbinom_test_clinical = binomtest(num_deaths, total_pat, p=0.19)\nbinom_test_clinical\n```\n\n::: {.cell-output .cell-output-display execution_count=5}\n```\nBinomTestResult(k=63, n=228, alternative='two-sided', statistic=0.27631578947368424, pvalue=0.0016828878642599632)\n```\n:::\n:::\n\n\n## Results:\n\nThe output has a p-value `py binom_test_clinical` $< 0.05$ (chosen level of significance). 
Hence, we reject the null hypothesis and conclude that **the propotion of death is significantly different from 19%**.\n\n", + "markdown": "---\ntitle: \"Binomial Test\"\nformat: html\neditor: visual\n---\n\nThe statistical test used to determine whether the proportion in a binary outcome experiment is equal to a specific value. It is appropriate when we have a small sample size and want to test the success probability $p$ against a hypothesized value $p_0$.\n\n## Creating a sample dataset\n\n- We will generate a dataset where we record the outcomes of 1000 coin flips.\n\n- We will use the `binom.test` function to test if the proportion of heads is significantly different from 0.5.\n\n::: {#21aec90b .cell execution_count=1}\n``` {.python .cell-code}\nimport numpy as np\nfrom scipy.stats import binomtest\n\n# Set seed for reproducibility\nnp.random.seed(19)\ncoin_flips = np.random.choice(['H', 'T'], size=1000, replace=True, p=[0.5, 0.5])\n```\n:::\n\n\nNow, we will count the heads and tails and summarize the data.\n\n::: {#5952dc0d .cell execution_count=2}\n``` {.python .cell-code}\n# Count heads and tails\nheads_count = np.sum(coin_flips == 'H')\ntails_count = np.sum(coin_flips == 'T')\ntotal_flips = len(coin_flips)\n\nheads_count, tails_count, total_flips\n```\n\n::: {.cell-output .cell-output-display execution_count=2}\n```\n(np.int64(523), np.int64(477), 1000)\n```\n:::\n:::\n\n\n## Conducting Binomial Test\n\n::: {#b22547ab .cell execution_count=3}\n``` {.python .cell-code}\n# Perform the binomial test\nbinom_test_result = binomtest(heads_count, total_flips, p=0.5)\nbinom_test_result\n```\n\n::: {.cell-output .cell-output-display execution_count=3}\n```\nBinomTestResult(k=523, n=1000, alternative='two-sided', statistic=0.523, pvalue=0.15469370647995673)\n```\n:::\n:::\n\n\n### Results:\n\nThe output has a p-value `py binom_test_result` $> 0.05$ (chosen level of significance). 
Hence, we fail to reject the null hypothesis and conclude that the **coin is fair**.\n\n# Example of Clinical Trial Data\n\nWe load the `lung` dataset from `survival` package. We want to test if the proportion of patients with survival status 1 (dead) is significantly different from a hypothesized proportion (e.g. 50%)\n\nWe will calculate number of deaths and total number of patients.\n\n::: {#90beaa49 .cell execution_count=4}\n``` {.python .cell-code}\nimport pandas as pd\n\n# Load the lung cancer dataset from CSV file\nlung = pd.read_csv('../data/lung_cancer.csv')\n\n# Calculate the number of deaths and total number of patients\nnum_deaths = np.sum(lung['status'] == 1)\ntotal_pat = lung.shape[0]\n\nnum_deaths, total_pat\n```\n\n::: {.cell-output .cell-output-display execution_count=4}\n```\n(np.int64(63), 228)\n```\n:::\n:::\n\n\n## Conduct the Binomial Test\n\nWe will conduct the Binomial test and hypothesize that the proportion of death should be 19%.\n\n::: {#be55a3d3 .cell execution_count=5}\n``` {.python .cell-code}\n# Perform the binomial test\nbinom_test_clinical = binomtest(num_deaths, total_pat, p=0.19)\nbinom_test_clinical\n```\n\n::: {.cell-output .cell-output-display execution_count=5}\n```\nBinomTestResult(k=63, n=228, alternative='two-sided', statistic=0.27631578947368424, pvalue=0.0016828878642599632)\n```\n:::\n:::\n\n\n## Results:\n\nThe output has a p-value `py binom_test_clinical` $< 0.05$ (chosen level of significance). 
Hence, we reject the null hypothesis and conclude that **the propotion of death is significantly different from 19%**.\n\n", "supporting": [ "binomial_test_files" ], diff --git a/_freeze/python/chi-square/execute-results/html.json b/_freeze/python/chi-square/execute-results/html.json index 02a97c04e..e687a4dc2 100644 --- a/_freeze/python/chi-square/execute-results/html.json +++ b/_freeze/python/chi-square/execute-results/html.json @@ -2,14 +2,14 @@ "hash": "ecbe2d80923f413bfeb5ead621c0a4f9", "result": { "engine": "jupyter", - "markdown": "---\ntitle: \"Chi-Square Association/Fisher's exact\"\n---\n\n\n## Introduction\n\nThe chi-square test is a non-parametric statistical test used to determine whether there is a significant association within the categorical variables. It compares the observed frequencies in a contingency table with the frequency we would expect if the variables were independent. The chi-square test calculates a test statistic, often denoted as χ² (chi-square), which follows chi-square distribution, we can determine whether the association between the variables are statistically significant.\n\nThe chi-squared test and Fisher's exact test can assess for independence between two variables when the comparing groups are independent and not correlated. The chi-squared test applies an approximation assuming the sample is large, while the Fisher's exact test runs an exact procedure especially for small-sized samples.\n\n## Data used \n\nTo perform the analysis the data used is: *Loprinzi CL. Laurie JA. Wieand HS. Krook JE. Novotny PJ. Kugler JW. Bartel J. Law M. Bateman M. Klatt NE. et al. Prospective evaluation of prognostic variables from patient-completed questionnaires. North Central Cancer Treatment Group. Journal of Clinical Oncology. 
12(3):601-7, 1994*.\n\n## Implementing Chi-Square test in Python\n\n We can use crosstab() function to create contingency table of two selected variables.\n\n::: {#17b645eb .cell execution_count=1}\n``` {.python .cell-code}\nimport pandas as pd \nimport numpy as np\nimport scipy.stats as stats \n\n# Read the sample data\ndata = pd.read_csv(\"../data/lung_cancer.csv\") \n\n# Removing undesired rows\ndf= data.dropna(subset=['ph.ecog','wt.loss']) \n\n# Converting numerical variable into categorical variable\n\ndf['ecog_grp']= np.where(df['ph.ecog']>0, \"fully active\",\"symptomatic\")\nprint(df['ecog_grp'])\ndf['wt_grp'] = np.where(df['wt.loss']>0, \"weight loss\", \"weight gain\")\n\ncontingency_table= pd.crosstab(df['ecog_grp'],df['wt_grp'])\ncontingency_table\n```\n\n::: {.cell-output .cell-output-stdout}\n```\n1 symptomatic\n2 symptomatic\n3 fully active\n4 symptomatic\n5 fully active\n ... \n223 fully active\n224 symptomatic\n225 fully active\n226 fully active\n227 fully active\nName: ecog_grp, Length: 213, dtype: object\n```\n:::\n\n::: {.cell-output .cell-output-stderr}\n```\n/var/folders/wv/j23_bm7d0mn77bx35v5mgl_00000gn/T/ipykernel_56901/2909872460.py:13: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n df['ecog_grp']= np.where(df['ph.ecog']>0, \"fully active\",\"symptomatic\")\n/var/folders/wv/j23_bm7d0mn77bx35v5mgl_00000gn/T/ipykernel_56901/2909872460.py:15: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n df['wt_grp'] = np.where(df['wt.loss']>0, \"weight loss\", \"weight 
gain\")\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=1}\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
wt_grpweight gainweight loss
ecog_grp
fully active39113
symptomatic2239
\n
\n```\n:::\n:::\n\n\nFurthermore, the chi2_contingency() function in scipy.stats library in Python can be used to implement Chi-square test. \n\n::: {#f79a389d .cell execution_count=2}\n``` {.python .cell-code}\n# Parsing the values from the contingency table\nvalue = np.array([contingency_table.iloc[0][0:5].values,\n contingency_table.iloc[1][0:5].values])\n\nstatistic, p, dof, expected = stats.chi2_contingency(value)\n\nprint(\"The chi2 value is:\", statistic)\nprint(\"The p value is:\", p)\nprint(\"The degree of freedom is:\", dof)\nprint(\"The expected values are:\", expected)\n```\n\n::: {.cell-output .cell-output-stdout}\n```\nThe chi2 value is: 1.8260529076055192\nThe p value is: 0.17659446865934617\nThe degree of freedom is: 1\nThe expected values are: [[ 43.53051643 108.46948357]\n [ 17.46948357 43.53051643]]\n```\n:::\n:::\n\n\n## Implementing Fisher exact test in Python\n\nTo implement Fischer's exact test in Python, we can use the fischer_exact() function from the stats module in SciPy library. It returns *SignificanceResult* object with statistic and pvalue as it's attributes.\n\n::: {#b57d5ac6 .cell execution_count=3}\n``` {.python .cell-code}\nstats.fisher_exact(value, alternative=\"two-sided\")\n```\n\n::: {.cell-output .cell-output-display execution_count=3}\n```\nSignificanceResult(statistic=np.float64(0.6118262268704746), pvalue=np.float64(0.13500579984749855))\n```\n:::\n:::\n\n\n", + "markdown": "---\ntitle: \"Chi-Square Association/Fisher's exact\"\n---\n\n## Introduction\n\nThe chi-square test is a non-parametric statistical test used to determine whether there is a significant association within the categorical variables. It compares the observed frequencies in a contingency table with the frequency we would expect if the variables were independent. 
The chi-square test calculates a test statistic, often denoted as χ² (chi-square), which follows chi-square distribution, we can determine whether the association between the variables are statistically significant.\n\nThe chi-squared test and Fisher's exact test can assess for independence between two variables when the comparing groups are independent and not correlated. The chi-squared test applies an approximation assuming the sample is large, while the Fisher's exact test runs an exact procedure especially for small-sized samples.\n\n## Data used \n\nTo perform the analysis the data used is: *Loprinzi CL. Laurie JA. Wieand HS. Krook JE. Novotny PJ. Kugler JW. Bartel J. Law M. Bateman M. Klatt NE. et al. Prospective evaluation of prognostic variables from patient-completed questionnaires. North Central Cancer Treatment Group. Journal of Clinical Oncology. 12(3):601-7, 1994*.\n\n## Implementing Chi-Square test in Python\n\n We can use crosstab() function to create contingency table of two selected variables.\n\n::: {#f2330c25 .cell execution_count=1}\n``` {.python .cell-code}\nimport pandas as pd \nimport numpy as np\nimport scipy.stats as stats \n\n# Read the sample data\ndata = pd.read_csv(\"../data/lung_cancer.csv\") \n\n# Removing undesired rows\ndf= data.dropna(subset=['ph.ecog','wt.loss']) \n\n# Converting numerical variable into categorical variable\n\ndf['ecog_grp']= np.where(df['ph.ecog']>0, \"fully active\",\"symptomatic\")\nprint(df['ecog_grp'])\ndf['wt_grp'] = np.where(df['wt.loss']>0, \"weight loss\", \"weight gain\")\n\ncontingency_table= pd.crosstab(df['ecog_grp'],df['wt_grp'])\ncontingency_table\n```\n\n::: {.cell-output .cell-output-stdout}\n```\n1 symptomatic\n2 symptomatic\n3 fully active\n4 symptomatic\n5 fully active\n ... 
\n223 fully active\n224 symptomatic\n225 fully active\n226 fully active\n227 fully active\nName: ecog_grp, Length: 213, dtype: object\n```\n:::\n\n::: {.cell-output .cell-output-stderr}\n```\n/tmp/ipykernel_35731/2909872460.py:13: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n df['ecog_grp']= np.where(df['ph.ecog']>0, \"fully active\",\"symptomatic\")\n/tmp/ipykernel_35731/2909872460.py:15: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n df['wt_grp'] = np.where(df['wt.loss']>0, \"weight loss\", \"weight gain\")\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=1}\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
wt_grpweight gainweight loss
ecog_grp
fully active39113
symptomatic2239
\n
\n```\n:::\n:::\n\n\nFurthermore, the chi2_contingency() function in scipy.stats library in Python can be used to implement Chi-square test. \n\n::: {#315c974a .cell execution_count=2}\n``` {.python .cell-code}\n# Parsing the values from the contingency table\nvalue = np.array([contingency_table.iloc[0][0:5].values,\n contingency_table.iloc[1][0:5].values])\n\nstatistic, p, dof, expected = stats.chi2_contingency(value)\n\nprint(\"The chi2 value is:\", statistic)\nprint(\"The p value is:\", p)\nprint(\"The degree of freedom is:\", dof)\nprint(\"The expected values are:\", expected)\n```\n\n::: {.cell-output .cell-output-stdout}\n```\nThe chi2 value is: 1.8260529076055192\nThe p value is: 0.17659446865934614\nThe degree of freedom is: 1\nThe expected values are: [[ 43.53051643 108.46948357]\n [ 17.46948357 43.53051643]]\n```\n:::\n:::\n\n\n## Implementing Fisher exact test in Python\n\nTo implement Fischer's exact test in Python, we can use the fischer_exact() function from the stats module in SciPy library. 
It returns *SignificanceResult* object with statistic and pvalue as it's attributes.\n\n::: {#a809c6be .cell execution_count=3}\n``` {.python .cell-code}\nstats.fisher_exact(value, alternative=\"two-sided\")\n```\n\n::: {.cell-output .cell-output-display execution_count=3}\n```\nSignificanceResult(statistic=np.float64(0.6118262268704746), pvalue=np.float64(0.13500579984749855))\n```\n:::\n:::\n\n\n", "supporting": [ - "chi-square_files/figure-html" + "chi-square_files" ], "filters": [], "includes": { "include-in-header": [ - "\n\n\n" + "\n\n\n" ] } } diff --git a/_freeze/python/correlation/execute-results/html.json b/_freeze/python/correlation/execute-results/html.json index 9e9f6eb91..d5f441619 100644 --- a/_freeze/python/correlation/execute-results/html.json +++ b/_freeze/python/correlation/execute-results/html.json @@ -2,9 +2,9 @@ "hash": "ec4ce86670c01a71adc78913395c2d36", "result": { "engine": "jupyter", - "markdown": "---\ntitle: \"Correlation Analysis in Python\"\n---\n\n\nCorrelation analyses measures the strength of the relationship between two variables. Correlation analyses can be used to test for associations in hypothesis testing. If the null hypothesis states there is correlation between the variables considered under the study. Then, the purpose is to investigate the possible association in the underlying variables.\n\nThe *Scipy* library in Python encompasees *Stats* module with **pearsonr()**, **kendalltau()**, **spearsmanr()** function to evaluate Pearson, Kendall and Spearsman Correlation co-efficient respectively.\n\n### Pearson's Correlation\n\nIt is a parametric correlation test because it depends on the distribution of data. It measures the linear dependence between two variables x and y. It is the ratio between the covariance of two variables and the product of their standard deviation. 
The result always have a value between 1 and -1.\n\n::: {#05c1e743 .cell execution_count=1}\n``` {.python .cell-code}\nimport pandas as pd\nfrom scipy.stats import pearsonr\n\n# Read the sample data\ndf = pd.read_csv(\"../data/NCCTG_Lung_Cancer_Data_535_29.csv\")\n\n# Convert dataframe into series\nx = df['age']\ny = df['meal.cal']\n\n# Apply the pearsonr()\ncorr, _ = pearsonr(x, y)\nprint('Pearsons correlation: %.3f' % corr)\n```\n\n::: {.cell-output .cell-output-stdout}\n```\nPearsons correlation: -0.240\n```\n:::\n:::\n\n\n### Kendall's Rank\n\nA τ test is a non-parametric hypothesis test for statistical dependence based on the τ coefficient. It is a measure of rank correlation: the similarity of the orderings of the data when ranked by each of the quantities. The Kendall correlation between two variables will be high when observations have a similar (or identical for a correlation of 1) rank (i.e. relative position label of the observations within the variable: 1st, 2nd, 3rd, etc.) between the two variables, and low when observations have a dissimilar (or fully different for a correlation of −1) rank between the two variables.\n\n::: {#a36b3668 .cell execution_count=2}\n``` {.python .cell-code}\nimport pandas as pd\nfrom scipy.stats import kendalltau \n\n# Read the sample data\ndf = pd.read_csv(\"../data/NCCTG_Lung_Cancer_Data_535_29.csv\")\n \n# Convert dataframe into series\nx=df['age']\ny = df['meal.cal']\n\n\n# Calculating Kendall Rank correlation \ncorr, _ = kendalltau(x, y) \nprint('Kendall Rank correlation: %.5f' % corr) \n```\n\n::: {.cell-output .cell-output-stdout}\n```\nKendall Rank correlation: -0.14581\n```\n:::\n:::\n\n\n### Spearman's Rank\n\nSpearman’s Rank Correlation is a statistical measure of the strength and direction of the monotonic relationship between two continuous variables. Therefore, these attributes are ranked or put in the order of their preference. It is denoted by the symbol “rho” (ρ) and can take values between -1 to +1. 
A positive value of rho indicates that there exists a positive relationship between the two variables, while a negative value of rho indicates a negative relationship. A rho value of 0 indicates no association between the two variables.\n\n::: {#f068f187 .cell execution_count=3}\n``` {.python .cell-code}\nimport pandas as pd\nfrom scipy.stats import spearmanr\n \n# Read the sample data\ndf= pd.read_csv(\"../data/NCCTG_Lung_Cancer_Data_535_29.csv\")\n\n# Convert dataframe into series\nx = df['age']\ny = df['meal.cal']\n \n# calculate Spearman's correlation coefficient and p-value\ncorr, pval = spearmanr(x, y)\n \n# print the result\nprint(\"Spearman's correlation coefficient:\", corr)\nprint(\"p-value:\", pval)\n```\n\n::: {.cell-output .cell-output-stdout}\n```\nSpearman's correlation coefficient: -0.21159256066613205\np-value: 0.006051240282927171\n```\n:::\n:::\n\n\n", + "markdown": "---\ntitle: \"Correlation Analysis in Python\"\n---\n\nCorrelation analyses measures the strength of the relationship between two variables. Correlation analyses can be used to test for associations in hypothesis testing. If the null hypothesis states there is correlation between the variables considered under the study. Then, the purpose is to investigate the possible association in the underlying variables.\n\nThe *Scipy* library in Python encompasees *Stats* module with **pearsonr()**, **kendalltau()**, **spearsmanr()** function to evaluate Pearson, Kendall and Spearsman Correlation co-efficient respectively.\n\n### Pearson's Correlation\n\nIt is a parametric correlation test because it depends on the distribution of data. It measures the linear dependence between two variables x and y. It is the ratio between the covariance of two variables and the product of their standard deviation. 
The result always have a value between 1 and -1.\n\n::: {#d03f6d98 .cell execution_count=1}\n``` {.python .cell-code}\nimport pandas as pd\nfrom scipy.stats import pearsonr\n\n# Read the sample data\ndf = pd.read_csv(\"../data/NCCTG_Lung_Cancer_Data_535_29.csv\")\n\n# Convert dataframe into series\nx = df['age']\ny = df['meal.cal']\n\n# Apply the pearsonr()\ncorr, _ = pearsonr(x, y)\nprint('Pearsons correlation: %.3f' % corr)\n```\n\n::: {.cell-output .cell-output-stdout}\n```\nPearsons correlation: -0.240\n```\n:::\n:::\n\n\n### Kendall's Rank\n\nA τ test is a non-parametric hypothesis test for statistical dependence based on the τ coefficient. It is a measure of rank correlation: the similarity of the orderings of the data when ranked by each of the quantities. The Kendall correlation between two variables will be high when observations have a similar (or identical for a correlation of 1) rank (i.e. relative position label of the observations within the variable: 1st, 2nd, 3rd, etc.) between the two variables, and low when observations have a dissimilar (or fully different for a correlation of −1) rank between the two variables.\n\n::: {#fa18fc3b .cell execution_count=2}\n``` {.python .cell-code}\nimport pandas as pd\nfrom scipy.stats import kendalltau \n\n# Read the sample data\ndf = pd.read_csv(\"../data/NCCTG_Lung_Cancer_Data_535_29.csv\")\n \n# Convert dataframe into series\nx=df['age']\ny = df['meal.cal']\n\n\n# Calculating Kendall Rank correlation \ncorr, _ = kendalltau(x, y) \nprint('Kendall Rank correlation: %.5f' % corr) \n```\n\n::: {.cell-output .cell-output-stdout}\n```\nKendall Rank correlation: -0.14581\n```\n:::\n:::\n\n\n### Spearman's Rank\n\nSpearman’s Rank Correlation is a statistical measure of the strength and direction of the monotonic relationship between two continuous variables. Therefore, these attributes are ranked or put in the order of their preference. It is denoted by the symbol “rho” (ρ) and can take values between -1 to +1. 
A positive value of rho indicates that there exists a positive relationship between the two variables, while a negative value of rho indicates a negative relationship. A rho value of 0 indicates no association between the two variables.\n\n::: {#f74b19f0 .cell execution_count=3}\n``` {.python .cell-code}\nimport pandas as pd\nfrom scipy.stats import spearmanr\n \n# Read the sample data\ndf= pd.read_csv(\"../data/NCCTG_Lung_Cancer_Data_535_29.csv\")\n\n# Convert dataframe into series\nx = df['age']\ny = df['meal.cal']\n \n# calculate Spearman's correlation coefficient and p-value\ncorr, pval = spearmanr(x, y)\n \n# print the result\nprint(\"Spearman's correlation coefficient:\", corr)\nprint(\"p-value:\", pval)\n```\n\n::: {.cell-output .cell-output-stdout}\n```\nSpearman's correlation coefficient: -0.21159256066613205\np-value: 0.006051240282927171\n```\n:::\n:::\n\n\n", "supporting": [ - "correlation_files/figure-html" + "correlation_files" ], "filters": [], "includes": {} diff --git a/_freeze/python/kruskal_wallis/execute-results/html.json b/_freeze/python/kruskal_wallis/execute-results/html.json index 78eb78f67..c3ccc6f06 100644 --- a/_freeze/python/kruskal_wallis/execute-results/html.json +++ b/_freeze/python/kruskal_wallis/execute-results/html.json @@ -2,9 +2,9 @@ "hash": "d0bd6fcff98362c031f616cea69f9598", "result": { "engine": "jupyter", - "markdown": "---\ntitle: \"Kruskal Wallis in Python\"\n---\n\n\n## Introduction\n\nThe Kruskal-Wallis test is a non-parametric equivalent to the one-way ANOVA. 
For this example, the data used is a subset of the iris dataset, testing for difference in sepal width between species of flower.\n\n::: {#360f8ea0 .cell execution_count=1}\n``` {.python .cell-code}\nimport pandas as pd\n\n# Define the data\ndata = {\n 'Species': ['setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica'],\n 'Sepal_Width': [3.4, 3.0, 3.4, 3.2, 3.5, 3.1, 2.7, 2.9, 2.7, 2.6, 2.5, 2.5, 3.0, 3.0, 3.1, 3.8, 2.7, 3.3]\n}\n\n# Create the DataFrame\niris_sub = pd.DataFrame(data)\n\n# Print the DataFrame\nprint(iris_sub)\n```\n\n::: {.cell-output .cell-output-stdout}\n```\n Species Sepal_Width\n0 setosa 3.4\n1 setosa 3.0\n2 setosa 3.4\n3 setosa 3.2\n4 setosa 3.5\n5 setosa 3.1\n6 versicolor 2.7\n7 versicolor 2.9\n8 versicolor 2.7\n9 versicolor 2.6\n10 versicolor 2.5\n11 versicolor 2.5\n12 virginica 3.0\n13 virginica 3.0\n14 virginica 3.1\n15 virginica 3.8\n16 virginica 2.7\n17 virginica 3.3\n```\n:::\n:::\n\n\n## Implementing Kruskal-Wallis in Python\n\nThe Kruskal-Wallis test can be implemented in Python using the kruskal function from scipy.stats. 
The null hypothesis is that the samples are from identical populations.\n\n::: {#4639de63 .cell execution_count=2}\n``` {.python .cell-code}\nfrom scipy.stats import kruskal\n\n# Separate the data for each species\nsetosa_data = iris_sub[iris_sub['Species'] == 'setosa']['Sepal_Width']\nversicolor_data = iris_sub[iris_sub['Species'] == 'versicolor']['Sepal_Width']\nvirginica_data = iris_sub[iris_sub['Species'] == 'virginica']['Sepal_Width']\n\n# Perform the Kruskal-Wallis H-test\nh_statistic, p_value = kruskal(setosa_data, versicolor_data, virginica_data)\n\n# Calculate the degrees of freedom\nk = len(iris_sub['Species'].unique())\ndf = k - 1\n\nprint(\"H-statistic:\", h_statistic)\nprint(\"p-value:\", p_value)\nprint(\"Degrees of freedom:\", df)\n```\n\n::: {.cell-output .cell-output-stdout}\n```\nH-statistic: 10.922233820459285\np-value: 0.0042488075570347485\nDegrees of freedom: 2\n```\n:::\n:::\n\n\n## Results\n\nAs seen above, Python outputs the Kruskal-Wallis rank sum statistic (10.922), the degrees of freedom (2), and the p-value of the test (0.004249). Therefore, the difference in population medians is statistically significant at the 5% level.\n\n", + "markdown": "---\ntitle: \"Kruskal Wallis in Python\"\n---\n\n## Introduction\n\nThe Kruskal-Wallis test is a non-parametric equivalent to the one-way ANOVA. 
For this example, the data used is a subset of the iris dataset, testing for difference in sepal width between species of flower.\n\n::: {#b319644b .cell execution_count=1}\n``` {.python .cell-code}\nimport pandas as pd\n\n# Define the data\ndata = {\n 'Species': ['setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica'],\n 'Sepal_Width': [3.4, 3.0, 3.4, 3.2, 3.5, 3.1, 2.7, 2.9, 2.7, 2.6, 2.5, 2.5, 3.0, 3.0, 3.1, 3.8, 2.7, 3.3]\n}\n\n# Create the DataFrame\niris_sub = pd.DataFrame(data)\n\n# Print the DataFrame\nprint(iris_sub)\n```\n\n::: {.cell-output .cell-output-stdout}\n```\n Species Sepal_Width\n0 setosa 3.4\n1 setosa 3.0\n2 setosa 3.4\n3 setosa 3.2\n4 setosa 3.5\n5 setosa 3.1\n6 versicolor 2.7\n7 versicolor 2.9\n8 versicolor 2.7\n9 versicolor 2.6\n10 versicolor 2.5\n11 versicolor 2.5\n12 virginica 3.0\n13 virginica 3.0\n14 virginica 3.1\n15 virginica 3.8\n16 virginica 2.7\n17 virginica 3.3\n```\n:::\n:::\n\n\n## Implementing Kruskal-Wallis in Python\n\nThe Kruskal-Wallis test can be implemented in Python using the kruskal function from scipy.stats. 
The null hypothesis is that the samples are from identical populations.\n\n::: {#db047417 .cell execution_count=2}\n``` {.python .cell-code}\nfrom scipy.stats import kruskal\n\n# Separate the data for each species\nsetosa_data = iris_sub[iris_sub['Species'] == 'setosa']['Sepal_Width']\nversicolor_data = iris_sub[iris_sub['Species'] == 'versicolor']['Sepal_Width']\nvirginica_data = iris_sub[iris_sub['Species'] == 'virginica']['Sepal_Width']\n\n# Perform the Kruskal-Wallis H-test\nh_statistic, p_value = kruskal(setosa_data, versicolor_data, virginica_data)\n\n# Calculate the degrees of freedom\nk = len(iris_sub['Species'].unique())\ndf = k - 1\n\nprint(\"H-statistic:\", h_statistic)\nprint(\"p-value:\", p_value)\nprint(\"Degrees of freedom:\", df)\n```\n\n::: {.cell-output .cell-output-stdout}\n```\nH-statistic: 10.922233820459285\np-value: 0.0042488075570347485\nDegrees of freedom: 2\n```\n:::\n:::\n\n\n## Results\n\nAs seen above, Python outputs the Kruskal-Wallis rank sum statistic (10.922), the degrees of freedom (2), and the p-value of the test (0.004249). Therefore, the difference in population medians is statistically significant at the 5% level.\n\n", "supporting": [ - "kruskal_wallis_files/figure-html" + "kruskal_wallis_files" ], "filters": [], "includes": {} diff --git a/_freeze/python/linear_regression/execute-results/html.json b/_freeze/python/linear_regression/execute-results/html.json index f8339a4e9..2582cbd29 100644 --- a/_freeze/python/linear_regression/execute-results/html.json +++ b/_freeze/python/linear_regression/execute-results/html.json @@ -2,14 +2,14 @@ "hash": "7617d2aae333897b0a2a92d132973902", "result": { "engine": "jupyter", - "markdown": "---\ntitle: \"Linear Regression\"\noutput: html_document\n---\n\n\nTo demonstrate the use of linear regression we examine a dataset that illustrates the relationship between Height and Weight in a group of 237 teen-aged boys and girls. 
The dataset is available [here](https://github.com/PSIAIMS/CAMIS/blob/3dca6398dca4f242eb0f0c316d7499eaba8adb13/data/htwt.csv) and is imported to the workspace.\n\n### Descriptive Statistics\n\nThe first step is to obtain the simple descriptive statistics for the numeric variables of htwt data, and one-way frequencies for categorical variables. This is accomplished by employing summary function. There are 237 participants who are from 13.9 to 25 years old. It is a cross-sectional study, with each participant having one observation. We can use this data set to examine the relationship of participants' height to their age and sex.\n\n::: {#d6aadc80 .cell execution_count=1}\n``` {.python .cell-code}\nimport pandas as pd\nimport statsmodels.api as sm\n\n# Importing CSV\nhtwt = pd.read_csv(\"../data/htwt.csv\")\n```\n:::\n\n\nIn order to create a regression model to demonstrate the relationship between age and height for females, we first need to create a flag variable identifying females and an interaction variable between age and female gender flag.\n\n::: {#745cd356 .cell execution_count=2}\n``` {.python .cell-code}\nhtwt['female'] = (htwt['SEX'] == 'f').astype(int)\nhtwt['fem_age'] = htwt['AGE'] * htwt['female']\nhtwt.head()\n```\n\n::: {.cell-output .cell-output-display execution_count=2}\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
ROWSEXAGEHEIGHTWEIGHTfemalefem_age
01f14.356.385.0114.3
12f15.562.3105.0115.5
23f15.363.3108.0115.3
34f16.159.092.0116.1
45f19.162.5112.5119.1
\n
\n```\n:::\n:::\n\n\n### Regression Analysis\n\nNext, we fit a regression model, representing the relationships between gender, age, height and the interaction variable created in the datastep above. We again use a where statement to restrict the analysis to those who are less than or equal to 19 years old. We use the clb option to get a 95% confidence interval for each of the parameters in the model. The model that we are fitting is ***height = b0 + b1 x female + b2 x age + b3 x fem_age + e***\n\n::: {#7030b50e .cell execution_count=3}\n``` {.python .cell-code}\nX = htwt[['female', 'AGE', 'fem_age']][htwt['AGE'] <= 19]\nX = sm.add_constant(X)\nY = htwt['HEIGHT'][htwt['AGE'] <= 19]\n\nmodel = sm.OLS(Y, X).fit()\n\nmodel.summary()\n```\n\n::: {.cell-output .cell-output-display execution_count=3}\n```{=html}\n\n\n\n \n\n\n \n\n\n \n\n\n \n\n\n \n\n\n \n\n\n \n\n\n \n\n\n \n\n
OLS Regression Results
Dep. Variable: HEIGHT R-squared: 0.460
Model: OLS Adj. R-squared: 0.452
Method: Least Squares F-statistic: 60.93
Date: Wed, 20 Aug 2025 Prob (F-statistic): 1.50e-28
Time: 14:01:28 Log-Likelihood: -534.17
No. Observations: 219 AIC: 1076.
Df Residuals: 215 BIC: 1090.
Df Model: 3
Covariance Type: nonrobust
\n\n\n \n\n\n \n\n\n \n\n\n \n\n\n \n\n
coef std err t P>|t| [0.025 0.975]
const 28.8828 2.873 10.052 0.000 23.219 34.547
female 13.6123 4.019 3.387 0.001 5.690 21.534
AGE 2.0313 0.178 11.435 0.000 1.681 2.381
fem_age -0.9294 0.248 -3.750 0.000 -1.418 -0.441
\n\n\n \n\n\n \n\n\n \n\n\n \n\n
Omnibus: 1.300 Durbin-Watson: 2.284
Prob(Omnibus): 0.522 Jarque-Bera (JB): 0.981
Skew: -0.133 Prob(JB): 0.612
Kurtosis: 3.191 Cond. No. 450.


Notes:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n```\n:::\n:::\n\n\nFrom the coefficients table b0,b1,b2,b3 are estimated as b0=28.88 b1=13.61 b2=2.03 b3=-0.92942\n\nThe resulting regression model for height, age and gender based on the available data is ***height=28.8828 + 13.6123 x female + 2.0313 x age -0.9294 x fem_age***\n\n", + "markdown": "---\ntitle: \"Linear Regression\"\noutput: html_document\n---\n\nTo demonstrate the use of linear regression we examine a dataset that illustrates the relationship between Height and Weight in a group of 237 teen-aged boys and girls. The dataset is available [here](https://github.com/PSIAIMS/CAMIS/blob/3dca6398dca4f242eb0f0c316d7499eaba8adb13/data/htwt.csv) and is imported to the workspace.\n\n### Descriptive Statistics\n\nThe first step is to obtain the simple descriptive statistics for the numeric variables of htwt data, and one-way frequencies for categorical variables. This is accomplished by employing summary function. There are 237 participants who are from 13.9 to 25 years old. It is a cross-sectional study, with each participant having one observation. We can use this data set to examine the relationship of participants' height to their age and sex.\n\n::: {#10574582 .cell execution_count=1}\n``` {.python .cell-code}\nimport pandas as pd\nimport statsmodels.api as sm\n\n# Importing CSV\nhtwt = pd.read_csv(\"../data/htwt.csv\")\n```\n:::\n\n\nIn order to create a regression model to demonstrate the relationship between age and height for females, we first need to create a flag variable identifying females and an interaction variable between age and female gender flag.\n\n::: {#033c2341 .cell execution_count=2}\n``` {.python .cell-code}\nhtwt['female'] = (htwt['SEX'] == 'f').astype(int)\nhtwt['fem_age'] = htwt['AGE'] * htwt['female']\nhtwt.head()\n```\n\n::: {.cell-output .cell-output-display execution_count=2}\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
ROWSEXAGEHEIGHTWEIGHTfemalefem_age
01f14.356.385.0114.3
12f15.562.3105.0115.5
23f15.363.3108.0115.3
34f16.159.092.0116.1
45f19.162.5112.5119.1
\n
\n```\n:::\n:::\n\n\n### Regression Analysis\n\nNext, we fit a regression model, representing the relationships between gender, age, height and the interaction variable created in the datastep above. We again use a where statement to restrict the analysis to those who are less than or equal to 19 years old. We use the clb option to get a 95% confidence interval for each of the parameters in the model. The model that we are fitting is ***height = b0 + b1 x female + b2 x age + b3 x fem_age + e***\n\n::: {#588ab687 .cell execution_count=3}\n``` {.python .cell-code}\nX = htwt[['female', 'AGE', 'fem_age']][htwt['AGE'] <= 19]\nX = sm.add_constant(X)\nY = htwt['HEIGHT'][htwt['AGE'] <= 19]\n\nmodel = sm.OLS(Y, X).fit()\n\nmodel.summary()\n```\n\n::: {.cell-output .cell-output-display execution_count=3}\n```{=html}\n\n\n\n \n\n\n \n\n\n \n\n\n \n\n\n \n\n\n \n\n\n \n\n\n \n\n\n \n\n
OLS Regression Results
Dep. Variable: HEIGHT R-squared: 0.460
Model: OLS Adj. R-squared: 0.452
Method: Least Squares F-statistic: 60.93
Date: Tue, 17 Mar 2026 Prob (F-statistic): 1.50e-28
Time: 16:38:17 Log-Likelihood: -534.17
No. Observations: 219 AIC: 1076.
Df Residuals: 215 BIC: 1090.
Df Model: 3
Covariance Type: nonrobust
\n\n\n \n\n\n \n\n\n \n\n\n \n\n\n \n\n
coef std err t P>|t| [0.025 0.975]
const 28.8828 2.873 10.052 0.000 23.219 34.547
female 13.6123 4.019 3.387 0.001 5.690 21.534
AGE 2.0313 0.178 11.435 0.000 1.681 2.381
fem_age -0.9294 0.248 -3.750 0.000 -1.418 -0.441
\n\n\n \n\n\n \n\n\n \n\n\n \n\n
Omnibus: 1.300 Durbin-Watson: 2.284
Prob(Omnibus): 0.522 Jarque-Bera (JB): 0.981
Skew: -0.133 Prob(JB): 0.612
Kurtosis: 3.191 Cond. No. 450.


Notes:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n```\n:::\n:::\n\n\nFrom the coefficients table b0,b1,b2,b3 are estimated as b0=28.88 b1=13.61 b2=2.03 b3=-0.92942\n\nThe resulting regression model for height, age and gender based on the available data is ***height=28.8828 + 13.6123 x female + 2.0313 x age -0.9294 x fem_age***\n\n", "supporting": [ - "linear_regression_files/figure-html" + "linear_regression_files" ], "filters": [], "includes": { "include-in-header": [ - "\n\n\n" + "\n\n\n" ] } } diff --git a/_freeze/python/logistic_regression/execute-results/html.json b/_freeze/python/logistic_regression/execute-results/html.json index 1d63953b2..c2b3ed1a8 100644 --- a/_freeze/python/logistic_regression/execute-results/html.json +++ b/_freeze/python/logistic_regression/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "488cd1342703e5ecf8e4b5dfa931e938", "result": { "engine": "jupyter", - "markdown": "---\ntitle: \"Logistic Regression\"\noutput: html_document\n---\n\n\n# Imports\n\n::: {#41ac130f .cell execution_count=1}\n``` {.python .cell-code}\n#data manipulation\nimport pandas as pd\nimport numpy as np\n\n#modelling\nimport statsmodels.api as sm\nfrom sklearn.linear_model import LogisticRegression\n```\n:::\n\n\n# Background\n\nIn binary logistic regression, there is a single binary dependent variable, coded by an indicator variable. For example, if we respresent a response as 1 and non-response as 0, then the corresponding probability of response, can be between 0 (certainly not a response) and 1 (certainly a response) - hence the labeling !\n\nThe logistic model models the log-odds of an event as a linear combination of one or more independent variables (explanatory variables). 
If we observed $(y_i, x_i),$ where $y_i$ is a Bernoulli variable and $x_i$ a vector of explanatory variables, the model for $\\pi_i = P(y_i=1)$ is\n\n$$\n\\text{logit}(\\pi_i)= \\log\\left\\{ \\frac{\\pi_i}{1-\\pi_i}\\right\\} = \\beta_0 + \\beta x_i, i = 1,\\ldots,n \n$$\n\nThe model is especially useful in case-control studies and leads to the effect of risk factors by odds ratios.\n\n# Example : Lung cancer data\n\n*Data source: Loprinzi CL. Laurie JA. Wieand HS. Krook JE. Novotny PJ. Kugler JW. Bartel J. Law M. Bateman M. Klatt NE. et al. Prospective evaluation of prognostic variables from patient-completed questionnaires. North Central Cancer Treatment Group. Journal of Clinical Oncology. 12(3):601-7, 1994.*\n\nThese data were sourced from the R package {survival} and have been downloaded and stored in the `data` folder.\n\n::: {#fb61788b .cell execution_count=2}\n``` {.python .cell-code}\n# importing and prepare\nlung2 = pd.read_csv(\"../data/lung_cancer.csv\")\n\n#create weight loss factor while respecting missing values\n# 1: patients with a weight loss of more than zero\n# 0: patients a weight loss of zero or less\nlung2[\"wt_grp\"] = np.where(lung2[\"wt.loss\"].isnull(), np.nan, (lung2[\"wt.loss\"] > 0).astype(int))\n```\n:::\n\n\n# Logistic Regression Modelling\n\nLet's further prepare our data for modelling by selecting the explanatory variables and the dependent variable. The Python packages that we are are aware of require complete (i.e. 
no missing values) data so for convenience of demonstrating these methods we will drop rows with missing values.\n\n::: {#2dcd95bb .cell execution_count=3}\n``` {.python .cell-code}\nx_vars = [\"age\", \"sex\", \"ph.ecog\", \"meal.cal\"]\ny_var = \"wt_grp\"\n\n# drop rows with missing values \nlung2_complete = lung2.dropna(axis=0)\n\n#select variables\nx = lung2_complete[x_vars]\ny = lung2_complete[y_var]\n```\n:::\n\n\n## Statsmodels package\n\nWe will use the `sm.Logit()` method to fit our logistic regression model.\n\n::: {#317aeae9 .cell execution_count=4}\n``` {.python .cell-code}\n#intercept column\nx_sm = sm.add_constant(x)\n\n#fit model\nlr_sm = sm.Logit(y, x_sm).fit() \nprint(lr_sm.summary())\n```\n\n::: {.cell-output .cell-output-stdout}\n```\nOptimization terminated successfully.\n Current function value: 0.568825\n Iterations 5\n Logit Regression Results \n==============================================================================\nDep. Variable: wt_grp No. Observations: 167\nModel: Logit Df Residuals: 162\nMethod: MLE Df Model: 4\nDate: Thu, 13 Mar 2025 Pseudo R-squ.: 0.05169\nTime: 16:21:34 Log-Likelihood: -94.994\nconverged: True LL-Null: -100.17\nCovariance Type: nonrobust LLR p-value: 0.03484\n==============================================================================\n coef std err z P>|z| [0.025 0.975]\n------------------------------------------------------------------------------\nconst 3.3576 1.654 2.029 0.042 0.115 6.600\nage -0.0126 0.021 -0.598 0.550 -0.054 0.029\nsex -0.8645 0.371 -2.328 0.020 -1.592 -0.137\nph.ecog 0.4182 0.263 1.592 0.111 -0.097 0.933\nmeal.cal -0.0009 0.000 -1.932 0.053 -0.002 1.27e-05\n==============================================================================\n```\n:::\n:::\n\n\n### Model fitting\n\nIn addition to the information contained in the summary, we can display the model coefficients as odds ratios:\n\n::: {#ab8dc416 .cell execution_count=5}\n``` {.python .cell-code}\nprint(\"Odds ratios for 
statsmodels logistic regression:\")\nprint(np.exp(lr_sm.params))\n```\n\n::: {.cell-output .cell-output-stdout}\n```\nOdds ratios for statsmodels logistic regression:\nconst 28.719651\nage 0.987467\nsex 0.421266\nph.ecog 1.519198\nmeal.cal 0.999140\ndtype: float64\n```\n:::\n:::\n\n\nWe can also provide the 5% confidence intervals for the odds ratios:\n\n::: {#10c00933 .cell execution_count=6}\n``` {.python .cell-code}\nprint(\"CI at 5% for statsmodels logistic regression:\")\nprint(np.exp(lr_sm.conf_int(alpha = 0.05)))\n```\n\n::: {.cell-output .cell-output-stdout}\n```\nCI at 5% for statsmodels logistic regression:\n 0 1\nconst 1.121742 735.301118\nage 0.947449 1.029175\nsex 0.203432 0.872354\nph.ecog 0.907984 2.541852\nmeal.cal 0.998269 1.000013\n```\n:::\n:::\n\n\n### Prediction\n\nLet's use our trained model to make a weight loss prediction about a new patient.\n\n::: {#a8d52762 .cell execution_count=7}\n``` {.python .cell-code}\n# new female, symptomatic but completely ambulatory patient consuming 2500 calories\nnew_pt = pd.DataFrame({\n \"age\": [56],\n \"sex\": [2],\n \"ph.ecog\": [1.00], \n \"meal.cal\": [2500]\n})\n\n# Add intercept term to the new data; for a single row this should be \n# forced using the `add_constant` command\nnew_pt_sm = sm.add_constant(new_pt, has_constant=\"add\")\nprint(\"Probability of weight loss using the statsmodels package:\")\nprint(lr_sm.predict(new_pt_sm))\n```\n\n::: {.cell-output .cell-output-stdout}\n```\nProbability of weight loss using the statsmodels package:\n0 0.308057\ndtype: float64\n```\n:::\n:::\n\n\n## Scikit-learn Package\n\nThe `scikit-learn` package is a popular package for machine learning and predictive modelling.\n\n::: callout-warning\nIt's important to note that l2 regularisation is applied by default in the `scikit-learn` implementation of logistic regression. 
More recent releases of this package include an option to have no regularisation penalty.\n:::\n\n::: {#d0d17230 .cell execution_count=8}\n``` {.python .cell-code}\nlr_sk = LogisticRegression(penalty=None).fit(x, y)\n```\n:::\n\n\nUnlike the `statsmodels` approach `scikit-learn` doesn't have a summary method for the model but you can extract some of the model parameters as follows:\n\n::: {#6fa062ac .cell execution_count=9}\n``` {.python .cell-code}\nprint(\"Intercept for scikit learn logistic regression:\")\nprint(lr_sk.intercept_)\nprint(\"Odds ratios for scikit learn logistic regression:\")\nprint(np.exp(lr_sk.coef_))\n```\n\n::: {.cell-output .cell-output-stdout}\n```\nIntercept for scikit learn logistic regression:\n[3.35756405]\nOdds ratios for scikit learn logistic regression:\n[[0.98746739 0.42126736 1.51919379 0.99914048]]\n```\n:::\n:::\n\n\nHowever, obtaining the confidence intervals and other metrics is not directly supported in `scikit-learn`.\n\n### Prediction\n\nUsing the same new patient example we can use our logistic regression model to make a prediction. The `predict_proba` method is used to return the probability for each class. If you are interested in viewing the prediction for `y = 1`, i.e. the probability of weight loss then you can select the second probability as shown:\n\n::: {#0cd100f7 .cell execution_count=10}\n``` {.python .cell-code}\nprint(\"Probability of weight loss using the scikit-learn package:\")\nprint(lr_sk.predict_proba(new_pt)[:,1])\n```\n\n::: {.cell-output .cell-output-stdout}\n```\nProbability of weight loss using the scikit-learn package:\n[0.30805813]\n```\n:::\n:::\n\n\n## Conclusions\n\nThere are two main ways to fit a logistic regression using python. 
Each of these packages have their advantages with `statsmodel` geared more towards model and coefficient interpretation in low dimensional data settings and in contrast the `scikit-learn` implementation more appropriate for use cases focused on prediction with more complex, higher dimensional data.\n\n", + "markdown": "---\ntitle: \"Logistic Regression\"\noutput: html_document\n---\n\n# Imports\n\n::: {#6bb3d03c .cell execution_count=1}\n``` {.python .cell-code}\n#data manipulation\nimport pandas as pd\nimport numpy as np\n\n#modelling\nimport statsmodels.api as sm\nfrom sklearn.linear_model import LogisticRegression\n```\n:::\n\n\n# Background\n\nIn binary logistic regression, there is a single binary dependent variable, coded by an indicator variable. For example, if we respresent a response as 1 and non-response as 0, then the corresponding probability of response, can be between 0 (certainly not a response) and 1 (certainly a response) - hence the labeling !\n\nThe logistic model models the log-odds of an event as a linear combination of one or more independent variables (explanatory variables). If we observed $(y_i, x_i),$ where $y_i$ is a Bernoulli variable and $x_i$ a vector of explanatory variables, the model for $\\pi_i = P(y_i=1)$ is\n\n$$\n\\text{logit}(\\pi_i)= \\log\\left\\{ \\frac{\\pi_i}{1-\\pi_i}\\right\\} = \\beta_0 + \\beta x_i, i = 1,\\ldots,n \n$$\n\nThe model is especially useful in case-control studies and leads to the effect of risk factors by odds ratios.\n\n# Example : Lung cancer data\n\n*Data source: Loprinzi CL. Laurie JA. Wieand HS. Krook JE. Novotny PJ. Kugler JW. Bartel J. Law M. Bateman M. Klatt NE. et al. Prospective evaluation of prognostic variables from patient-completed questionnaires. North Central Cancer Treatment Group. Journal of Clinical Oncology. 
12(3):601-7, 1994.*\n\nThese data were sourced from the R package {survival} and have been downloaded and stored in the `data` folder.\n\n::: {#d6704047 .cell execution_count=2}\n``` {.python .cell-code}\n# importing and prepare\nlung2 = pd.read_csv(\"../data/lung_cancer.csv\")\n\n#create weight loss factor while respecting missing values\n# 1: patients with a weight loss of more than zero\n# 0: patients a weight loss of zero or less\nlung2[\"wt_grp\"] = np.where(lung2[\"wt.loss\"].isnull(), np.nan, (lung2[\"wt.loss\"] > 0).astype(int))\n```\n:::\n\n\n# Logistic Regression Modelling\n\nLet's further prepare our data for modelling by selecting the explanatory variables and the dependent variable. The Python packages that we are are aware of require complete (i.e. no missing values) data so for convenience of demonstrating these methods we will drop rows with missing values.\n\n::: {#5e91d816 .cell execution_count=3}\n``` {.python .cell-code}\nx_vars = [\"age\", \"sex\", \"ph.ecog\", \"meal.cal\"]\ny_var = \"wt_grp\"\n\n# drop rows with missing values \nlung2_complete = lung2.dropna(axis=0)\n\n#select variables\nx = lung2_complete[x_vars]\ny = lung2_complete[y_var]\n```\n:::\n\n\n## Statsmodels package\n\nWe will use the `sm.Logit()` method to fit our logistic regression model.\n\n::: {#66e583e6 .cell execution_count=4}\n``` {.python .cell-code}\n#intercept column\nx_sm = sm.add_constant(x)\n\n#fit model\nlr_sm = sm.Logit(y, x_sm).fit() \nprint(lr_sm.summary())\n```\n\n::: {.cell-output .cell-output-stdout}\n```\nOptimization terminated successfully.\n Current function value: 0.568825\n Iterations 5\n Logit Regression Results \n==============================================================================\nDep. Variable: wt_grp No. 
Observations: 167\nModel: Logit Df Residuals: 162\nMethod: MLE Df Model: 4\nDate: Tue, 17 Mar 2026 Pseudo R-squ.: 0.05169\nTime: 16:38:14 Log-Likelihood: -94.994\nconverged: True LL-Null: -100.17\nCovariance Type: nonrobust LLR p-value: 0.03484\n==============================================================================\n coef std err z P>|z| [0.025 0.975]\n------------------------------------------------------------------------------\nconst 3.3576 1.654 2.029 0.042 0.115 6.600\nage -0.0126 0.021 -0.598 0.550 -0.054 0.029\nsex -0.8645 0.371 -2.328 0.020 -1.592 -0.137\nph.ecog 0.4182 0.263 1.592 0.111 -0.097 0.933\nmeal.cal -0.0009 0.000 -1.932 0.053 -0.002 1.27e-05\n==============================================================================\n```\n:::\n:::\n\n\n### Model fitting\n\nIn addition to the information contained in the summary, we can display the model coefficients as odds ratios:\n\n::: {#0caa37f8 .cell execution_count=5}\n``` {.python .cell-code}\nprint(\"Odds ratios for statsmodels logistic regression:\")\nprint(np.exp(lr_sm.params))\n```\n\n::: {.cell-output .cell-output-stdout}\n```\nOdds ratios for statsmodels logistic regression:\nconst 28.719651\nage 0.987467\nsex 0.421266\nph.ecog 1.519198\nmeal.cal 0.999140\ndtype: float64\n```\n:::\n:::\n\n\nWe can also provide the 5% confidence intervals for the odds ratios:\n\n::: {#322a3025 .cell execution_count=6}\n``` {.python .cell-code}\nprint(\"CI at 5% for statsmodels logistic regression:\")\nprint(np.exp(lr_sm.conf_int(alpha = 0.05)))\n```\n\n::: {.cell-output .cell-output-stdout}\n```\nCI at 5% for statsmodels logistic regression:\n 0 1\nconst 1.121742 735.301118\nage 0.947449 1.029175\nsex 0.203432 0.872354\nph.ecog 0.907984 2.541852\nmeal.cal 0.998269 1.000013\n```\n:::\n:::\n\n\n### Prediction\n\nLet's use our trained model to make a weight loss prediction about a new patient.\n\n::: {#026efa48 .cell execution_count=7}\n``` {.python .cell-code}\n# new female, symptomatic but completely 
ambulatory patient consuming 2500 calories\nnew_pt = pd.DataFrame({\n \"age\": [56],\n \"sex\": [2],\n \"ph.ecog\": [1.00], \n \"meal.cal\": [2500]\n})\n\n# Add intercept term to the new data; for a single row this should be \n# forced using the `add_constant` command\nnew_pt_sm = sm.add_constant(new_pt, has_constant=\"add\")\nprint(\"Probability of weight loss using the statsmodels package:\")\nprint(lr_sm.predict(new_pt_sm))\n```\n\n::: {.cell-output .cell-output-stdout}\n```\nProbability of weight loss using the statsmodels package:\n0 0.308057\ndtype: float64\n```\n:::\n:::\n\n\n## Scikit-learn Package\n\nThe `scikit-learn` package is a popular package for machine learning and predictive modelling.\n\n::: callout-warning\nIt's important to note that l2 regularisation is applied by default in the `scikit-learn` implementation of logistic regression. More recent releases of this package include an option to have no regularisation penalty.\n:::\n\n::: {#a38f95c8 .cell execution_count=8}\n``` {.python .cell-code}\nlr_sk = LogisticRegression(penalty=None).fit(x, y)\n```\n:::\n\n\nUnlike the `statsmodels` approach `scikit-learn` doesn't have a summary method for the model but you can extract some of the model parameters as follows:\n\n::: {#a7ea0c8f .cell execution_count=9}\n``` {.python .cell-code}\nprint(\"Intercept for scikit learn logistic regression:\")\nprint(lr_sk.intercept_)\nprint(\"Odds ratios for scikit learn logistic regression:\")\nprint(np.exp(lr_sk.coef_))\n```\n\n::: {.cell-output .cell-output-stdout}\n```\nIntercept for scikit learn logistic regression:\n[3.35756405]\nOdds ratios for scikit learn logistic regression:\n[[0.98746739 0.42126736 1.51919379 0.99914048]]\n```\n:::\n:::\n\n\nHowever, obtaining the confidence intervals and other metrics is not directly supported in `scikit-learn`.\n\n### Prediction\n\nUsing the same new patient example we can use our logistic regression model to make a prediction. 
The `predict_proba` method is used to return the probability for each class. If you are interested in viewing the prediction for `y = 1`, i.e. the probability of weight loss then you can select the second probability as shown:\n\n::: {#1699b654 .cell execution_count=10}\n``` {.python .cell-code}\nprint(\"Probability of weight loss using the scikit-learn package:\")\nprint(lr_sk.predict_proba(new_pt)[:,1])\n```\n\n::: {.cell-output .cell-output-stdout}\n```\nProbability of weight loss using the scikit-learn package:\n[0.30805813]\n```\n:::\n:::\n\n\n## Conclusions\n\nThere are two main ways to fit a logistic regression using python. Each of these packages have their advantages with `statsmodel` geared more towards model and coefficient interpretation in low dimensional data settings and in contrast the `scikit-learn` implementation more appropriate for use cases focused on prediction with more complex, higher dimensional data.\n\n", "supporting": [ "logistic_regression_files" ], diff --git a/_freeze/python/one_sample_t_test/execute-results/html.json b/_freeze/python/one_sample_t_test/execute-results/html.json index 4ec160189..ae93aa959 100644 --- a/_freeze/python/one_sample_t_test/execute-results/html.json +++ b/_freeze/python/one_sample_t_test/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "eeceae62149001aef0c71b5a738522a1", "result": { "engine": "jupyter", - "markdown": "---\ntitle: \"One Sample t-test in Python\"\noutput: html_document\n---\n\n\nThe One Sample t-test is used to compare a single sample against an expected hypothesis value. In the One Sample t-test, the mean of the sample is compared against the hypothesis value. In Python, a One Sample t-test can be performed using the scipy.stats.ttest_1samp(...) function from the scipy package, which accepts the following parameters:\n\n1.*a*: Sample observations.\n\n2.*popmean*: Expected value in null hypothesis. 
If array_like, then its length along axis must equal 1, and it must otherwise be broadcastable with a.\n\n3.*nan_policy*: Defines how to handle input NaNs.\n\n4.*alternative* (optional): Defines the alternative hypothesis.\n\n5.*keepdims*: If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input array.\n\n## Data Used\n\n::: {#6e08b9b6 .cell execution_count=1}\n``` {.python .cell-code}\nimport pandas as pd\n\n# Create sample data\ndata = {\n 'score': [40, 47, 52, 26, 19, 25, 35, 39, 26, 48, 14, 22, 42, 34, 33, 18, 15, 29, 41, 44, 51, 43, 27, 46, 28, 49, 31, 28, 54, 45],\n 'count': [2, 2, 2, 1, 2, 2, 4, 1, 1, 1, 2, 1, 1, 2, 2, 1, 1, 1, 2, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1]\n}\n\ndf = pd.DataFrame(data)\n```\n:::\n\n\nThe following code was used to test the comparison in Python. Note that the baseline null hypothesis goes in the \"popmean\" parameter.\n\n::: {#a98dd98d .cell execution_count=2}\n``` {.python .cell-code}\nimport pandas as pd\nfrom scipy import stats\n\n# Perform one-sample t-test\nsample_mean = df['score'].mean()\nnull_mean = 30 # Hypothetical null hypothesis mean for comparison\nalpha = 0.05 # Significance level\n\nt_statistic, p_value = stats.ttest_1samp(df['score'], null_mean)\n\nprint(f\"t: {t_statistic}\")\nprint(f\"p-value: {p_value}\")\nprint(f\"mean of x: {sample_mean}\")\n\nif p_value < alpha:\n print(\"Reject null hypothesis: There is a significant difference.\")\nelse:\n print(\"Fail to reject null hypothesis: There is no significant difference.\")\n```\n\n::: {.cell-output .cell-output-stdout}\n```\nt: 2.364306444879101\np-value: 0.02497410401836272\nmean of x: 35.03333333333333\nReject null hypothesis: There is a significant difference.\n```\n:::\n:::\n\n\n", + "markdown": "---\ntitle: \"One Sample t-test in Python\"\noutput: html_document\n---\n\nThe One Sample t-test is used to compare a single sample against an expected 
hypothesis value. In the One Sample t-test, the mean of the sample is compared against the hypothesis value. In Python, a One Sample t-test can be performed using the scipy.stats.ttest_1samp(...) function from the scipy package, which accepts the following parameters:\n\n1.*a*: Sample observations.\n\n2.*popmean*: Expected value in null hypothesis. If array_like, then its length along axis must equal 1, and it must otherwise be broadcastable with a.\n\n3.*nan_policy*: Defines how to handle input NaNs.\n\n4.*alternative* (optional): Defines the alternative hypothesis.\n\n5.*keepdims*: If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input array.\n\n## Data Used\n\n::: {#1419aeb6 .cell execution_count=1}\n``` {.python .cell-code}\nimport pandas as pd\n\n# Create sample data\ndata = {\n 'score': [40, 47, 52, 26, 19, 25, 35, 39, 26, 48, 14, 22, 42, 34, 33, 18, 15, 29, 41, 44, 51, 43, 27, 46, 28, 49, 31, 28, 54, 45],\n 'count': [2, 2, 2, 1, 2, 2, 4, 1, 1, 1, 2, 1, 1, 2, 2, 1, 1, 1, 2, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1]\n}\n\ndf = pd.DataFrame(data)\n```\n:::\n\n\nThe following code was used to test the comparison in Python. 
Note that the baseline null hypothesis goes in the \"popmean\" parameter.\n\n::: {#8609d1f4 .cell execution_count=2}\n``` {.python .cell-code}\nimport pandas as pd\nfrom scipy import stats\n\n# Perform one-sample t-test\nsample_mean = df['score'].mean()\nnull_mean = 30 # Hypothetical null hypothesis mean for comparison\nalpha = 0.05 # Significance level\n\nt_statistic, p_value = stats.ttest_1samp(df['score'], null_mean)\n\nprint(f\"t: {t_statistic}\")\nprint(f\"p-value: {p_value}\")\nprint(f\"mean of x: {sample_mean}\")\n\nif p_value < alpha:\n print(\"Reject null hypothesis: There is a significant difference.\")\nelse:\n print(\"Fail to reject null hypothesis: There is no significant difference.\")\n```\n\n::: {.cell-output .cell-output-stdout}\n```\nt: 2.364306444879101\np-value: 0.02497410401836272\nmean of x: 35.03333333333333\nReject null hypothesis: There is a significant difference.\n```\n:::\n:::\n\n\n", "supporting": [ "one_sample_t_test_files" ], diff --git a/_freeze/python/paired_t_test/execute-results/html.json b/_freeze/python/paired_t_test/execute-results/html.json index 0de02f3b8..33789bd83 100644 --- a/_freeze/python/paired_t_test/execute-results/html.json +++ b/_freeze/python/paired_t_test/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "3e592bc323dfdd5578126e526331ea9d", "result": { "engine": "jupyter", - "markdown": "---\ntitle: \"Paired t-test\"\noutput: html_document\n---\n\n\nPaired t-tests are used to test the difference of means for two dependant variables. In the Paired t-test, the difference of the means between the two samples is compared to a given number that represents the null hypothesis. For a Paired t-test, the number of observations in each sample must be equal.\n\nIn Python, a Paired t-test can be performed using the scipy.stats.ttest_rel(...) function from the scipy package, which accepts the following parameters:\n\n1.*a, b*: Sample observations. 
The arrays must have the same shape.\n\n2.*axis*: If an int, the axis of the input along which to compute the statistic. The statistic of each axis-slice (e.g. row) of the input will appear in a corresponding element of the output. If None, the input will be raveled before computing the statistic.\n\n3.*nan_policy*: Defines how to handle input NaNs.\n\n4.*alternative* (optional): Defines the alternative hypothesis.\n\n5.*keepdims*: If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input array.\n\n## Data Used\n\n::: {#a0a90aff .cell execution_count=1}\n``` {.python .cell-code}\nimport pandas as pd\n\n# Create sample data\ndata = {\n 'SBPbefore': [120, 124, 130, 118, 140, 128, 140, 135, 126, 130, 126, 127],\n 'SBPafter': [128, 131, 131, 127, 132, 125, 141, 137, 118, 132, 129, 135]\n}\n\ndf_pressure = pd.DataFrame(data)\n```\n:::\n\n\nThe following code was used to test the comparison in Python. Note that the baseline null hypothesis goes in the \"popmean\" parameter.\n\n::: {#135ff692 .cell execution_count=2}\n``` {.python .cell-code}\nimport pandas as pd\nfrom scipy import stats\n\n# Perform paired t-test\nt_stat, p_value = stats.ttest_rel(df_pressure['SBPbefore'], df_pressure['SBPafter'])\n\n# Print results\nprint(\"Paired t-test:\")\nprint(f\"t = {t_stat}\")\nprint(f\"p-value = {p_value}\")\n\n```\n\n::: {.cell-output .cell-output-stdout}\n```\nPaired t-test:\nt = -1.0896479884009451\np-value = 0.299163498777129\n```\n:::\n:::\n\n\n", + "markdown": "---\ntitle: \"Paired t-test\"\noutput: html_document\n---\n\nPaired t-tests are used to test the difference of means for two dependant variables. In the Paired t-test, the difference of the means between the two samples is compared to a given number that represents the null hypothesis. 
For a Paired t-test, the number of observations in each sample must be equal.\n\nIn Python, a Paired t-test can be performed using the scipy.stats.ttest_rel(...) function from the scipy package, which accepts the following parameters:\n\n1.*a, b*: Sample observations. The arrays must have the same shape.\n\n2.*axis*: If an int, the axis of the input along which to compute the statistic. The statistic of each axis-slice (e.g. row) of the input will appear in a corresponding element of the output. If None, the input will be raveled before computing the statistic.\n\n3.*nan_policy*: Defines how to handle input NaNs.\n\n4.*alternative* (optional): Defines the alternative hypothesis.\n\n5.*keepdims*: If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input array.\n\n## Data Used\n\n::: {#edfb1a4c .cell execution_count=1}\n``` {.python .cell-code}\nimport pandas as pd\n\n# Create sample data\ndata = {\n 'SBPbefore': [120, 124, 130, 118, 140, 128, 140, 135, 126, 130, 126, 127],\n 'SBPafter': [128, 131, 131, 127, 132, 125, 141, 137, 118, 132, 129, 135]\n}\n\ndf_pressure = pd.DataFrame(data)\n```\n:::\n\n\nThe following code was used to test the comparison in Python. 
Note that the baseline null hypothesis goes in the \"popmean\" parameter.\n\n::: {#6c9a59a8 .cell execution_count=2}\n``` {.python .cell-code}\nimport pandas as pd\nfrom scipy import stats\n\n# Perform paired t-test\nt_stat, p_value = stats.ttest_rel(df_pressure['SBPbefore'], df_pressure['SBPafter'])\n\n# Print results\nprint(\"Paired t-test:\")\nprint(f\"t = {t_stat}\")\nprint(f\"p-value = {p_value}\")\n\n```\n\n::: {.cell-output .cell-output-stdout}\n```\nPaired t-test:\nt = -1.0896479884009451\np-value = 0.299163498777129\n```\n:::\n:::\n\n\n", "supporting": [ "paired_t_test_files" ], diff --git a/_freeze/python/skewness_kurtosis/execute-results/html.json b/_freeze/python/skewness_kurtosis/execute-results/html.json index f2d0cc58f..97fae5dc3 100644 --- a/_freeze/python/skewness_kurtosis/execute-results/html.json +++ b/_freeze/python/skewness_kurtosis/execute-results/html.json @@ -2,9 +2,9 @@ "hash": "bd2f6501555b922af19afa1c7e24ba4e", "result": { "engine": "jupyter", - "markdown": "---\ntitle: \"Skewness/Kurtosis\"\noutput: html_document\n---\n\n\n# **Skewness and Kurtosis in Python**\n\nSkewness measures the the amount of asymmetry in a distribution, while Kurtosis describes the \"tailedness\" of the curve.\nThese measures are frequently used to assess the normality of the data.\nThere are several methods to calculate these measures. 
In Python, the packages **pandas**, **scipy.stats.skew** and **scipy.stats.kurtosis** can be used.\n\n## Data Used\n\n::: {#4e7afb5a .cell execution_count=1}\n``` {.python .cell-code}\nimport pandas as pd\nfrom scipy.stats import skew, kurtosis\n\n# Create sample data\ndata = {\n 'team': [\"A\"]*5 + [\"B\"]*5 + [\"C\"]*5,\n 'points': [10, 17, 17, 18, 15, 10, 14, 13, 29, 25, 12, 30, 34, 12, 11],\n 'assists': [2, 5, 6, 3, 0, 2, 5, 4, 0, 2, 1, 1, 3, 4, 7]\n}\ndf = pd.DataFrame(data)\n```\n:::\n\n\n#### Skewness \n\nJoanes and Gill (1998) discusses three methods for estimating skewness:\n\n- Type 1: This is the typical definition used in many older textbooks\n\n$$g_1 = m_1/m_2^{3/2}$$\n\n- Type 2: Used in SAS and SPSS\n\n $$\n G_1 = g_1\\sqrt{n(n-1)}/(n-2)\n $$\n\n- Type 3: Used in MINITAB and BMDP\n\n $$\n b_1 = m_3/s^3 = g_1((n-1)/n)^{3/2}\n $$\n\nAll three skewness measures are unbiased under normality. The three methods are illustrated in the following code:\n\n::: {#75d139f5 .cell message='false' execution_count=2}\n``` {.python .cell-code}\n# Skewness\ntype1_skew = skew(df['points'])\ntype2_skew = df['points'].skew()\ntype3_skew = skew(df['points']) * ((len(df['points']) - 1) / len(df['points'])) ** (3/2)\n\nprint(f\"Skewness - Type 1: {type1_skew}\")\nprint(f\"Skewness - Type 2: {type2_skew}\")\nprint(f\"Skewness - Type 3: {type3_skew}\")\n```\n\n::: {.cell-output .cell-output-stdout}\n```\nSkewness - Type 1: 0.9054442043798532\nSkewness - Type 2: 1.0093179298709385\nSkewness - Type 3: 0.816426058828937\n```\n:::\n:::\n\n\nThe default for the **scipy.stats.skew** function is type 1.\n\n#### Kurtosis\n\nJoanes and Gill (1998) discuss three methods for estimating kurtosis:\n\n- Type 1: This is the typical definition used in many older textbooks\n\n$$g_2 = m_4/m_2^{2}-3$$\n\n- Type 2: Used in SAS and SPSS\n\n $$G_2 = ((n+1)g_2+6)*\\frac{(n-1)}{(n-2)(n-3)}$$\n\n- Type 3: Used in MINITAB and BMDP\n\n $$b_2 = m_4/s^4-3 = (g_2 + 3)(1-1/n)^2-3$$\n\nOnly $G_2$ 
(corresponding to type 2) is unbiased under normality. The three methods are illustrated in the following code:\n\n::: {#d743038e .cell message='false' execution_count=3}\n``` {.python .cell-code}\n# Kurtosis\ntype1_kurt = kurtosis(df['points'])\n\nn = len(df['points'])\ng2 = kurtosis(df['points'], fisher=True) # Fisher's kurtosis\n\n# Calculate the kurtosis type using the formula G2\ntype2_kurt = ((n + 1) * g2 + 6) * ((n - 1) / ((n - 2) * (n - 3)))\n\n# Calculate the kurtosis type using the formula b2\nn = len(df['points'])\ng2 = kurtosis(df['points'], fisher=True) # Fisher's kurtosis\n\ntype3_kurt = (g2 + 3) * ((1 - 1/n) ** 2) - 3\n\nprint(f\"Kurtosis - Type 1: {type1_kurt}\")\nprint(f\"Kurtosis - Type 2: {type2_kurt}\")\nprint(f\"Kurtosis - Type 3: {type3_kurt}\")\n```\n\n::: {.cell-output .cell-output-stdout}\n```\nKurtosis - Type 1: -0.5833410771247833\nKurtosis - Type 2: -0.2991564184355863\nKurtosis - Type 3: -0.8948215605175891\n```\n:::\n:::\n\n\nThe default for the **scipy.stats.kurtosis** function is type 1.\n\n", + "markdown": "---\ntitle: \"Skewness/Kurtosis\"\noutput: html_document\n---\n\n# **Skewness and Kurtosis in Python**\n\nSkewness measures the the amount of asymmetry in a distribution, while Kurtosis describes the \"tailedness\" of the curve.\nThese measures are frequently used to assess the normality of the data.\nThere are several methods to calculate these measures. 
In Python, the packages **pandas**, **scipy.stats.skew** and **scipy.stats.kurtosis** can be used.\n\n## Data Used\n\n::: {#8feb9029 .cell execution_count=1}\n``` {.python .cell-code}\nimport pandas as pd\nfrom scipy.stats import skew, kurtosis\n\n# Create sample data\ndata = {\n 'team': [\"A\"]*5 + [\"B\"]*5 + [\"C\"]*5,\n 'points': [10, 17, 17, 18, 15, 10, 14, 13, 29, 25, 12, 30, 34, 12, 11],\n 'assists': [2, 5, 6, 3, 0, 2, 5, 4, 0, 2, 1, 1, 3, 4, 7]\n}\ndf = pd.DataFrame(data)\n```\n:::\n\n\n#### Skewness \n\nJoanes and Gill (1998) discusses three methods for estimating skewness:\n\n- Type 1: This is the typical definition used in many older textbooks\n\n$$g_1 = m_1/m_2^{3/2}$$\n\n- Type 2: Used in SAS and SPSS\n\n $$\n G_1 = g_1\\sqrt{n(n-1)}/(n-2)\n $$\n\n- Type 3: Used in MINITAB and BMDP\n\n $$\n b_1 = m_3/s^3 = g_1((n-1)/n)^{3/2}\n $$\n\nAll three skewness measures are unbiased under normality. The three methods are illustrated in the following code:\n\n::: {#29a89d16 .cell message='false' execution_count=2}\n``` {.python .cell-code}\n# Skewness\ntype1_skew = skew(df['points'])\ntype2_skew = df['points'].skew()\ntype3_skew = skew(df['points']) * ((len(df['points']) - 1) / len(df['points'])) ** (3/2)\n\nprint(f\"Skewness - Type 1: {type1_skew}\")\nprint(f\"Skewness - Type 2: {type2_skew}\")\nprint(f\"Skewness - Type 3: {type3_skew}\")\n```\n\n::: {.cell-output .cell-output-stdout}\n```\nSkewness - Type 1: 0.9054442043798532\nSkewness - Type 2: 1.0093179298709385\nSkewness - Type 3: 0.816426058828937\n```\n:::\n:::\n\n\nThe default for the **scipy.stats.skew** function is type 1.\n\n#### Kurtosis\n\nJoanes and Gill (1998) discuss three methods for estimating kurtosis:\n\n- Type 1: This is the typical definition used in many older textbooks\n\n$$g_2 = m_4/m_2^{2}-3$$\n\n- Type 2: Used in SAS and SPSS\n\n $$G_2 = ((n+1)g_2+6)*\\frac{(n-1)}{(n-2)(n-3)}$$\n\n- Type 3: Used in MINITAB and BMDP\n\n $$b_2 = m_4/s^4-3 = (g_2 + 3)(1-1/n)^2-3$$\n\nOnly $G_2$ 
(corresponding to type 2) is unbiased under normality. The three methods are illustrated in the following code:\n\n::: {#666e1aa2 .cell message='false' execution_count=3}\n``` {.python .cell-code}\n# Kurtosis\ntype1_kurt = kurtosis(df['points'])\n\nn = len(df['points'])\ng2 = kurtosis(df['points'], fisher=True) # Fisher's kurtosis\n\n# Calculate the kurtosis type using the formula G2\ntype2_kurt = ((n + 1) * g2 + 6) * ((n - 1) / ((n - 2) * (n - 3)))\n\n# Calculate the kurtosis type using the formula b2\nn = len(df['points'])\ng2 = kurtosis(df['points'], fisher=True) # Fisher's kurtosis\n\ntype3_kurt = (g2 + 3) * ((1 - 1/n) ** 2) - 3\n\nprint(f\"Kurtosis - Type 1: {type1_kurt}\")\nprint(f\"Kurtosis - Type 2: {type2_kurt}\")\nprint(f\"Kurtosis - Type 3: {type3_kurt}\")\n```\n\n::: {.cell-output .cell-output-stdout}\n```\nKurtosis - Type 1: -0.5833410771247833\nKurtosis - Type 2: -0.2991564184355863\nKurtosis - Type 3: -0.8948215605175891\n```\n:::\n:::\n\n\nThe default for the **scipy.stats.kurtosis** function is type 1.\n\n", "supporting": [ - "skewness_kurtosis_files/figure-html" + "skewness_kurtosis_files" ], "filters": [], "includes": {} diff --git a/_freeze/python/survey-stats-summary/execute-results/html.json b/_freeze/python/survey-stats-summary/execute-results/html.json index 0fcbe6dd7..f50cdb7cb 100644 --- a/_freeze/python/survey-stats-summary/execute-results/html.json +++ b/_freeze/python/survey-stats-summary/execute-results/html.json @@ -2,7 +2,7 @@ "hash": "b4897c935b6e3778bb51e73fb37b4bf3", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Survey Summary Statistics using Python\"\nbibliography: ../Comp/survey-stats-summary.bib\n---\n\nWhen conducting large-scale trials on samples of the population, it can be necessary to use a more complex sampling design than a simple random sample.\n\n- **Weighting** – If smaller populations are sampled more heavily to increase precision, then it is necessary to weight these observations in the analysis.\n\n- 
**Finite population correction** – Larger samples of populations result in lower variability in comparison to smaller samples.\n\n- **Stratification** – Dividing a population into sub-groups and sampling from each group. This protects from obtaining a very poor sample (e.g. under or over-represented groups), can give samples of a known precision, and gives more precise estimates for population means and totals.\n\n- **Clustering** – Dividing a population into sub-groups, and only sampling certain groups. This gives a lower precision, however can be much more convenient and cheaper - for example if surveying school children you may only sample a subset of schools to avoid travelling to a school to interview a single child.\n\nAll of these designs need to be taken into account when calculating statistics, and when producing models. Only summary statistics are discussed in this document, and variances are calculated using Taylor series linearisation methods. For a more detailed introduction to calculating survey statistics using statistical software, see [@Lohr_2022].\n\nThe ecosystem of survey statistics packages is less mature in Python than in R or SAS, however there is a package that provides a subset of the functionality: [`samplics`](https://samplics-org.github.io/samplics/).\n\n# Complex Survey Designs\n\nFor R and SAS, we give examples of summary statistics on a simple survey design which just had a finite population correction. Unfortunately, `samplics` does not have the ability to just use an fpc with no PSU or Strata, so we will instead demonstrate just with a more complete (and realistic) survey design, using the NHANES [@NHANES_2010] dataset:\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n \n
SDMVPSUSDMVSTRAWTMEC2YRHI_CHOLraceagecatRIAGENDR
18381528.7702(19,39]1
18414509.2803(0,19]1
28612041.6403(0,19]1
27521000.3403(59,Inf]2
18822633.5801(19,39]1
28574112.4912(39,59]2
\n
\n```\n\n:::\n:::\n\n\n# Summary Statistics\n\n## Mean\n\nIf we want to calculate a mean of a variable in a dataset using `samplics`, we need to create an estimator object using the estimation method we will use - here Taylor Series estimation - and the parameter we are estimating. Then, we can specify the survey design by passing columns which define our strata and PSUs, and a column to estimate:\n\n\n::: {.cell}\n\n```{.python .cell-code}\nimport numpy as np\nimport pandas as pd\n\nfrom samplics import TaylorEstimator\nfrom samplics.utils.types import PopParam\n\nnhanes = pd.read_csv(\"../data/nhanes.csv\")\n\nmean_estimator = TaylorEstimator(PopParam.mean)\n\nmean_estimator.estimate(\n y=nhanes[\"HI_CHOL\"],\n samp_weight=nhanes[\"WTMEC2YR\"],\n psu=nhanes[\"SDMVPSU\"],\n stratum=nhanes[\"SDMVSTRA\"],\n remove_nan=True,\n)\nprint(mean_estimator.to_dataframe())\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n _param _estimate _stderror _lci _uci _cv\n0 PopParam.mean 0.112143 0.005446 0.100598 0.123688 0.048562\n```\n\n\n:::\n:::\n\n\n## Total\n\nCalculating population totals can be done by changing the `TaylorEstimator` parameter to `PopParam.total`:\n\n\n::: {.cell}\n\n```{.python .cell-code}\ntotal_estimator = TaylorEstimator(PopParam.total)\n\ntotal_estimator.estimate(\n y=nhanes[\"HI_CHOL\"],\n samp_weight=nhanes[\"WTMEC2YR\"],\n psu=nhanes[\"SDMVPSU\"],\n stratum=nhanes[\"SDMVSTRA\"],\n remove_nan=True,\n)\nprint(total_estimator.to_dataframe())\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n _param _estimate ... _uci _cv\n0 PopParam.total 2.863525e+07 ... 
3.291896e+07 0.070567\n\n[1 rows x 6 columns]\n```\n\n\n:::\n:::\n\n\n## Ratios\n\nCalculating population ratios can be done by changing the `TaylorEstimator` parameter to `PopParam.ratio`, and additionally specifying an `x` parameter in the `estimate` method:\n\n\n::: {.cell}\n\n```{.python .cell-code}\nratio_estimator = TaylorEstimator(PopParam.ratio)\n\nratio_estimator.estimate(\n y=nhanes[\"HI_CHOL\"],\n x=nhanes[\"RIAGENDR\"],\n samp_weight=nhanes[\"WTMEC2YR\"],\n psu=nhanes[\"SDMVPSU\"],\n stratum=nhanes[\"SDMVSTRA\"],\n remove_nan=True,\n)\nprint(ratio_estimator.to_dataframe())\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n _param _estimate _stderror _lci _uci _cv\n0 PopParam.ratio 0.074222 0.003715 0.066347 0.082097 0.050049\n```\n\n\n:::\n:::\n\n\n## Proportions\n\nCalculating proportions can be done by changing the `TaylorEstimator` parameter to `PopParam.prop`:\n\n\n::: {.cell}\n\n```{.python .cell-code}\nprop_estimator = TaylorEstimator(PopParam.prop)\n\nprop_estimator.estimate(\n y=nhanes[\"agecat\"],\n samp_weight=nhanes[\"WTMEC2YR\"],\n psu=nhanes[\"SDMVPSU\"],\n stratum=nhanes[\"SDMVSTRA\"],\n remove_nan=True,\n)\nprop_estimator.to_dataframe()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n _param _level _estimate _stderror _lci _uci _cv\n0 PopParam.prop (0,19] 0.207749 0.006130 0.195054 0.221044 0.029506\n1 PopParam.prop (19,39] 0.293408 0.009561 0.273557 0.314077 0.032585\n2 PopParam.prop (39,59] 0.303290 0.004519 0.293795 0.312955 0.014901\n3 PopParam.prop (59,Inf] 0.195553 0.008093 0.178965 0.213280 0.041383\n```\n\n\n:::\n:::\n\n\n## Quantiles\n\n`samplics` currently does not have a method to calculate quantiles.\n\n## Domain Estimations\n\nWe can perform domain estimations of different sub-populations by passing our domain column as a parameter to the `estimate` method:\n\n\n::: {.cell}\n\n```{.python .cell-code}\nmean_estimator = TaylorEstimator(PopParam.mean)\n\nmean_estimator.estimate(\n y=nhanes[\"HI_CHOL\"],\n 
samp_weight=nhanes[\"WTMEC2YR\"],\n psu=nhanes[\"SDMVPSU\"],\n stratum=nhanes[\"SDMVSTRA\"],\n domain=nhanes[\"race\"],\n remove_nan=True,\n)\nmean_estimator.to_dataframe()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n _param _domain _estimate _stderror _lci _uci _cv\n0 PopParam.mean 1 0.101492 0.006246 0.088251 0.114732 0.061540\n1 PopParam.mean 2 0.121649 0.006604 0.107649 0.135649 0.054288\n2 PopParam.mean 3 0.078640 0.010385 0.056626 0.100655 0.132053\n3 PopParam.mean 4 0.099679 0.024666 0.047389 0.151969 0.247458\n```\n\n\n:::\n:::\n\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-23\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P survey * 4.4-8 2025-08-28 [?] 
RSPM\n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n\n * ── Packages attached to the search path.\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Python configuration ────────────────────────────────────────────────────────\n Python 3.12.6 (v3.12.6:a4a2d2b0d85, Sep 6 2024, 16:08:03) [Clang 13.0.0 (clang-1300.0.29.30)]\n samplics 0.4.22\n```\n\n\n:::\n:::\n\n:::\n", + "markdown": "---\ntitle: \"Survey Summary Statistics using Python\"\nbibliography: ../Comp/survey-stats-summary.bib\n---\n\nWhen conducting large-scale trials on samples of the population, it can be necessary to use a more complex sampling design than a simple random sample.\n\n- **Weighting** – If smaller populations are sampled more heavily to increase precision, then it is necessary to weight these observations in the analysis.\n\n- **Finite population correction** – Larger samples of populations result in lower variability in comparison to smaller samples.\n\n- **Stratification** – Dividing a population into sub-groups and sampling from each group. This protects from obtaining a very poor sample (e.g. under or over-represented groups), can give samples of a known precision, and gives more precise estimates for population means and totals.\n\n- **Clustering** – Dividing a population into sub-groups, and only sampling certain groups. This gives a lower precision, however can be much more convenient and cheaper - for example if surveying school children you may only sample a subset of schools to avoid travelling to a school to interview a single child.\n\nAll of these designs need to be taken into account when calculating statistics, and when producing models. 
Only summary statistics are discussed in this document, and variances are calculated using Taylor series linearisation methods. For a more detailed introduction to calculating survey statistics using statistical software, see [@Lohr_2022].\n\nThe ecosystem of survey statistics packages is less mature in Python than in R or SAS, however there is a package that provides a subset of the functionality: [`samplics`](https://samplics-org.github.io/samplics/).\n\n# Complex Survey Designs\n\nFor R and SAS, we give examples of summary statistics on a simple survey design which just had a finite population correction. Unfortunately, `samplics` does not have the ability to just use an fpc with no PSU or Strata, so we will instead demonstrate just with a more complete (and realistic) survey design, using the NHANES [@NHANES_2010] dataset:\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n \n
SDMVPSUSDMVSTRAWTMEC2YRHI_CHOLraceagecatRIAGENDR
18381528.7702(19,39]1
18414509.2803(0,19]1
28612041.6403(0,19]1
27521000.3403(59,Inf]2
18822633.5801(19,39]1
28574112.4912(39,59]2
\n
\n```\n\n:::\n:::\n\n\n# Summary Statistics\n\n## Mean\n\nIf we want to calculate a mean of a variable in a dataset using `samplics`, we need to create an estimator object using the estimation method we will use - here Taylor Series estimation - and the parameter we are estimating. Then, we can specify the survey design by passing columns which define our strata and PSUs, and a column to estimate:\n\n\n::: {.cell}\n\n```{.python .cell-code}\nimport numpy as np\nimport pandas as pd\n\nfrom samplics import TaylorEstimator\nfrom samplics.utils.types import PopParam\n\nnhanes = pd.read_csv(\"../data/nhanes.csv\")\n\nmean_estimator = TaylorEstimator(PopParam.mean)\n\nmean_estimator.estimate(\n y=nhanes[\"HI_CHOL\"],\n samp_weight=nhanes[\"WTMEC2YR\"],\n psu=nhanes[\"SDMVPSU\"],\n stratum=nhanes[\"SDMVSTRA\"],\n remove_nan=True,\n)\nprint(mean_estimator.to_dataframe())\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n _param _estimate _stderror _lci _uci _cv\n0 PopParam.mean 0.112143 0.005446 0.100598 0.123688 0.048562\n```\n\n\n:::\n:::\n\n\n## Total\n\nCalculating population totals can be done by changing the `TaylorEstimator` parameter to `PopParam.total`:\n\n\n::: {.cell}\n\n```{.python .cell-code}\ntotal_estimator = TaylorEstimator(PopParam.total)\n\ntotal_estimator.estimate(\n y=nhanes[\"HI_CHOL\"],\n samp_weight=nhanes[\"WTMEC2YR\"],\n psu=nhanes[\"SDMVPSU\"],\n stratum=nhanes[\"SDMVSTRA\"],\n remove_nan=True,\n)\nprint(total_estimator.to_dataframe())\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n _param _estimate ... _uci _cv\n0 PopParam.total 2.863525e+07 ... 
3.291896e+07 0.070567\n\n[1 rows x 6 columns]\n```\n\n\n:::\n:::\n\n\n## Ratios\n\nCalculating population ratios can be done by changing the `TaylorEstimator` parameter to `PopParam.ratio`, and additionally specifying an `x` parameter in the `estimate` method:\n\n\n::: {.cell}\n\n```{.python .cell-code}\nratio_estimator = TaylorEstimator(PopParam.ratio)\n\nratio_estimator.estimate(\n y=nhanes[\"HI_CHOL\"],\n x=nhanes[\"RIAGENDR\"],\n samp_weight=nhanes[\"WTMEC2YR\"],\n psu=nhanes[\"SDMVPSU\"],\n stratum=nhanes[\"SDMVSTRA\"],\n remove_nan=True,\n)\nprint(ratio_estimator.to_dataframe())\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n _param _estimate _stderror _lci _uci _cv\n0 PopParam.ratio 0.074222 0.003715 0.066347 0.082097 0.050049\n```\n\n\n:::\n:::\n\n\n## Proportions\n\nCalculating proportions can be done by changing the `TaylorEstimator` parameter to `PopParam.prop`:\n\n\n::: {.cell}\n\n```{.python .cell-code}\nprop_estimator = TaylorEstimator(PopParam.prop)\n\nprop_estimator.estimate(\n y=nhanes[\"agecat\"],\n samp_weight=nhanes[\"WTMEC2YR\"],\n psu=nhanes[\"SDMVPSU\"],\n stratum=nhanes[\"SDMVSTRA\"],\n remove_nan=True,\n)\nprop_estimator.to_dataframe()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n _param _level _estimate _stderror _lci _uci _cv\n0 PopParam.prop (0,19] 0.207749 0.006130 0.195054 0.221044 0.029506\n1 PopParam.prop (19,39] 0.293408 0.009561 0.273557 0.314077 0.032585\n2 PopParam.prop (39,59] 0.303290 0.004519 0.293795 0.312955 0.014901\n3 PopParam.prop (59,Inf] 0.195553 0.008093 0.178965 0.213280 0.041383\n```\n\n\n:::\n:::\n\n\n## Quantiles\n\n`samplics` currently does not have a method to calculate quantiles.\n\n## Domain Estimations\n\nWe can perform domain estimations of different sub-populations by passing our domain column as a parameter to the `estimate` method:\n\n\n::: {.cell}\n\n```{.python .cell-code}\nmean_estimator = TaylorEstimator(PopParam.mean)\n\nmean_estimator.estimate(\n y=nhanes[\"HI_CHOL\"],\n 
samp_weight=nhanes[\"WTMEC2YR\"],\n psu=nhanes[\"SDMVPSU\"],\n stratum=nhanes[\"SDMVSTRA\"],\n domain=nhanes[\"race\"],\n remove_nan=True,\n)\nmean_estimator.to_dataframe()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n _param _domain _estimate _stderror _lci _uci _cv\n0 PopParam.mean 1 0.101492 0.006246 0.088251 0.114732 0.061540\n1 PopParam.mean 2 0.121649 0.006604 0.107649 0.135649 0.054288\n2 PopParam.mean 3 0.078640 0.010385 0.056626 0.100655 0.132053\n3 PopParam.mean 4 0.099679 0.024666 0.047389 0.151969 0.247458\n```\n\n\n:::\n:::\n\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.3 (2026-03-11)\n os Ubuntu 24.04.4 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-26\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P survey * 4.4-8 2025-08-28 [?] 
RSPM (R 4.5.0)\n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.3/lib/R/library\n\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Python configuration ────────────────────────────────────────────────────────\n Python 3.12.3 (main, Mar 3 2026, 12:15:18) [GCC 13.3.0]\n samplics 0.4.22\n```\n\n\n:::\n:::\n\n:::\n", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/python/two_samples_t_test/execute-results/html.json b/_freeze/python/two_samples_t_test/execute-results/html.json index 4ff962be1..79fda71c4 100644 --- a/_freeze/python/two_samples_t_test/execute-results/html.json +++ b/_freeze/python/two_samples_t_test/execute-results/html.json @@ -2,9 +2,9 @@ "hash": "196b1c1b8bd8420a5feff26b4e79fe01", "result": { "engine": "jupyter", - "markdown": "---\ntitle: \"Two Sample t-test in Python\"\noutput: html_document\n---\n\n\nThe Two Sample t-test is used to compare two independent samples against each other. In the Two Sample t-test, the mean of the first sample is compared against the mean of the second sample. 
In Python, a Two Sample t-test can be performed using the **stats** package from scipy.\n\n### Data Used\n\nThe following data was used in this example.\n\n::: {#cb0c1139 .cell execution_count=1}\n``` {.python .cell-code}\nimport pandas as pd\nimport numpy as np\nfrom scipy import stats\n\n# Create sample data\ndata = {\n 'trt_grp': ['placebo', 'placebo', 'placebo', 'placebo', 'placebo', 'placebo', 'placebo', 'placebo', 'placebo', 'placebo', 'placebo', 'placebo', 'placebo', 'placebo', 'placebo', 'placebo', 'treatment', 'treatment', 'treatment', 'treatment', 'treatment', 'treatment', 'treatment', 'treatment', 'treatment', 'treatment', 'treatment', 'treatment', 'treatment', 'treatment', 'treatment', 'treatment'],\n 'WtGain': [94, 12, 26, 89, 88, 96, 85, 130, 75, 54, 112, 69, 104, 95, 53, 21, 45, 62, 96, 128, 120, 99, 28, 50, 109, 115, 39, 96, 87, 100, 76, 80]\n}\n\ndf = pd.DataFrame(data)\n```\n:::\n\n\nIf we have normalized data, we can use the classic Student's t-test. For a Two sample test where the variances are not equal, we should use the Welch's t-test. Both of those options are available in the scipy **stats** package.\n\n### Student's T-Test\n\n#### Code\n\nThe following code was used to test the comparison in Python. 
Note that we must separate the single variable into two variables to satisfy the scipy **stats** package syntax.\n\n::: {#4dd966ac .cell execution_count=2}\n``` {.python .cell-code}\n# Separate data into two groups\ngroup1 = df[df['trt_grp'] == 'placebo']['WtGain']\ngroup2 = df[df['trt_grp'] == 'treatment']['WtGain']\n\n# Perform Student's t-test assuming equal variances\nt_stat, p_value_equal_var = stats.ttest_ind(group1, group2, equal_var=True)\n\nprint(\"Student's T-Test assuming equal variances:\")\nprint(f\"T-statistic: {t_stat}\")\nprint(f\"P-value: {p_value_equal_var}\")\n```\n\n::: {.cell-output .cell-output-stdout}\n```\nStudent's T-Test assuming equal variances:\nT-statistic: -0.6969002027708538\nP-value: 0.4912306166204561\n```\n:::\n:::\n\n\n### Welch's T-Test\n\n#### Code\n\nThe following code was used to test the comparison in Python using Welch's t-test.\n\n::: {#3b39c76f .cell execution_count=3}\n``` {.python .cell-code}\n# Perform Welch's t-test assuming unequal variances\nt_stat_welch, p_value_unequal_var = stats.ttest_ind(group1, group2, equal_var=False)\n\nprint(\"\\nWelch's T-Test assuming unequal variances:\")\nprint(f\"T-statistic: {t_stat_welch}\")\nprint(f\"P-value: {p_value_unequal_var}\")\n```\n\n::: {.cell-output .cell-output-stdout}\n```\n\nWelch's T-Test assuming unequal variances:\nT-statistic: -0.6969002027708538\nP-value: 0.4912856152047901\n```\n:::\n:::\n\n\n", + "markdown": "---\ntitle: \"Two Sample t-test in Python\"\noutput: html_document\n---\n\nThe Two Sample t-test is used to compare two independent samples against each other. In the Two Sample t-test, the mean of the first sample is compared against the mean of the second sample. 
In Python, a Two Sample t-test can be performed using the **stats** package from scipy.\n\n### Data Used\n\nThe following data was used in this example.\n\n::: {#aadaa4e7 .cell execution_count=1}\n``` {.python .cell-code}\nimport pandas as pd\nimport numpy as np\nfrom scipy import stats\n\n# Create sample data\ndata = {\n 'trt_grp': ['placebo', 'placebo', 'placebo', 'placebo', 'placebo', 'placebo', 'placebo', 'placebo', 'placebo', 'placebo', 'placebo', 'placebo', 'placebo', 'placebo', 'placebo', 'placebo', 'treatment', 'treatment', 'treatment', 'treatment', 'treatment', 'treatment', 'treatment', 'treatment', 'treatment', 'treatment', 'treatment', 'treatment', 'treatment', 'treatment', 'treatment', 'treatment'],\n 'WtGain': [94, 12, 26, 89, 88, 96, 85, 130, 75, 54, 112, 69, 104, 95, 53, 21, 45, 62, 96, 128, 120, 99, 28, 50, 109, 115, 39, 96, 87, 100, 76, 80]\n}\n\ndf = pd.DataFrame(data)\n```\n:::\n\n\nIf we have normalized data, we can use the classic Student's t-test. For a Two sample test where the variances are not equal, we should use the Welch's t-test. Both of those options are available in the scipy **stats** package.\n\n### Student's T-Test\n\n#### Code\n\nThe following code was used to test the comparison in Python. 
Note that we must separate the single variable into two variables to satisfy the scipy **stats** package syntax.\n\n::: {#7ac99697 .cell execution_count=2}\n``` {.python .cell-code}\n# Separate data into two groups\ngroup1 = df[df['trt_grp'] == 'placebo']['WtGain']\ngroup2 = df[df['trt_grp'] == 'treatment']['WtGain']\n\n# Perform Student's t-test assuming equal variances\nt_stat, p_value_equal_var = stats.ttest_ind(group1, group2, equal_var=True)\n\nprint(\"Student's T-Test assuming equal variances:\")\nprint(f\"T-statistic: {t_stat}\")\nprint(f\"P-value: {p_value_equal_var}\")\n```\n\n::: {.cell-output .cell-output-stdout}\n```\nStudent's T-Test assuming equal variances:\nT-statistic: -0.6969002027708538\nP-value: 0.4912306166204561\n```\n:::\n:::\n\n\n### Welch's T-Test\n\n#### Code\n\nThe following code was used to test the comparison in Python using Welch's t-test.\n\n::: {#3feec605 .cell execution_count=3}\n``` {.python .cell-code}\n# Perform Welch's t-test assuming unequal variances\nt_stat_welch, p_value_unequal_var = stats.ttest_ind(group1, group2, equal_var=False)\n\nprint(\"\\nWelch's T-Test assuming unequal variances:\")\nprint(f\"T-statistic: {t_stat_welch}\")\nprint(f\"P-value: {p_value_unequal_var}\")\n```\n\n::: {.cell-output .cell-output-stdout}\n```\n\nWelch's T-Test assuming unequal variances:\nT-statistic: -0.6969002027708538\nP-value: 0.4912856152047901\n```\n:::\n:::\n\n\n", "supporting": [ - "two_samples_t_test_files/figure-html" + "two_samples_t_test_files" ], "filters": [], "includes": {} diff --git a/_freeze/site_libs/crosstalk-1.2.1/css/crosstalk.min.css b/_freeze/site_libs/crosstalk-1.2.1/css/crosstalk.min.css deleted file mode 100644 index 6b4538284..000000000 --- a/_freeze/site_libs/crosstalk-1.2.1/css/crosstalk.min.css +++ /dev/null @@ -1 +0,0 @@ 
-.container-fluid.crosstalk-bscols{margin-left:-30px;margin-right:-30px;white-space:normal}body>.container-fluid.crosstalk-bscols{margin-left:auto;margin-right:auto}.crosstalk-input-checkboxgroup .crosstalk-options-group .crosstalk-options-column{display:inline-block;padding-right:12px;vertical-align:top}@media only screen and (max-width: 480px){.crosstalk-input-checkboxgroup .crosstalk-options-group .crosstalk-options-column{display:block;padding-right:inherit}}.crosstalk-input{margin-bottom:15px}.crosstalk-input .control-label{margin-bottom:0;vertical-align:middle}.crosstalk-input input[type="checkbox"]{margin:4px 0 0;margin-top:1px;line-height:normal}.crosstalk-input .checkbox{position:relative;display:block;margin-top:10px;margin-bottom:10px}.crosstalk-input .checkbox>label{padding-left:20px;margin-bottom:0;font-weight:400;cursor:pointer}.crosstalk-input .checkbox input[type="checkbox"],.crosstalk-input .checkbox-inline input[type="checkbox"]{position:absolute;margin-top:2px;margin-left:-20px}.crosstalk-input .checkbox+.checkbox{margin-top:-5px}.crosstalk-input .checkbox-inline{position:relative;display:inline-block;padding-left:20px;margin-bottom:0;font-weight:400;vertical-align:middle;cursor:pointer}.crosstalk-input .checkbox-inline+.checkbox-inline{margin-top:0;margin-left:10px} diff --git a/_freeze/site_libs/crosstalk-1.2.1/js/crosstalk.js b/_freeze/site_libs/crosstalk-1.2.1/js/crosstalk.js deleted file mode 100644 index fd9eb53d2..000000000 --- a/_freeze/site_libs/crosstalk-1.2.1/js/crosstalk.js +++ /dev/null @@ -1,1474 +0,0 @@ -(function(){function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error("Cannot find module '"+o+"'");throw f.code="MODULE_NOT_FOUND",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o b) { - return 
1; - } -} - -/** - * @private - */ - -var FilterSet = function () { - function FilterSet() { - _classCallCheck(this, FilterSet); - - this.reset(); - } - - _createClass(FilterSet, [{ - key: "reset", - value: function reset() { - // Key: handle ID, Value: array of selected keys, or null - this._handles = {}; - // Key: key string, Value: count of handles that include it - this._keys = {}; - this._value = null; - this._activeHandles = 0; - } - }, { - key: "update", - value: function update(handleId, keys) { - if (keys !== null) { - keys = keys.slice(0); // clone before sorting - keys.sort(naturalComparator); - } - - var _diffSortedLists = (0, _util.diffSortedLists)(this._handles[handleId], keys), - added = _diffSortedLists.added, - removed = _diffSortedLists.removed; - - this._handles[handleId] = keys; - - for (var i = 0; i < added.length; i++) { - this._keys[added[i]] = (this._keys[added[i]] || 0) + 1; - } - for (var _i = 0; _i < removed.length; _i++) { - this._keys[removed[_i]]--; - } - - this._updateValue(keys); - } - - /** - * @param {string[]} keys Sorted array of strings that indicate - * a superset of possible keys. - * @private - */ - - }, { - key: "_updateValue", - value: function _updateValue() { - var keys = arguments.length > 0 && arguments[0] !== undefined ? 
arguments[0] : this._allKeys; - - var handleCount = Object.keys(this._handles).length; - if (handleCount === 0) { - this._value = null; - } else { - this._value = []; - for (var i = 0; i < keys.length; i++) { - var count = this._keys[keys[i]]; - if (count === handleCount) { - this._value.push(keys[i]); - } - } - } - } - }, { - key: "clear", - value: function clear(handleId) { - if (typeof this._handles[handleId] === "undefined") { - return; - } - - var keys = this._handles[handleId]; - if (!keys) { - keys = []; - } - - for (var i = 0; i < keys.length; i++) { - this._keys[keys[i]]--; - } - delete this._handles[handleId]; - - this._updateValue(); - } - }, { - key: "value", - get: function get() { - return this._value; - } - }, { - key: "_allKeys", - get: function get() { - var allKeys = Object.keys(this._keys); - allKeys.sort(naturalComparator); - return allKeys; - } - }]); - - return FilterSet; -}(); - -exports.default = FilterSet; - -},{"./util":11}],4:[function(require,module,exports){ -(function (global){ -"use strict"; - -Object.defineProperty(exports, "__esModule", { - value: true -}); - -var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); - -var _typeof = typeof Symbol === "function" && typeof Symbol.iterator === "symbol" ? function (obj) { return typeof obj; } : function (obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? 
"symbol" : typeof obj; }; - -exports.default = group; - -var _var2 = require("./var"); - -var _var3 = _interopRequireDefault(_var2); - -function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } - -function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } - -// Use a global so that multiple copies of crosstalk.js can be loaded and still -// have groups behave as singletons across all copies. -global.__crosstalk_groups = global.__crosstalk_groups || {}; -var groups = global.__crosstalk_groups; - -function group(groupName) { - if (groupName && typeof groupName === "string") { - if (!groups.hasOwnProperty(groupName)) { - groups[groupName] = new Group(groupName); - } - return groups[groupName]; - } else if ((typeof groupName === "undefined" ? "undefined" : _typeof(groupName)) === "object" && groupName._vars && groupName.var) { - // Appears to already be a group object - return groupName; - } else if (Array.isArray(groupName) && groupName.length == 1 && typeof groupName[0] === "string") { - return group(groupName[0]); - } else { - throw new Error("Invalid groupName argument"); - } -} - -var Group = function () { - function Group(name) { - _classCallCheck(this, Group); - - this.name = name; - this._vars = {}; - } - - _createClass(Group, [{ - key: "var", - value: function _var(name) { - if (!name || typeof name !== "string") { - throw new Error("Invalid var name"); - } - - if (!this._vars.hasOwnProperty(name)) this._vars[name] = new _var3.default(this, name); - return this._vars[name]; - } - }, { - key: "has", - value: function has(name) { - if (!name || typeof name !== "string") { - throw new Error("Invalid var name"); - } - - return this._vars.hasOwnProperty(name); - } - }]); - - return Group; -}(); - -}).call(this,typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? 
window : {}) - -},{"./var":12}],5:[function(require,module,exports){ -(function (global){ -"use strict"; - -Object.defineProperty(exports, "__esModule", { - value: true -}); - -var _group = require("./group"); - -var _group2 = _interopRequireDefault(_group); - -var _selection = require("./selection"); - -var _filter = require("./filter"); - -var _input = require("./input"); - -require("./input_selectize"); - -require("./input_checkboxgroup"); - -require("./input_slider"); - -function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } - -var defaultGroup = (0, _group2.default)("default"); - -function var_(name) { - return defaultGroup.var(name); -} - -function has(name) { - return defaultGroup.has(name); -} - -if (global.Shiny) { - global.Shiny.addCustomMessageHandler("update-client-value", function (message) { - if (typeof message.group === "string") { - (0, _group2.default)(message.group).var(message.name).set(message.value); - } else { - var_(message.name).set(message.value); - } - }); -} - -var crosstalk = { - group: _group2.default, - var: var_, - has: has, - SelectionHandle: _selection.SelectionHandle, - FilterHandle: _filter.FilterHandle, - bind: _input.bind -}; - -/** - * @namespace crosstalk - */ -exports.default = crosstalk; - -global.crosstalk = crosstalk; - -}).call(this,typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? 
window : {}) - -},{"./filter":2,"./group":4,"./input":6,"./input_checkboxgroup":7,"./input_selectize":8,"./input_slider":9,"./selection":10}],6:[function(require,module,exports){ -(function (global){ -"use strict"; - -Object.defineProperty(exports, "__esModule", { - value: true -}); -exports.register = register; -exports.bind = bind; -var $ = global.jQuery; - -var bindings = {}; - -function register(reg) { - bindings[reg.className] = reg; - if (global.document && global.document.readyState !== "complete") { - $(function () { - bind(); - }); - } else if (global.document) { - setTimeout(bind, 100); - } -} - -function bind() { - Object.keys(bindings).forEach(function (className) { - var binding = bindings[className]; - $("." + binding.className).not(".crosstalk-input-bound").each(function (i, el) { - bindInstance(binding, el); - }); - }); -} - -// Escape jQuery identifier -function $escape(val) { - return val.replace(/([!"#$%&'()*+,./:;<=>?@[\\\]^`{|}~])/g, "\\$1"); -} - -function bindEl(el) { - var $el = $(el); - Object.keys(bindings).forEach(function (className) { - if ($el.hasClass(className) && !$el.hasClass("crosstalk-input-bound")) { - var binding = bindings[className]; - bindInstance(binding, el); - } - }); -} - -function bindInstance(binding, el) { - var jsonEl = $(el).find("script[type='application/json'][data-for='" + $escape(el.id) + "']"); - var data = JSON.parse(jsonEl[0].innerText); - - var instance = binding.factory(el, data); - $(el).data("crosstalk-instance", instance); - $(el).addClass("crosstalk-input-bound"); -} - -if (global.Shiny) { - var inputBinding = new global.Shiny.InputBinding(); - var _$ = global.jQuery; - _$.extend(inputBinding, { - find: function find(scope) { - return _$(scope).find(".crosstalk-input"); - }, - initialize: function initialize(el) { - if (!_$(el).hasClass("crosstalk-input-bound")) { - bindEl(el); - } - }, - getId: function getId(el) { - return el.id; - }, - getValue: function getValue(el) {}, - setValue: function 
setValue(el, value) {}, - receiveMessage: function receiveMessage(el, data) {}, - subscribe: function subscribe(el, callback) { - _$(el).data("crosstalk-instance").resume(); - }, - unsubscribe: function unsubscribe(el) { - _$(el).data("crosstalk-instance").suspend(); - } - }); - global.Shiny.inputBindings.register(inputBinding, "crosstalk.inputBinding"); -} - -}).call(this,typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {}) - -},{}],7:[function(require,module,exports){ -(function (global){ -"use strict"; - -var _input = require("./input"); - -var input = _interopRequireWildcard(_input); - -var _filter = require("./filter"); - -function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key]; } } newObj.default = obj; return newObj; } } - -var $ = global.jQuery; - -input.register({ - className: "crosstalk-input-checkboxgroup", - - factory: function factory(el, data) { - /* - * map: {"groupA": ["keyA", "keyB", ...], ...} - * group: "ct-groupname" - */ - var ctHandle = new _filter.FilterHandle(data.group); - - var lastKnownKeys = void 0; - var $el = $(el); - $el.on("change", "input[type='checkbox']", function () { - var checked = $el.find("input[type='checkbox']:checked"); - if (checked.length === 0) { - lastKnownKeys = null; - ctHandle.clear(); - } else { - var keys = {}; - checked.each(function () { - data.map[this.value].forEach(function (key) { - keys[key] = true; - }); - }); - var keyArray = Object.keys(keys); - keyArray.sort(); - lastKnownKeys = keyArray; - ctHandle.set(keyArray); - } - }); - - return { - suspend: function suspend() { - ctHandle.clear(); - }, - resume: function resume() { - if (lastKnownKeys) ctHandle.set(lastKnownKeys); - } - }; - } -}); - -}).call(this,typeof global !== "undefined" ? 
global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {}) - -},{"./filter":2,"./input":6}],8:[function(require,module,exports){ -(function (global){ -"use strict"; - -var _input = require("./input"); - -var input = _interopRequireWildcard(_input); - -var _util = require("./util"); - -var util = _interopRequireWildcard(_util); - -var _filter = require("./filter"); - -function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key]; } } newObj.default = obj; return newObj; } } - -var $ = global.jQuery; - -input.register({ - className: "crosstalk-input-select", - - factory: function factory(el, data) { - /* - * items: {value: [...], label: [...]} - * map: {"groupA": ["keyA", "keyB", ...], ...} - * group: "ct-groupname" - */ - - var first = [{ value: "", label: "(All)" }]; - var items = util.dataframeToD3(data.items); - var opts = { - options: first.concat(items), - valueField: "value", - labelField: "label", - searchField: "label" - }; - - var select = $(el).find("select")[0]; - - var selectize = $(select).selectize(opts)[0].selectize; - - var ctHandle = new _filter.FilterHandle(data.group); - - var lastKnownKeys = void 0; - selectize.on("change", function () { - if (selectize.items.length === 0) { - lastKnownKeys = null; - ctHandle.clear(); - } else { - var keys = {}; - selectize.items.forEach(function (group) { - data.map[group].forEach(function (key) { - keys[key] = true; - }); - }); - var keyArray = Object.keys(keys); - keyArray.sort(); - lastKnownKeys = keyArray; - ctHandle.set(keyArray); - } - }); - - return { - suspend: function suspend() { - ctHandle.clear(); - }, - resume: function resume() { - if (lastKnownKeys) ctHandle.set(lastKnownKeys); - } - }; - } -}); - -}).call(this,typeof global !== "undefined" ? global : typeof self !== "undefined" ? 
self : typeof window !== "undefined" ? window : {}) - -},{"./filter":2,"./input":6,"./util":11}],9:[function(require,module,exports){ -(function (global){ -"use strict"; - -var _slicedToArray = function () { function sliceIterator(arr, i) { var _arr = []; var _n = true; var _d = false; var _e = undefined; try { for (var _i = arr[Symbol.iterator](), _s; !(_n = (_s = _i.next()).done); _n = true) { _arr.push(_s.value); if (i && _arr.length === i) break; } } catch (err) { _d = true; _e = err; } finally { try { if (!_n && _i["return"]) _i["return"](); } finally { if (_d) throw _e; } } return _arr; } return function (arr, i) { if (Array.isArray(arr)) { return arr; } else if (Symbol.iterator in Object(arr)) { return sliceIterator(arr, i); } else { throw new TypeError("Invalid attempt to destructure non-iterable instance"); } }; }(); - -var _input = require("./input"); - -var input = _interopRequireWildcard(_input); - -var _filter = require("./filter"); - -function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key]; } } newObj.default = obj; return newObj; } } - -var $ = global.jQuery; -var strftime = global.strftime; - -input.register({ - className: "crosstalk-input-slider", - - factory: function factory(el, data) { - /* - * map: {"groupA": ["keyA", "keyB", ...], ...} - * group: "ct-groupname" - */ - var ctHandle = new _filter.FilterHandle(data.group); - - var opts = {}; - var $el = $(el).find("input"); - var dataType = $el.data("data-type"); - var timeFormat = $el.data("time-format"); - var round = $el.data("round"); - var timeFormatter = void 0; - - // Set up formatting functions - if (dataType === "date") { - timeFormatter = strftime.utc(); - opts.prettify = function (num) { - return timeFormatter(timeFormat, new Date(num)); - }; - } else if (dataType === "datetime") { - var timezone = $el.data("timezone"); 
- if (timezone) timeFormatter = strftime.timezone(timezone);else timeFormatter = strftime; - - opts.prettify = function (num) { - return timeFormatter(timeFormat, new Date(num)); - }; - } else if (dataType === "number") { - if (typeof round !== "undefined") opts.prettify = function (num) { - var factor = Math.pow(10, round); - return Math.round(num * factor) / factor; - }; - } - - $el.ionRangeSlider(opts); - - function getValue() { - var result = $el.data("ionRangeSlider").result; - - // Function for converting numeric value from slider to appropriate type. - var convert = void 0; - var dataType = $el.data("data-type"); - if (dataType === "date") { - convert = function convert(val) { - return formatDateUTC(new Date(+val)); - }; - } else if (dataType === "datetime") { - convert = function convert(val) { - // Convert ms to s - return +val / 1000; - }; - } else { - convert = function convert(val) { - return +val; - }; - } - - if ($el.data("ionRangeSlider").options.type === "double") { - return [convert(result.from), convert(result.to)]; - } else { - return convert(result.from); - } - } - - var lastKnownKeys = null; - - $el.on("change.crosstalkSliderInput", function (event) { - if (!$el.data("updating") && !$el.data("animating")) { - var _getValue = getValue(), - _getValue2 = _slicedToArray(_getValue, 2), - from = _getValue2[0], - to = _getValue2[1]; - - var keys = []; - for (var i = 0; i < data.values.length; i++) { - var val = data.values[i]; - if (val >= from && val <= to) { - keys.push(data.keys[i]); - } - } - keys.sort(); - ctHandle.set(keys); - lastKnownKeys = keys; - } - }); - - // let $el = $(el); - // $el.on("change", "input[type="checkbox"]", function() { - // let checked = $el.find("input[type="checkbox"]:checked"); - // if (checked.length === 0) { - // ctHandle.clear(); - // } else { - // let keys = {}; - // checked.each(function() { - // data.map[this.value].forEach(function(key) { - // keys[key] = true; - // }); - // }); - // let keyArray = 
Object.keys(keys); - // keyArray.sort(); - // ctHandle.set(keyArray); - // } - // }); - - return { - suspend: function suspend() { - ctHandle.clear(); - }, - resume: function resume() { - if (lastKnownKeys) ctHandle.set(lastKnownKeys); - } - }; - } -}); - -// Convert a number to a string with leading zeros -function padZeros(n, digits) { - var str = n.toString(); - while (str.length < digits) { - str = "0" + str; - }return str; -} - -// Given a Date object, return a string in yyyy-mm-dd format, using the -// UTC date. This may be a day off from the date in the local time zone. -function formatDateUTC(date) { - if (date instanceof Date) { - return date.getUTCFullYear() + "-" + padZeros(date.getUTCMonth() + 1, 2) + "-" + padZeros(date.getUTCDate(), 2); - } else { - return null; - } -} - -}).call(this,typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {}) - -},{"./filter":2,"./input":6}],10:[function(require,module,exports){ -"use strict"; - -Object.defineProperty(exports, "__esModule", { - value: true -}); -exports.SelectionHandle = undefined; - -var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); - -var _events = require("./events"); - -var _events2 = _interopRequireDefault(_events); - -var _group = require("./group"); - -var _group2 = _interopRequireDefault(_group); - -var _util = require("./util"); - -var util = _interopRequireWildcard(_util); - -function _interopRequireWildcard(obj) { if (obj 
&& obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key]; } } newObj.default = obj; return newObj; } } - -function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } - -function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } - -/** - * Use this class to read and write (and listen for changes to) the selection - * for a Crosstalk group. This is intended to be used for linked brushing. - * - * If two (or more) `SelectionHandle` instances in the same webpage share the - * same group name, they will share the same state. Setting the selection using - * one `SelectionHandle` instance will result in the `value` property instantly - * changing across the others, and `"change"` event listeners on all instances - * (including the one that initiated the sending) will fire. - * - * @param {string} [group] - The name of the Crosstalk group, or if none, - * null or undefined (or any other falsy value). This can be changed later - * via the [SelectionHandle#setGroup](#setGroup) method. - * @param {Object} [extraInfo] - An object whose properties will be copied to - * the event object whenever an event is emitted. - */ -var SelectionHandle = exports.SelectionHandle = function () { - function SelectionHandle() { - var group = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : null; - var extraInfo = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : null; - - _classCallCheck(this, SelectionHandle); - - this._eventRelay = new _events2.default(); - this._emitter = new util.SubscriptionTracker(this._eventRelay); - - // Name of the group we're currently tracking, if any. Can change over time. - this._group = null; - // The Var we're currently tracking, if any. Can change over time. 
- this._var = null; - // The event handler subscription we currently have on var.on("change"). - this._varOnChangeSub = null; - - this._extraInfo = util.extend({ sender: this }, extraInfo); - - this.setGroup(group); - } - - /** - * Changes the Crosstalk group membership of this SelectionHandle. The group - * being switched away from (if any) will not have its selection value - * modified as a result of calling `setGroup`, even if this handle was the - * most recent handle to set the selection of the group. - * - * The group being switched to (if any) will also not have its selection value - * modified as a result of calling `setGroup`. If you want to set the - * selection value of the new group, call `set` explicitly. - * - * @param {string} group - The name of the Crosstalk group, or null (or - * undefined) to clear the group. - */ - - - _createClass(SelectionHandle, [{ - key: "setGroup", - value: function setGroup(group) { - var _this = this; - - // If group is unchanged, do nothing - if (this._group === group) return; - // Treat null, undefined, and other falsy values the same - if (!this._group && !group) return; - - if (this._var) { - this._var.off("change", this._varOnChangeSub); - this._var = null; - this._varOnChangeSub = null; - } - - this._group = group; - - if (group) { - this._var = (0, _group2.default)(group).var("selection"); - var sub = this._var.on("change", function (e) { - _this._eventRelay.trigger("change", e, _this); - }); - this._varOnChangeSub = sub; - } - } - - /** - * Retrieves the current selection for the group represented by this - * `SelectionHandle`. - * - * - If no selection is active, then this value will be falsy. - * - If a selection is active, but no data points are selected, then this - * value will be an empty array. - * - If a selection is active, and data points are selected, then the keys - * of the selected data points will be present in the array. 
- */ - - }, { - key: "_mergeExtraInfo", - - - /** - * Combines the given `extraInfo` (if any) with the handle's default - * `_extraInfo` (if any). - * @private - */ - value: function _mergeExtraInfo(extraInfo) { - // Important incidental effect: shallow clone is returned - return util.extend({}, this._extraInfo ? this._extraInfo : null, extraInfo ? extraInfo : null); - } - - /** - * Overwrites the current selection for the group, and raises the `"change"` - * event among all of the group's '`SelectionHandle` instances (including - * this one). - * - * @fires SelectionHandle#change - * @param {string[]} selectedKeys - Falsy, empty array, or array of keys (see - * {@link SelectionHandle#value}). - * @param {Object} [extraInfo] - Extra properties to be included on the event - * object that's passed to listeners (in addition to any options that were - * passed into the `SelectionHandle` constructor). - */ - - }, { - key: "set", - value: function set(selectedKeys, extraInfo) { - if (this._var) this._var.set(selectedKeys, this._mergeExtraInfo(extraInfo)); - } - - /** - * Overwrites the current selection for the group, and raises the `"change"` - * event among all of the group's '`SelectionHandle` instances (including - * this one). - * - * @fires SelectionHandle#change - * @param {Object} [extraInfo] - Extra properties to be included on the event - * object that's passed to listeners (in addition to any that were passed - * into the `SelectionHandle` constructor). - */ - - }, { - key: "clear", - value: function clear(extraInfo) { - if (this._var) this.set(void 0, this._mergeExtraInfo(extraInfo)); - } - - /** - * Subscribes to events on this `SelectionHandle`. - * - * @param {string} eventType - Indicates the type of events to listen to. - * Currently, only `"change"` is supported. - * @param {SelectionHandle~listener} listener - The callback function that - * will be invoked when the event occurs. 
- * @return {string} - A token to pass to {@link SelectionHandle#off} to cancel - * this subscription. - */ - - }, { - key: "on", - value: function on(eventType, listener) { - return this._emitter.on(eventType, listener); - } - - /** - * Cancels event subscriptions created by {@link SelectionHandle#on}. - * - * @param {string} eventType - The type of event to unsubscribe. - * @param {string|SelectionHandle~listener} listener - Either the callback - * function previously passed into {@link SelectionHandle#on}, or the - * string that was returned from {@link SelectionHandle#on}. - */ - - }, { - key: "off", - value: function off(eventType, listener) { - return this._emitter.off(eventType, listener); - } - - /** - * Shuts down the `SelectionHandle` object. - * - * Removes all event listeners that were added through this handle. - */ - - }, { - key: "close", - value: function close() { - this._emitter.removeAllListeners(); - this.setGroup(null); - } - }, { - key: "value", - get: function get() { - return this._var ? this._var.get() : null; - } - }]); - - return SelectionHandle; -}(); - -/** - * @callback SelectionHandle~listener - * @param {Object} event - An object containing details of the event. For - * `"change"` events, this includes the properties `value` (the new - * value of the selection, or `undefined` if no selection is active), - * `oldValue` (the previous value of the selection), and `sender` (the - * `SelectionHandle` instance that made the change). - */ - -/** - * @event SelectionHandle#change - * @type {object} - * @property {object} value - The new value of the selection, or `undefined` - * if no selection is active. - * @property {object} oldValue - The previous value of the selection. - * @property {SelectionHandle} sender - The `SelectionHandle` instance that - * changed the value. 
- */ - -},{"./events":1,"./group":4,"./util":11}],11:[function(require,module,exports){ -"use strict"; - -Object.defineProperty(exports, "__esModule", { - value: true -}); - -var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); - -var _typeof = typeof Symbol === "function" && typeof Symbol.iterator === "symbol" ? function (obj) { return typeof obj; } : function (obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj; }; - -exports.extend = extend; -exports.checkSorted = checkSorted; -exports.diffSortedLists = diffSortedLists; -exports.dataframeToD3 = dataframeToD3; - -function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } - -function extend(target) { - for (var _len = arguments.length, sources = Array(_len > 1 ? 
_len - 1 : 0), _key = 1; _key < _len; _key++) { - sources[_key - 1] = arguments[_key]; - } - - for (var i = 0; i < sources.length; i++) { - var src = sources[i]; - if (typeof src === "undefined" || src === null) continue; - - for (var key in src) { - if (src.hasOwnProperty(key)) { - target[key] = src[key]; - } - } - } - return target; -} - -function checkSorted(list) { - for (var i = 1; i < list.length; i++) { - if (list[i] <= list[i - 1]) { - throw new Error("List is not sorted or contains duplicate"); - } - } -} - -function diffSortedLists(a, b) { - var i_a = 0; - var i_b = 0; - - if (!a) a = []; - if (!b) b = []; - - var a_only = []; - var b_only = []; - - checkSorted(a); - checkSorted(b); - - while (i_a < a.length && i_b < b.length) { - if (a[i_a] === b[i_b]) { - i_a++; - i_b++; - } else if (a[i_a] < b[i_b]) { - a_only.push(a[i_a++]); - } else { - b_only.push(b[i_b++]); - } - } - - if (i_a < a.length) a_only = a_only.concat(a.slice(i_a)); - if (i_b < b.length) b_only = b_only.concat(b.slice(i_b)); - return { - removed: a_only, - added: b_only - }; -} - -// Convert from wide: { colA: [1,2,3], colB: [4,5,6], ... } -// to long: [ {colA: 1, colB: 4}, {colA: 2, colB: 5}, ... 
] -function dataframeToD3(df) { - var names = []; - var length = void 0; - for (var name in df) { - if (df.hasOwnProperty(name)) names.push(name); - if (_typeof(df[name]) !== "object" || typeof df[name].length === "undefined") { - throw new Error("All fields must be arrays"); - } else if (typeof length !== "undefined" && length !== df[name].length) { - throw new Error("All fields must be arrays of the same length"); - } - length = df[name].length; - } - var results = []; - var item = void 0; - for (var row = 0; row < length; row++) { - item = {}; - for (var col = 0; col < names.length; col++) { - item[names[col]] = df[names[col]][row]; - } - results.push(item); - } - return results; -} - -/** - * Keeps track of all event listener additions/removals and lets all active - * listeners be removed with a single operation. - * - * @private - */ - -var SubscriptionTracker = exports.SubscriptionTracker = function () { - function SubscriptionTracker(emitter) { - _classCallCheck(this, SubscriptionTracker); - - this._emitter = emitter; - this._subs = {}; - } - - _createClass(SubscriptionTracker, [{ - key: "on", - value: function on(eventType, listener) { - var sub = this._emitter.on(eventType, listener); - this._subs[sub] = eventType; - return sub; - } - }, { - key: "off", - value: function off(eventType, listener) { - var sub = this._emitter.off(eventType, listener); - if (sub) { - delete this._subs[sub]; - } - return sub; - } - }, { - key: "removeAllListeners", - value: function removeAllListeners() { - var _this = this; - - var current_subs = this._subs; - this._subs = {}; - Object.keys(current_subs).forEach(function (sub) { - _this._emitter.off(current_subs[sub], sub); - }); - } - }]); - - return SubscriptionTracker; -}(); - -},{}],12:[function(require,module,exports){ -(function (global){ -"use strict"; - -Object.defineProperty(exports, "__esModule", { - value: true -}); - -var _typeof = typeof Symbol === "function" && typeof Symbol.iterator === "symbol" ? 
function (obj) { return typeof obj; } : function (obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj; }; - -var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); - -var _events = require("./events"); - -var _events2 = _interopRequireDefault(_events); - -function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } - -function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } - -var Var = function () { - function Var(group, name, /*optional*/value) { - _classCallCheck(this, Var); - - this._group = group; - this._name = name; - this._value = value; - this._events = new _events2.default(); - } - - _createClass(Var, [{ - key: "get", - value: function get() { - return this._value; - } - }, { - key: "set", - value: function set(value, /*optional*/event) { - if (this._value === value) { - // Do nothing; the value hasn't changed - return; - } - var oldValue = this._value; - this._value = value; - // Alert JavaScript listeners that the value has changed - var evt = {}; - if (event && (typeof event === "undefined" ? 
"undefined" : _typeof(event)) === "object") { - for (var k in event) { - if (event.hasOwnProperty(k)) evt[k] = event[k]; - } - } - evt.oldValue = oldValue; - evt.value = value; - this._events.trigger("change", evt, this); - - // TODO: Make this extensible, to let arbitrary back-ends know that - // something has changed - if (global.Shiny && global.Shiny.onInputChange) { - global.Shiny.onInputChange(".clientValue-" + (this._group.name !== null ? this._group.name + "-" : "") + this._name, typeof value === "undefined" ? null : value); - } - } - }, { - key: "on", - value: function on(eventType, listener) { - return this._events.on(eventType, listener); - } - }, { - key: "off", - value: function off(eventType, listener) { - return this._events.off(eventType, listener); - } - }]); - - return Var; -}(); - -exports.default = Var; - -}).call(this,typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {}) - -},{"./events":1}]},{},[5]) -//# sourceMappingURL=crosstalk.js.map diff --git a/_freeze/site_libs/crosstalk-1.2.1/js/crosstalk.js.map b/_freeze/site_libs/crosstalk-1.2.1/js/crosstalk.js.map deleted file mode 100644 index cff94f089..000000000 --- a/_freeze/site_libs/crosstalk-1.2.1/js/crosstalk.js.map +++ /dev/null @@ -1,37 +0,0 @@ -{ - "version": 3, - "sources": [ - "node_modules/browser-pack/_prelude.js", - "javascript/src/events.js", - "javascript/src/filter.js", - "javascript/src/filterset.js", - "javascript/src/group.js", - "javascript/src/index.js", - "javascript/src/input.js", - "javascript/src/input_checkboxgroup.js", - "javascript/src/input_selectize.js", - "javascript/src/input_slider.js", - "javascript/src/selection.js", - "javascript/src/util.js", - "javascript/src/var.js" - ], - "names": [], - "mappings": 
"AAAA;;;;;;;;;;;ICAqB,M;AACnB,oBAAc;AAAA;;AACZ,SAAK,MAAL,GAAc,EAAd;AACA,SAAK,IAAL,GAAY,CAAZ;AACD;;;;uBAEE,S,EAAW,Q,EAAU;AACtB,UAAI,OAAO,KAAK,MAAL,CAAY,SAAZ,CAAX;AACA,UAAI,CAAC,IAAL,EAAW;AACT,eAAO,KAAK,MAAL,CAAY,SAAZ,IAAyB,EAAhC;AACD;AACD,UAAI,MAAM,QAAS,KAAK,IAAL,EAAnB;AACA,WAAK,GAAL,IAAY,QAAZ;AACA,aAAO,GAAP;AACD;;AAED;;;;wBACI,S,EAAW,Q,EAAU;AACvB,UAAI,OAAO,KAAK,MAAL,CAAY,SAAZ,CAAX;AACA,UAAI,OAAO,QAAP,KAAqB,UAAzB,EAAqC;AACnC,aAAK,IAAI,GAAT,IAAgB,IAAhB,EAAsB;AACpB,cAAI,KAAK,cAAL,CAAoB,GAApB,CAAJ,EAA8B;AAC5B,gBAAI,KAAK,GAAL,MAAc,QAAlB,EAA4B;AAC1B,qBAAO,KAAK,GAAL,CAAP;AACA,qBAAO,GAAP;AACD;AACF;AACF;AACD,eAAO,KAAP;AACD,OAVD,MAUO,IAAI,OAAO,QAAP,KAAqB,QAAzB,EAAmC;AACxC,YAAI,QAAQ,KAAK,QAAL,CAAZ,EAA4B;AAC1B,iBAAO,KAAK,QAAL,CAAP;AACA,iBAAO,QAAP;AACD;AACD,eAAO,KAAP;AACD,OANM,MAMA;AACL,cAAM,IAAI,KAAJ,CAAU,8BAAV,CAAN;AACD;AACF;;;4BAEO,S,EAAW,G,EAAK,O,EAAS;AAC/B,UAAI,OAAO,KAAK,MAAL,CAAY,SAAZ,CAAX;AACA,WAAK,IAAI,GAAT,IAAgB,IAAhB,EAAsB;AACpB,YAAI,KAAK,cAAL,CAAoB,GAApB,CAAJ,EAA8B;AAC5B,eAAK,GAAL,EAAU,IAAV,CAAe,OAAf,EAAwB,GAAxB;AACD;AACF;AACF;;;;;;kBA/CkB,M;;;;;;;;;;;;ACArB;;;;AACA;;;;AACA;;;;AACA;;IAAY,I;;;;;;;;AAEZ,SAAS,YAAT,CAAsB,KAAtB,EAA6B;AAC3B,MAAI,QAAQ,MAAM,GAAN,CAAU,WAAV,CAAZ;AACA,MAAI,SAAS,MAAM,GAAN,EAAb;AACA,MAAI,CAAC,MAAL,EAAa;AACX,aAAS,yBAAT;AACA,UAAM,GAAN,CAAU,MAAV;AACD;AACD,SAAO,MAAP;AACD;;AAED,IAAI,KAAK,CAAT;AACA,SAAS,MAAT,GAAkB;AAChB,SAAO,IAAP;AACD;;AAED;;;;;;;;;;;;;;;;;;;;;;;;;IAwBa,Y,WAAA,Y;AACX,wBAAY,KAAZ,EAAmB,SAAnB,EAA8B;AAAA;;AAC5B,SAAK,WAAL,GAAmB,sBAAnB;AACA,SAAK,QAAL,GAAgB,IAAI,KAAK,mBAAT,CAA6B,KAAK,WAAlC,CAAhB;;AAEA;AACA,SAAK,MAAL,GAAc,IAAd;AACA;AACA,SAAK,UAAL,GAAkB,IAAlB;AACA;AACA,SAAK,UAAL,GAAkB,IAAlB;AACA;AACA,SAAK,eAAL,GAAuB,IAAvB;;AAEA,SAAK,UAAL,GAAkB,KAAK,MAAL,CAAY,EAAE,QAAQ,IAAV,EAAZ,EAA8B,SAA9B,CAAlB;;AAEA,SAAK,GAAL,GAAW,WAAW,QAAtB;;AAEA,SAAK,QAAL,CAAc,KAAd;AACD;;AAED;;;;;;;;;;;;;;6BAUS,K,EAAO;AAAA;;AACd;AACA,UAAI,KAAK,MAAL,KAAgB,KAApB,EACE;AACF;AACA,UAAI,CAAC,KAAK,MAAN,IAAgB,CAAC,KAArB,EACE;;AAEF,UAAI,KAAK,UAAT,EAAqB;AACnB,aAAK,UAAL,CAAgB,GAAhB,CAAo
B,QAApB,EAA8B,KAAK,eAAnC;AACA,aAAK,KAAL;AACA,aAAK,eAAL,GAAuB,IAAvB;AACA,aAAK,UAAL,GAAkB,IAAlB;AACA,aAAK,UAAL,GAAkB,IAAlB;AACD;;AAED,WAAK,MAAL,GAAc,KAAd;;AAEA,UAAI,KAAJ,EAAW;AACT,gBAAQ,qBAAI,KAAJ,CAAR;AACA,aAAK,UAAL,GAAkB,aAAa,KAAb,CAAlB;AACA,aAAK,UAAL,GAAkB,qBAAI,KAAJ,EAAW,GAAX,CAAe,QAAf,CAAlB;AACA,YAAI,MAAM,KAAK,UAAL,CAAgB,EAAhB,CAAmB,QAAnB,EAA6B,UAAC,CAAD,EAAO;AAC5C,gBAAK,WAAL,CAAiB,OAAjB,CAAyB,QAAzB,EAAmC,CAAnC;AACD,SAFS,CAAV;AAGA,aAAK,eAAL,GAAuB,GAAvB;AACD;AACF;;AAED;;;;;;;;oCAKgB,S,EAAW;AACzB,aAAO,KAAK,MAAL,CAAY,EAAZ,EACL,KAAK,UAAL,GAAkB,KAAK,UAAvB,GAAoC,IAD/B,EAEL,YAAY,SAAZ,GAAwB,IAFnB,CAAP;AAGD;;AAED;;;;;;;4BAIQ;AACN,WAAK,QAAL,CAAc,kBAAd;AACA,WAAK,KAAL;AACA,WAAK,QAAL,CAAc,IAAd;AACD;;AAED;;;;;;;;;;;;0BASM,S,EAAW;AACf,UAAI,CAAC,KAAK,UAAV,EACE;AACF,WAAK,UAAL,CAAgB,KAAhB,CAAsB,KAAK,GAA3B;AACA,WAAK,SAAL,CAAe,SAAf;AACD;;AAED;;;;;;;;;;;;;;;;;;;;wBAiBI,I,EAAM,S,EAAW;AACnB,UAAI,CAAC,KAAK,UAAV,EACE;AACF,WAAK,UAAL,CAAgB,MAAhB,CAAuB,KAAK,GAA5B,EAAiC,IAAjC;AACA,WAAK,SAAL,CAAe,SAAf;AACD;;AAED;;;;;;;;;;AASA;;;;;;;;;;uBAUG,S,EAAW,Q,EAAU;AACtB,aAAO,KAAK,QAAL,CAAc,EAAd,CAAiB,SAAjB,EAA4B,QAA5B,CAAP;AACD;;AAED;;;;;;;;;;;wBAQI,S,EAAW,Q,EAAU;AACvB,aAAO,KAAK,QAAL,CAAc,GAAd,CAAkB,SAAlB,EAA6B,QAA7B,CAAP;AACD;;;8BAES,S,EAAW;AACnB,UAAI,CAAC,KAAK,UAAV,EACE;AACF,WAAK,UAAL,CAAgB,GAAhB,CAAoB,KAAK,UAAL,CAAgB,KAApC,EAA2C,KAAK,eAAL,CAAqB,SAArB,CAA3C;AACD;;AAED;;;;;;;;;;;wBApCmB;AACjB,aAAO,KAAK,UAAL,GAAkB,KAAK,UAAL,CAAgB,KAAlC,GAA0C,IAAjD;AACD;;;;;;AA6CH;;;;;;;;;;;;;;;;;;;ACzNA;;;;AAEA,SAAS,iBAAT,CAA2B,CAA3B,EAA8B,CAA9B,EAAiC;AAC/B,MAAI,MAAM,CAAV,EAAa;AACX,WAAO,CAAP;AACD,GAFD,MAEO,IAAI,IAAI,CAAR,EAAW;AAChB,WAAO,CAAC,CAAR;AACD,GAFM,MAEA,IAAI,IAAI,CAAR,EAAW;AAChB,WAAO,CAAP;AACD;AACF;;AAED;;;;IAGqB,S;AACnB,uBAAc;AAAA;;AACZ,SAAK,KAAL;AACD;;;;4BAEO;AACN;AACA,WAAK,QAAL,GAAgB,EAAhB;AACA;AACA,WAAK,KAAL,GAAa,EAAb;AACA,WAAK,MAAL,GAAc,IAAd;AACA,WAAK,cAAL,GAAsB,CAAtB;AACD;;;2BAMM,Q,EAAU,I,EAAM;AACrB,UAAI,SAAS,IAAb,EAAmB;AACjB,eAAO,KAAK,KAAL,CAAW,CAAX,CAAP,CADiB,CACK;AACtB,aAAK,IAAL,CAAU,iBAAV;AACD;;AAJoB,
6BAME,2BAAgB,KAAK,QAAL,CAAc,QAAd,CAAhB,EAAyC,IAAzC,CANF;AAAA,UAMhB,KANgB,oBAMhB,KANgB;AAAA,UAMT,OANS,oBAMT,OANS;;AAOrB,WAAK,QAAL,CAAc,QAAd,IAA0B,IAA1B;;AAEA,WAAK,IAAI,IAAI,CAAb,EAAgB,IAAI,MAAM,MAA1B,EAAkC,GAAlC,EAAuC;AACrC,aAAK,KAAL,CAAW,MAAM,CAAN,CAAX,IAAuB,CAAC,KAAK,KAAL,CAAW,MAAM,CAAN,CAAX,KAAwB,CAAzB,IAA8B,CAArD;AACD;AACD,WAAK,IAAI,KAAI,CAAb,EAAgB,KAAI,QAAQ,MAA5B,EAAoC,IAApC,EAAyC;AACvC,aAAK,KAAL,CAAW,QAAQ,EAAR,CAAX;AACD;;AAED,WAAK,YAAL,CAAkB,IAAlB;AACD;;AAED;;;;;;;;mCAKmC;AAAA,UAAtB,IAAsB,uEAAf,KAAK,QAAU;;AACjC,UAAI,cAAc,OAAO,IAAP,CAAY,KAAK,QAAjB,EAA2B,MAA7C;AACA,UAAI,gBAAgB,CAApB,EAAuB;AACrB,aAAK,MAAL,GAAc,IAAd;AACD,OAFD,MAEO;AACL,aAAK,MAAL,GAAc,EAAd;AACA,aAAK,IAAI,IAAI,CAAb,EAAgB,IAAI,KAAK,MAAzB,EAAiC,GAAjC,EAAsC;AACpC,cAAI,QAAQ,KAAK,KAAL,CAAW,KAAK,CAAL,CAAX,CAAZ;AACA,cAAI,UAAU,WAAd,EAA2B;AACzB,iBAAK,MAAL,CAAY,IAAZ,CAAiB,KAAK,CAAL,CAAjB;AACD;AACF;AACF;AACF;;;0BAEK,Q,EAAU;AACd,UAAI,OAAO,KAAK,QAAL,CAAc,QAAd,CAAP,KAAoC,WAAxC,EAAqD;AACnD;AACD;;AAED,UAAI,OAAO,KAAK,QAAL,CAAc,QAAd,CAAX;AACA,UAAI,CAAC,IAAL,EAAW;AACT,eAAO,EAAP;AACD;;AAED,WAAK,IAAI,IAAI,CAAb,EAAgB,IAAI,KAAK,MAAzB,EAAiC,GAAjC,EAAsC;AACpC,aAAK,KAAL,CAAW,KAAK,CAAL,CAAX;AACD;AACD,aAAO,KAAK,QAAL,CAAc,QAAd,CAAP;;AAEA,WAAK,YAAL;AACD;;;wBA3DW;AACV,aAAO,KAAK,MAAZ;AACD;;;wBA2Dc;AACb,UAAI,UAAU,OAAO,IAAP,CAAY,KAAK,KAAjB,CAAd;AACA,cAAQ,IAAR,CAAa,iBAAb;AACA,aAAO,OAAP;AACD;;;;;;kBA/EkB,S;;;;;;;;;;;;;;kBCRG,K;;AAPxB;;;;;;;;AAEA;AACA;AACA,OAAO,kBAAP,GAA4B,OAAO,kBAAP,IAA6B,EAAzD;AACA,IAAI,SAAS,OAAO,kBAApB;;AAEe,SAAS,KAAT,CAAe,SAAf,EAA0B;AACvC,MAAI,aAAa,OAAO,SAAP,KAAsB,QAAvC,EAAiD;AAC/C,QAAI,CAAC,OAAO,cAAP,CAAsB,SAAtB,CAAL,EAAuC;AACrC,aAAO,SAAP,IAAoB,IAAI,KAAJ,CAAU,SAAV,CAApB;AACD;AACD,WAAO,OAAO,SAAP,CAAP;AACD,GALD,MAKO,IAAI,QAAO,SAAP,yCAAO,SAAP,OAAsB,QAAtB,IAAkC,UAAU,KAA5C,IAAqD,UAAU,GAAnE,EAAwE;AAC7E;AACA,WAAO,SAAP;AACD,GAHM,MAGA,IAAI,MAAM,OAAN,CAAc,SAAd,KACP,UAAU,MAAV,IAAoB,CADb,IAEP,OAAO,UAAU,CAAV,CAAP,KAAyB,QAFtB,EAEgC;AACrC,WAAO,MAAM,UAAU,CAAV,CAAN,CAAP;AACD,GAJM,MAIA;AACL,UAAM,IAAI,KAAJ,CAAU,4BAAV,CAAN;AACD;AACF;;
IAEK,K;AACJ,iBAAY,IAAZ,EAAkB;AAAA;;AAChB,SAAK,IAAL,GAAY,IAAZ;AACA,SAAK,KAAL,GAAa,EAAb;AACD;;;;yBAEG,I,EAAM;AACR,UAAI,CAAC,IAAD,IAAS,OAAO,IAAP,KAAiB,QAA9B,EAAwC;AACtC,cAAM,IAAI,KAAJ,CAAU,kBAAV,CAAN;AACD;;AAED,UAAI,CAAC,KAAK,KAAL,CAAW,cAAX,CAA0B,IAA1B,CAAL,EACE,KAAK,KAAL,CAAW,IAAX,IAAmB,kBAAQ,IAAR,EAAc,IAAd,CAAnB;AACF,aAAO,KAAK,KAAL,CAAW,IAAX,CAAP;AACD;;;wBAEG,I,EAAM;AACR,UAAI,CAAC,IAAD,IAAS,OAAO,IAAP,KAAiB,QAA9B,EAAwC;AACtC,cAAM,IAAI,KAAJ,CAAU,kBAAV,CAAN;AACD;;AAED,aAAO,KAAK,KAAL,CAAW,cAAX,CAA0B,IAA1B,CAAP;AACD;;;;;;;;;;;;;;;;AC/CH;;;;AACA;;AACA;;AACA;;AACA;;AACA;;AACA;;;;AAEA,IAAM,eAAe,qBAAM,SAAN,CAArB;;AAEA,SAAS,IAAT,CAAc,IAAd,EAAoB;AAClB,SAAO,aAAa,GAAb,CAAiB,IAAjB,CAAP;AACD;;AAED,SAAS,GAAT,CAAa,IAAb,EAAmB;AACjB,SAAO,aAAa,GAAb,CAAiB,IAAjB,CAAP;AACD;;AAED,IAAI,OAAO,KAAX,EAAkB;AAChB,SAAO,KAAP,CAAa,uBAAb,CAAqC,qBAArC,EAA4D,UAAS,OAAT,EAAkB;AAC5E,QAAI,OAAO,QAAQ,KAAf,KAA0B,QAA9B,EAAwC;AACtC,2BAAM,QAAQ,KAAd,EAAqB,GAArB,CAAyB,QAAQ,IAAjC,EAAuC,GAAvC,CAA2C,QAAQ,KAAnD;AACD,KAFD,MAEO;AACL,WAAK,QAAQ,IAAb,EAAmB,GAAnB,CAAuB,QAAQ,KAA/B;AACD;AACF,GAND;AAOD;;AAED,IAAM,YAAY;AAChB,wBADgB;AAEhB,OAAK,IAFW;AAGhB,OAAK,GAHW;AAIhB,6CAJgB;AAKhB,oCALgB;AAMhB;AANgB,CAAlB;;AASA;;;kBAGe,S;;AACf,OAAO,SAAP,GAAmB,SAAnB;;;;;;;;;;;QCrCgB,Q,GAAA,Q;QAWA,I,GAAA,I;AAfhB,IAAI,IAAI,OAAO,MAAf;;AAEA,IAAI,WAAW,EAAf;;AAEO,SAAS,QAAT,CAAkB,GAAlB,EAAuB;AAC5B,WAAS,IAAI,SAAb,IAA0B,GAA1B;AACA,MAAI,OAAO,QAAP,IAAmB,OAAO,QAAP,CAAgB,UAAhB,KAA+B,UAAtD,EAAkE;AAChE,MAAE,YAAM;AACN;AACD,KAFD;AAGD,GAJD,MAIO,IAAI,OAAO,QAAX,EAAqB;AAC1B,eAAW,IAAX,EAAiB,GAAjB;AACD;AACF;;AAEM,SAAS,IAAT,GAAgB;AACrB,SAAO,IAAP,CAAY,QAAZ,EAAsB,OAAtB,CAA8B,UAAS,SAAT,EAAoB;AAChD,QAAI,UAAU,SAAS,SAAT,CAAd;AACA,MAAE,MAAM,QAAQ,SAAhB,EAA2B,GAA3B,CAA+B,wBAA/B,EAAyD,IAAzD,CAA8D,UAAS,CAAT,EAAY,EAAZ,EAAgB;AAC5E,mBAAa,OAAb,EAAsB,EAAtB;AACD,KAFD;AAGD,GALD;AAMD;;AAED;AACA,SAAS,OAAT,CAAiB,GAAjB,EAAsB;AACpB,SAAO,IAAI,OAAJ,CAAY,uCAAZ,EAAqD,MAArD,CAAP;AACD;;AAED,SAAS,MAAT,CAAgB,EAAhB,EAAoB;AAClB,MAAI,MAAM,EAAE,EAAF,CAAV;AACA,SAAO,IAAP,CAAY,QAAZ,EAAsB,OAAtB,CAA8B,UAAS,S
AAT,EAAoB;AAChD,QAAI,IAAI,QAAJ,CAAa,SAAb,KAA2B,CAAC,IAAI,QAAJ,CAAa,uBAAb,CAAhC,EAAuE;AACrE,UAAI,UAAU,SAAS,SAAT,CAAd;AACA,mBAAa,OAAb,EAAsB,EAAtB;AACD;AACF,GALD;AAMD;;AAED,SAAS,YAAT,CAAsB,OAAtB,EAA+B,EAA/B,EAAmC;AACjC,MAAI,SAAS,EAAE,EAAF,EAAM,IAAN,CAAW,+CAA+C,QAAQ,GAAG,EAAX,CAA/C,GAAgE,IAA3E,CAAb;AACA,MAAI,OAAO,KAAK,KAAL,CAAW,OAAO,CAAP,EAAU,SAArB,CAAX;;AAEA,MAAI,WAAW,QAAQ,OAAR,CAAgB,EAAhB,EAAoB,IAApB,CAAf;AACA,IAAE,EAAF,EAAM,IAAN,CAAW,oBAAX,EAAiC,QAAjC;AACA,IAAE,EAAF,EAAM,QAAN,CAAe,uBAAf;AACD;;AAED,IAAI,OAAO,KAAX,EAAkB;AAChB,MAAI,eAAe,IAAI,OAAO,KAAP,CAAa,YAAjB,EAAnB;AACA,MAAI,KAAI,OAAO,MAAf;AACA,KAAE,MAAF,CAAS,YAAT,EAAuB;AACrB,UAAM,cAAS,KAAT,EAAgB;AACpB,aAAO,GAAE,KAAF,EAAS,IAAT,CAAc,kBAAd,CAAP;AACD,KAHoB;AAIrB,gBAAY,oBAAS,EAAT,EAAa;AACvB,UAAI,CAAC,GAAE,EAAF,EAAM,QAAN,CAAe,uBAAf,CAAL,EAA8C;AAC5C,eAAO,EAAP;AACD;AACF,KARoB;AASrB,WAAO,eAAS,EAAT,EAAa;AAClB,aAAO,GAAG,EAAV;AACD,KAXoB;AAYrB,cAAU,kBAAS,EAAT,EAAa,CAEtB,CAdoB;AAerB,cAAU,kBAAS,EAAT,EAAa,KAAb,EAAoB,CAE7B,CAjBoB;AAkBrB,oBAAgB,wBAAS,EAAT,EAAa,IAAb,EAAmB,CAElC,CApBoB;AAqBrB,eAAW,mBAAS,EAAT,EAAa,QAAb,EAAuB;AAChC,SAAE,EAAF,EAAM,IAAN,CAAW,oBAAX,EAAiC,MAAjC;AACD,KAvBoB;AAwBrB,iBAAa,qBAAS,EAAT,EAAa;AACxB,SAAE,EAAF,EAAM,IAAN,CAAW,oBAAX,EAAiC,OAAjC;AACD;AA1BoB,GAAvB;AA4BA,SAAO,KAAP,CAAa,aAAb,CAA2B,QAA3B,CAAoC,YAApC,EAAkD,wBAAlD;AACD;;;;;;;;AChFD;;IAAY,K;;AACZ;;;;AAEA,IAAI,IAAI,OAAO,MAAf;;AAEA,MAAM,QAAN,CAAe;AACb,aAAW,+BADE;;AAGb,WAAS,iBAAS,EAAT,EAAa,IAAb,EAAmB;AAC1B;;;;AAIA,QAAI,WAAW,yBAAiB,KAAK,KAAtB,CAAf;;AAEA,QAAI,sBAAJ;AACA,QAAI,MAAM,EAAE,EAAF,CAAV;AACA,QAAI,EAAJ,CAAO,QAAP,EAAiB,wBAAjB,EAA2C,YAAW;AACpD,UAAI,UAAU,IAAI,IAAJ,CAAS,gCAAT,CAAd;AACA,UAAI,QAAQ,MAAR,KAAmB,CAAvB,EAA0B;AACxB,wBAAgB,IAAhB;AACA,iBAAS,KAAT;AACD,OAHD,MAGO;AACL,YAAI,OAAO,EAAX;AACA,gBAAQ,IAAR,CAAa,YAAW;AACtB,eAAK,GAAL,CAAS,KAAK,KAAd,EAAqB,OAArB,CAA6B,UAAS,GAAT,EAAc;AACzC,iBAAK,GAAL,IAAY,IAAZ;AACD,WAFD;AAGD,SAJD;AAKA,YAAI,WAAW,OAAO,IAAP,CAAY,IAAZ,CAAf;AACA,iBAAS,IAAT;AACA,wBAAgB,QAAhB;AACA,iBAAS,GAAT,CAAa,QAAb;AACD;AACF,KAjBD;;AAmBA,WAAO;AACL,eAAS,mBAAW;
AAClB,iBAAS,KAAT;AACD,OAHI;AAIL,cAAQ,kBAAW;AACjB,YAAI,aAAJ,EACE,SAAS,GAAT,CAAa,aAAb;AACH;AAPI,KAAP;AASD;AAxCY,CAAf;;;;;;;;ACLA;;IAAY,K;;AACZ;;IAAY,I;;AACZ;;;;AAEA,IAAI,IAAI,OAAO,MAAf;;AAEA,MAAM,QAAN,CAAe;AACb,aAAW,wBADE;;AAGb,WAAS,iBAAS,EAAT,EAAa,IAAb,EAAmB;AAC1B;;;;;;AAMA,QAAI,QAAQ,CAAC,EAAC,OAAO,EAAR,EAAY,OAAO,OAAnB,EAAD,CAAZ;AACA,QAAI,QAAQ,KAAK,aAAL,CAAmB,KAAK,KAAxB,CAAZ;AACA,QAAI,OAAO;AACT,eAAS,MAAM,MAAN,CAAa,KAAb,CADA;AAET,kBAAY,OAFH;AAGT,kBAAY,OAHH;AAIT,mBAAa;AAJJ,KAAX;;AAOA,QAAI,SAAS,EAAE,EAAF,EAAM,IAAN,CAAW,QAAX,EAAqB,CAArB,CAAb;;AAEA,QAAI,YAAY,EAAE,MAAF,EAAU,SAAV,CAAoB,IAApB,EAA0B,CAA1B,EAA6B,SAA7C;;AAEA,QAAI,WAAW,yBAAiB,KAAK,KAAtB,CAAf;;AAEA,QAAI,sBAAJ;AACA,cAAU,EAAV,CAAa,QAAb,EAAuB,YAAW;AAChC,UAAI,UAAU,KAAV,CAAgB,MAAhB,KAA2B,CAA/B,EAAkC;AAChC,wBAAgB,IAAhB;AACA,iBAAS,KAAT;AACD,OAHD,MAGO;AACL,YAAI,OAAO,EAAX;AACA,kBAAU,KAAV,CAAgB,OAAhB,CAAwB,UAAS,KAAT,EAAgB;AACtC,eAAK,GAAL,CAAS,KAAT,EAAgB,OAAhB,CAAwB,UAAS,GAAT,EAAc;AACpC,iBAAK,GAAL,IAAY,IAAZ;AACD,WAFD;AAGD,SAJD;AAKA,YAAI,WAAW,OAAO,IAAP,CAAY,IAAZ,CAAf;AACA,iBAAS,IAAT;AACA,wBAAgB,QAAhB;AACA,iBAAS,GAAT,CAAa,QAAb;AACD;AACF,KAhBD;;AAkBA,WAAO;AACL,eAAS,mBAAW;AAClB,iBAAS,KAAT;AACD,OAHI;AAIL,cAAQ,kBAAW;AACjB,YAAI,aAAJ,EACE,SAAS,GAAT,CAAa,aAAb;AACH;AAPI,KAAP;AASD;AArDY,CAAf;;;;;;;;;;ACNA;;IAAY,K;;AACZ;;;;AAEA,IAAI,IAAI,OAAO,MAAf;AACA,IAAI,WAAW,OAAO,QAAtB;;AAEA,MAAM,QAAN,CAAe;AACb,aAAW,wBADE;;AAGb,WAAS,iBAAS,EAAT,EAAa,IAAb,EAAmB;AAC1B;;;;AAIA,QAAI,WAAW,yBAAiB,KAAK,KAAtB,CAAf;;AAEA,QAAI,OAAO,EAAX;AACA,QAAI,MAAM,EAAE,EAAF,EAAM,IAAN,CAAW,OAAX,CAAV;AACA,QAAI,WAAW,IAAI,IAAJ,CAAS,WAAT,CAAf;AACA,QAAI,aAAa,IAAI,IAAJ,CAAS,aAAT,CAAjB;AACA,QAAI,QAAQ,IAAI,IAAJ,CAAS,OAAT,CAAZ;AACA,QAAI,sBAAJ;;AAEA;AACA,QAAI,aAAa,MAAjB,EAAyB;AACvB,sBAAgB,SAAS,GAAT,EAAhB;AACA,WAAK,QAAL,GAAgB,UAAS,GAAT,EAAc;AAC5B,eAAO,cAAc,UAAd,EAA0B,IAAI,IAAJ,CAAS,GAAT,CAA1B,CAAP;AACD,OAFD;AAID,KAND,MAMO,IAAI,aAAa,UAAjB,EAA6B;AAClC,UAAI,WAAW,IAAI,IAAJ,CAAS,UAAT,CAAf;AACA,UAAI,QAAJ,EACE,gBAAgB,SAAS,QAAT,CAAkB,QAAlB,CAAhB,CADF,KAGE,gBAAgB,QAAhB;;AAEF,WAAK,QAAL,GAAgB,U
AAS,GAAT,EAAc;AAC5B,eAAO,cAAc,UAAd,EAA0B,IAAI,IAAJ,CAAS,GAAT,CAA1B,CAAP;AACD,OAFD;AAGD,KAVM,MAUA,IAAI,aAAa,QAAjB,EAA2B;AAChC,UAAI,OAAO,KAAP,KAAiB,WAArB,EACE,KAAK,QAAL,GAAgB,UAAS,GAAT,EAAc;AAC5B,YAAI,SAAS,KAAK,GAAL,CAAS,EAAT,EAAa,KAAb,CAAb;AACA,eAAO,KAAK,KAAL,CAAW,MAAM,MAAjB,IAA2B,MAAlC;AACD,OAHD;AAIH;;AAED,QAAI,cAAJ,CAAmB,IAAnB;;AAEA,aAAS,QAAT,GAAoB;AAClB,UAAI,SAAS,IAAI,IAAJ,CAAS,gBAAT,EAA2B,MAAxC;;AAEA;AACA,UAAI,gBAAJ;AACA,UAAI,WAAW,IAAI,IAAJ,CAAS,WAAT,CAAf;AACA,UAAI,aAAa,MAAjB,EAAyB;AACvB,kBAAU,iBAAS,GAAT,EAAc;AACtB,iBAAO,cAAc,IAAI,IAAJ,CAAS,CAAC,GAAV,CAAd,CAAP;AACD,SAFD;AAGD,OAJD,MAIO,IAAI,aAAa,UAAjB,EAA6B;AAClC,kBAAU,iBAAS,GAAT,EAAc;AACtB;AACA,iBAAO,CAAC,GAAD,GAAO,IAAd;AACD,SAHD;AAID,OALM,MAKA;AACL,kBAAU,iBAAS,GAAT,EAAc;AAAE,iBAAO,CAAC,GAAR;AAAc,SAAxC;AACD;;AAED,UAAI,IAAI,IAAJ,CAAS,gBAAT,EAA2B,OAA3B,CAAmC,IAAnC,KAA4C,QAAhD,EAA0D;AACxD,eAAO,CAAC,QAAQ,OAAO,IAAf,CAAD,EAAuB,QAAQ,OAAO,EAAf,CAAvB,CAAP;AACD,OAFD,MAEO;AACL,eAAO,QAAQ,OAAO,IAAf,CAAP;AACD;AACF;;AAED,QAAI,gBAAgB,IAApB;;AAEA,QAAI,EAAJ,CAAO,6BAAP,EAAsC,UAAS,KAAT,EAAgB;AACpD,UAAI,CAAC,IAAI,IAAJ,CAAS,UAAT,CAAD,IAAyB,CAAC,IAAI,IAAJ,CAAS,WAAT,CAA9B,EAAqD;AAAA,wBAClC,UADkC;AAAA;AAAA,YAC9C,IAD8C;AAAA,YACxC,EADwC;;AAEnD,YAAI,OAAO,EAAX;AACA,aAAK,IAAI,IAAI,CAAb,EAAgB,IAAI,KAAK,MAAL,CAAY,MAAhC,EAAwC,GAAxC,EAA6C;AAC3C,cAAI,MAAM,KAAK,MAAL,CAAY,CAAZ,CAAV;AACA,cAAI,OAAO,IAAP,IAAe,OAAO,EAA1B,EAA8B;AAC5B,iBAAK,IAAL,CAAU,KAAK,IAAL,CAAU,CAAV,CAAV;AACD;AACF;AACD,aAAK,IAAL;AACA,iBAAS,GAAT,CAAa,IAAb;AACA,wBAAgB,IAAhB;AACD;AACF,KAdD;;AAiBA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;;AAEA,WAAO;AACL,eAAS,mBAAW;AAClB,iBAAS,KAAT;AACD,OAHI;AAIL,cAAQ,kBAAW;AACjB,YAAI,aAAJ,EACE,SAAS,GAAT,CAAa,aAAb;AACH;AAPI,KAAP;AASD;AApHY,CAAf;;AAwHA;AACA,SAAS,QAAT,CAAkB,CAAlB,EAAqB,MAArB,EAA6B;AAC3B,MAAI,MAAM,EAAE,QAAF,EAAV;AACA,SAAO,IAAI,MAAJ,GAAa,MAApB;AACE,UAAM,MAAM,GAAZ;AADF,GAEA,OAAO,GAAP;AACD;;AAED;AACA;AACA,SAAS,aAAT,CAAuB,IAAvB,EAA6B;AAC3B,MAAI,gBAAgB,IAApB,EAA0B;AACxB,WAAO,KAAK,cAAL,KAAwB,GAAxB,GACA,SAAS,KA
AK,WAAL,KAAmB,CAA5B,EAA+B,CAA/B,CADA,GACoC,GADpC,GAEA,SAAS,KAAK,UAAL,EAAT,EAA4B,CAA5B,CAFP;AAID,GALD,MAKO;AACL,WAAO,IAAP;AACD;AACF;;;;;;;;;;;;;;ACjJD;;;;AACA;;;;AACA;;IAAY,I;;;;;;;;AAEZ;;;;;;;;;;;;;;;;IAgBa,e,WAAA,e;AAEX,6BAA4C;AAAA,QAAhC,KAAgC,uEAAxB,IAAwB;AAAA,QAAlB,SAAkB,uEAAN,IAAM;;AAAA;;AAC1C,SAAK,WAAL,GAAmB,sBAAnB;AACA,SAAK,QAAL,GAAgB,IAAI,KAAK,mBAAT,CAA6B,KAAK,WAAlC,CAAhB;;AAEA;AACA,SAAK,MAAL,GAAc,IAAd;AACA;AACA,SAAK,IAAL,GAAY,IAAZ;AACA;AACA,SAAK,eAAL,GAAuB,IAAvB;;AAEA,SAAK,UAAL,GAAkB,KAAK,MAAL,CAAY,EAAE,QAAQ,IAAV,EAAZ,EAA8B,SAA9B,CAAlB;;AAEA,SAAK,QAAL,CAAc,KAAd;AACD;;AAED;;;;;;;;;;;;;;;;;6BAaS,K,EAAO;AAAA;;AACd;AACA,UAAI,KAAK,MAAL,KAAgB,KAApB,EACE;AACF;AACA,UAAI,CAAC,KAAK,MAAN,IAAgB,CAAC,KAArB,EACE;;AAEF,UAAI,KAAK,IAAT,EAAe;AACb,aAAK,IAAL,CAAU,GAAV,CAAc,QAAd,EAAwB,KAAK,eAA7B;AACA,aAAK,IAAL,GAAY,IAAZ;AACA,aAAK,eAAL,GAAuB,IAAvB;AACD;;AAED,WAAK,MAAL,GAAc,KAAd;;AAEA,UAAI,KAAJ,EAAW;AACT,aAAK,IAAL,GAAY,qBAAI,KAAJ,EAAW,GAAX,CAAe,WAAf,CAAZ;AACA,YAAI,MAAM,KAAK,IAAL,CAAU,EAAV,CAAa,QAAb,EAAuB,UAAC,CAAD,EAAO;AACtC,gBAAK,WAAL,CAAiB,OAAjB,CAAyB,QAAzB,EAAmC,CAAnC;AACD,SAFS,CAAV;AAGA,aAAK,eAAL,GAAuB,GAAvB;AACD;AACF;;AAED;;;;;;;;;;;;;;;AAcA;;;;;oCAKgB,S,EAAW;AACzB;AACA,aAAO,KAAK,MAAL,CAAY,EAAZ,EACL,KAAK,UAAL,GAAkB,KAAK,UAAvB,GAAoC,IAD/B,EAEL,YAAY,SAAZ,GAAwB,IAFnB,CAAP;AAGD;;AAED;;;;;;;;;;;;;;;wBAYI,Y,EAAc,S,EAAW;AAC3B,UAAI,KAAK,IAAT,EACE,KAAK,IAAL,CAAU,GAAV,CAAc,YAAd,EAA4B,KAAK,eAAL,CAAqB,SAArB,CAA5B;AACH;;AAED;;;;;;;;;;;;;0BAUM,S,EAAW;AACf,UAAI,KAAK,IAAT,EACE,KAAK,GAAL,CAAS,KAAK,CAAd,EAAiB,KAAK,eAAL,CAAqB,SAArB,CAAjB;AACH;;AAED;;;;;;;;;;;;;uBAUG,S,EAAW,Q,EAAU;AACtB,aAAO,KAAK,QAAL,CAAc,EAAd,CAAiB,SAAjB,EAA4B,QAA5B,CAAP;AACD;;AAED;;;;;;;;;;;wBAQI,S,EAAW,Q,EAAU;AACvB,aAAO,KAAK,QAAL,CAAc,GAAd,CAAkB,SAAlB,EAA6B,QAA7B,CAAP;AACD;;AAED;;;;;;;;4BAKQ;AACN,WAAK,QAAL,CAAc,kBAAd;AACA,WAAK,QAAL,CAAc,IAAd;AACD;;;wBAlFW;AACV,aAAO,KAAK,IAAL,GAAY,KAAK,IAAL,CAAU,GAAV,EAAZ,GAA8B,IAArC;AACD;;;;;;AAmFH;;;;;;;;;AASA;;;;;;;;;;;;;;;;;;;;;QCpLgB,M,GAAA,M;QAeA,W,GAAA,W;QAQA,e,GAAA,e;QAoCA,a,GAAA,a;;;
;AA3DT,SAAS,MAAT,CAAgB,MAAhB,EAAoC;AAAA,oCAAT,OAAS;AAAT,WAAS;AAAA;;AACzC,OAAK,IAAI,IAAI,CAAb,EAAgB,IAAI,QAAQ,MAA5B,EAAoC,GAApC,EAAyC;AACvC,QAAI,MAAM,QAAQ,CAAR,CAAV;AACA,QAAI,OAAO,GAAP,KAAgB,WAAhB,IAA+B,QAAQ,IAA3C,EACE;;AAEF,SAAK,IAAI,GAAT,IAAgB,GAAhB,EAAqB;AACnB,UAAI,IAAI,cAAJ,CAAmB,GAAnB,CAAJ,EAA6B;AAC3B,eAAO,GAAP,IAAc,IAAI,GAAJ,CAAd;AACD;AACF;AACF;AACD,SAAO,MAAP;AACD;;AAEM,SAAS,WAAT,CAAqB,IAArB,EAA2B;AAChC,OAAK,IAAI,IAAI,CAAb,EAAgB,IAAI,KAAK,MAAzB,EAAiC,GAAjC,EAAsC;AACpC,QAAI,KAAK,CAAL,KAAW,KAAK,IAAE,CAAP,CAAf,EAA0B;AACxB,YAAM,IAAI,KAAJ,CAAU,0CAAV,CAAN;AACD;AACF;AACF;;AAEM,SAAS,eAAT,CAAyB,CAAzB,EAA4B,CAA5B,EAA+B;AACpC,MAAI,MAAM,CAAV;AACA,MAAI,MAAM,CAAV;;AAEA,MAAI,CAAC,CAAL,EAAQ,IAAI,EAAJ;AACR,MAAI,CAAC,CAAL,EAAQ,IAAI,EAAJ;;AAER,MAAI,SAAS,EAAb;AACA,MAAI,SAAS,EAAb;;AAEA,cAAY,CAAZ;AACA,cAAY,CAAZ;;AAEA,SAAO,MAAM,EAAE,MAAR,IAAkB,MAAM,EAAE,MAAjC,EAAyC;AACvC,QAAI,EAAE,GAAF,MAAW,EAAE,GAAF,CAAf,EAAuB;AACrB;AACA;AACD,KAHD,MAGO,IAAI,EAAE,GAAF,IAAS,EAAE,GAAF,CAAb,EAAqB;AAC1B,aAAO,IAAP,CAAY,EAAE,KAAF,CAAZ;AACD,KAFM,MAEA;AACL,aAAO,IAAP,CAAY,EAAE,KAAF,CAAZ;AACD;AACF;;AAED,MAAI,MAAM,EAAE,MAAZ,EACE,SAAS,OAAO,MAAP,CAAc,EAAE,KAAF,CAAQ,GAAR,CAAd,CAAT;AACF,MAAI,MAAM,EAAE,MAAZ,EACE,SAAS,OAAO,MAAP,CAAc,EAAE,KAAF,CAAQ,GAAR,CAAd,CAAT;AACF,SAAO;AACL,aAAS,MADJ;AAEL,WAAO;AAFF,GAAP;AAID;;AAED;AACA;AACO,SAAS,aAAT,CAAuB,EAAvB,EAA2B;AAChC,MAAI,QAAQ,EAAZ;AACA,MAAI,eAAJ;AACA,OAAK,IAAI,IAAT,IAAiB,EAAjB,EAAqB;AACnB,QAAI,GAAG,cAAH,CAAkB,IAAlB,CAAJ,EACE,MAAM,IAAN,CAAW,IAAX;AACF,QAAI,QAAO,GAAG,IAAH,CAAP,MAAqB,QAArB,IAAiC,OAAO,GAAG,IAAH,EAAS,MAAhB,KAA4B,WAAjE,EAA8E;AAC5E,YAAM,IAAI,KAAJ,CAAU,2BAAV,CAAN;AACD,KAFD,MAEO,IAAI,OAAO,MAAP,KAAmB,WAAnB,IAAkC,WAAW,GAAG,IAAH,EAAS,MAA1D,EAAkE;AACvE,YAAM,IAAI,KAAJ,CAAU,8CAAV,CAAN;AACD;AACD,aAAS,GAAG,IAAH,EAAS,MAAlB;AACD;AACD,MAAI,UAAU,EAAd;AACA,MAAI,aAAJ;AACA,OAAK,IAAI,MAAM,CAAf,EAAkB,MAAM,MAAxB,EAAgC,KAAhC,EAAuC;AACrC,WAAO,EAAP;AACA,SAAK,IAAI,MAAM,CAAf,EAAkB,MAAM,MAAM,MAA9B,EAAsC,KAAtC,EAA6C;AAC3C,WAAK,MAAM,GAAN,CAAL,IAAmB,GAAG,MAAM,GAAN,CAAH,EAAe,GAAf,CAAnB;AACD;AA
CD,YAAQ,IAAR,CAAa,IAAb;AACD;AACD,SAAO,OAAP;AACD;;AAED;;;;;;;IAMa,mB,WAAA,mB;AACX,+BAAY,OAAZ,EAAqB;AAAA;;AACnB,SAAK,QAAL,GAAgB,OAAhB;AACA,SAAK,KAAL,GAAa,EAAb;AACD;;;;uBAEE,S,EAAW,Q,EAAU;AACtB,UAAI,MAAM,KAAK,QAAL,CAAc,EAAd,CAAiB,SAAjB,EAA4B,QAA5B,CAAV;AACA,WAAK,KAAL,CAAW,GAAX,IAAkB,SAAlB;AACA,aAAO,GAAP;AACD;;;wBAEG,S,EAAW,Q,EAAU;AACvB,UAAI,MAAM,KAAK,QAAL,CAAc,GAAd,CAAkB,SAAlB,EAA6B,QAA7B,CAAV;AACA,UAAI,GAAJ,EAAS;AACP,eAAO,KAAK,KAAL,CAAW,GAAX,CAAP;AACD;AACD,aAAO,GAAP;AACD;;;yCAEoB;AAAA;;AACnB,UAAI,eAAe,KAAK,KAAxB;AACA,WAAK,KAAL,GAAa,EAAb;AACA,aAAO,IAAP,CAAY,YAAZ,EAA0B,OAA1B,CAAkC,UAAC,GAAD,EAAS;AACzC,cAAK,QAAL,CAAc,GAAd,CAAkB,aAAa,GAAb,CAAlB,EAAqC,GAArC;AACD,OAFD;AAGD;;;;;;;;;;;;;;;;;;ACpHH;;;;;;;;IAEqB,G;AACnB,eAAY,KAAZ,EAAmB,IAAnB,EAAyB,YAAa,KAAtC,EAA6C;AAAA;;AAC3C,SAAK,MAAL,GAAc,KAAd;AACA,SAAK,KAAL,GAAa,IAAb;AACA,SAAK,MAAL,GAAc,KAAd;AACA,SAAK,OAAL,GAAe,sBAAf;AACD;;;;0BAEK;AACJ,aAAO,KAAK,MAAZ;AACD;;;wBAEG,K,EAAO,YAAa,K,EAAO;AAC7B,UAAI,KAAK,MAAL,KAAgB,KAApB,EAA2B;AACzB;AACA;AACD;AACD,UAAI,WAAW,KAAK,MAApB;AACA,WAAK,MAAL,GAAc,KAAd;AACA;AACA,UAAI,MAAM,EAAV;AACA,UAAI,SAAS,QAAO,KAAP,yCAAO,KAAP,OAAkB,QAA/B,EAAyC;AACvC,aAAK,IAAI,CAAT,IAAc,KAAd,EAAqB;AACnB,cAAI,MAAM,cAAN,CAAqB,CAArB,CAAJ,EACE,IAAI,CAAJ,IAAS,MAAM,CAAN,CAAT;AACH;AACF;AACD,UAAI,QAAJ,GAAe,QAAf;AACA,UAAI,KAAJ,GAAY,KAAZ;AACA,WAAK,OAAL,CAAa,OAAb,CAAqB,QAArB,EAA+B,GAA/B,EAAoC,IAApC;;AAEA;AACA;AACA,UAAI,OAAO,KAAP,IAAgB,OAAO,KAAP,CAAa,aAAjC,EAAgD;AAC9C,eAAO,KAAP,CAAa,aAAb,CACE,mBACG,KAAK,MAAL,CAAY,IAAZ,KAAqB,IAArB,GAA4B,KAAK,MAAL,CAAY,IAAZ,GAAmB,GAA/C,GAAqD,EADxD,IAEE,KAAK,KAHT,EAIE,OAAO,KAAP,KAAkB,WAAlB,GAAgC,IAAhC,GAAuC,KAJzC;AAMD;AACF;;;uBAEE,S,EAAW,Q,EAAU;AACtB,aAAO,KAAK,OAAL,CAAa,EAAb,CAAgB,SAAhB,EAA2B,QAA3B,CAAP;AACD;;;wBAEG,S,EAAW,Q,EAAU;AACvB,aAAO,KAAK,OAAL,CAAa,GAAb,CAAiB,SAAjB,EAA4B,QAA5B,CAAP;AACD;;;;;;kBAjDkB,G", - "file": "generated.js", - "sourceRoot": "", - "sourcesContent": [ - "(function(){function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require==\"function\"&&require;if(!u&&a)return 
a(o,!0);if(i)return i(o,!0);var f=new Error(\"Cannot find module '\"+o+\"'\");throw f.code=\"MODULE_NOT_FOUND\",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require==\"function\"&&require;for(var o=0;o {\n this._eventRelay.trigger(\"change\", e, this);\n });\n this._varOnChangeSub = sub;\n }\n }\n\n /**\n * Combine the given `extraInfo` (if any) with the handle's default\n * `_extraInfo` (if any).\n * @private\n */\n _mergeExtraInfo(extraInfo) {\n return util.extend({},\n this._extraInfo ? this._extraInfo : null,\n extraInfo ? extraInfo : null);\n }\n\n /**\n * Close the handle. This clears this handle's contribution to the filter set,\n * and unsubscribes all event listeners.\n */\n close() {\n this._emitter.removeAllListeners();\n this.clear();\n this.setGroup(null);\n }\n\n /**\n * Clear this handle's contribution to the filter set.\n *\n * @param {Object} [extraInfo] - Extra properties to be included on the event\n * object that's passed to listeners (in addition to any options that were\n * passed into the `FilterHandle` constructor).\n * \n * @fires FilterHandle#change\n */\n clear(extraInfo) {\n if (!this._filterSet)\n return;\n this._filterSet.clear(this._id);\n this._onChange(extraInfo);\n }\n\n /**\n * Set this handle's contribution to the filter set. This array should consist\n * of the keys of the rows that _should_ be displayed; any keys that are not\n * present in the array will be considered _filtered out_. Note that multiple\n * `FilterHandle` instances in the group may each contribute an array of keys,\n * and only those keys that appear in _all_ of the arrays make it through the\n * filter.\n *\n * @param {string[]} keys - Empty array, or array of keys. 
To clear the\n * filter, don't pass an empty array; instead, use the\n * {@link FilterHandle#clear} method.\n * @param {Object} [extraInfo] - Extra properties to be included on the event\n * object that's passed to listeners (in addition to any options that were\n * passed into the `FilterHandle` constructor).\n * \n * @fires FilterHandle#change\n */\n set(keys, extraInfo) {\n if (!this._filterSet)\n return;\n this._filterSet.update(this._id, keys);\n this._onChange(extraInfo);\n }\n\n /**\n * @return {string[]|null} - Either: 1) an array of keys that made it through\n * all of the `FilterHandle` instances, or, 2) `null`, which means no filter\n * is being applied (all data should be displayed).\n */\n get filteredKeys() {\n return this._filterSet ? this._filterSet.value : null;\n }\n\n /**\n * Subscribe to events on this `FilterHandle`.\n *\n * @param {string} eventType - Indicates the type of events to listen to.\n * Currently, only `\"change\"` is supported.\n * @param {FilterHandle~listener} listener - The callback function that\n * will be invoked when the event occurs.\n * @return {string} - A token to pass to {@link FilterHandle#off} to cancel\n * this subscription.\n */\n on(eventType, listener) {\n return this._emitter.on(eventType, listener);\n }\n\n /**\n * Cancel event subscriptions created by {@link FilterHandle#on}.\n *\n * @param {string} eventType - The type of event to unsubscribe.\n * @param {string|FilterHandle~listener} listener - Either the callback\n * function previously passed into {@link FilterHandle#on}, or the\n * string that was returned from {@link FilterHandle#on}.\n */\n off(eventType, listener) {\n return this._emitter.off(eventType, listener);\n }\n\n _onChange(extraInfo) {\n if (!this._filterSet)\n return;\n this._filterVar.set(this._filterSet.value, this._mergeExtraInfo(extraInfo));\n }\n\n /**\n * @callback FilterHandle~listener\n * @param {Object} event - An object containing details of the event. 
For\n * `\"change\"` events, this includes the properties `value` (the new\n * value of the filter set, or `null` if no filter set is active),\n * `oldValue` (the previous value of the filter set), and `sender` (the\n * `FilterHandle` instance that made the change).\n */\n\n}\n\n/**\n * @event FilterHandle#change\n * @type {object}\n * @property {object} value - The new value of the filter set, or `null`\n * if no filter set is active.\n * @property {object} oldValue - The previous value of the filter set.\n * @property {FilterHandle} sender - The `FilterHandle` instance that\n * changed the value.\n */\n", - "import { diffSortedLists } from \"./util\";\n\nfunction naturalComparator(a, b) {\n if (a === b) {\n return 0;\n } else if (a < b) {\n return -1;\n } else if (a > b) {\n return 1;\n }\n}\n\n/**\n * @private\n */\nexport default class FilterSet {\n constructor() {\n this.reset();\n }\n\n reset() {\n // Key: handle ID, Value: array of selected keys, or null\n this._handles = {};\n // Key: key string, Value: count of handles that include it\n this._keys = {};\n this._value = null;\n this._activeHandles = 0;\n }\n\n get value() {\n return this._value;\n }\n\n update(handleId, keys) {\n if (keys !== null) {\n keys = keys.slice(0); // clone before sorting\n keys.sort(naturalComparator);\n }\n\n let {added, removed} = diffSortedLists(this._handles[handleId], keys);\n this._handles[handleId] = keys;\n\n for (let i = 0; i < added.length; i++) {\n this._keys[added[i]] = (this._keys[added[i]] || 0) + 1;\n }\n for (let i = 0; i < removed.length; i++) {\n this._keys[removed[i]]--;\n }\n\n this._updateValue(keys);\n }\n\n /**\n * @param {string[]} keys Sorted array of strings that indicate\n * a superset of possible keys.\n * @private\n */\n _updateValue(keys = this._allKeys) {\n let handleCount = Object.keys(this._handles).length;\n if (handleCount === 0) {\n this._value = null;\n } else {\n this._value = [];\n for (let i = 0; i < keys.length; i++) {\n let count = 
this._keys[keys[i]];\n if (count === handleCount) {\n this._value.push(keys[i]);\n }\n }\n }\n }\n\n clear(handleId) {\n if (typeof(this._handles[handleId]) === \"undefined\") {\n return;\n }\n\n let keys = this._handles[handleId];\n if (!keys) {\n keys = [];\n }\n\n for (let i = 0; i < keys.length; i++) {\n this._keys[keys[i]]--;\n }\n delete this._handles[handleId];\n\n this._updateValue();\n }\n\n get _allKeys() {\n let allKeys = Object.keys(this._keys);\n allKeys.sort(naturalComparator);\n return allKeys;\n }\n}\n", - "import Var from \"./var\";\n\n// Use a global so that multiple copies of crosstalk.js can be loaded and still\n// have groups behave as singletons across all copies.\nglobal.__crosstalk_groups = global.__crosstalk_groups || {};\nlet groups = global.__crosstalk_groups;\n\nexport default function group(groupName) {\n if (groupName && typeof(groupName) === \"string\") {\n if (!groups.hasOwnProperty(groupName)) {\n groups[groupName] = new Group(groupName);\n }\n return groups[groupName];\n } else if (typeof(groupName) === \"object\" && groupName._vars && groupName.var) {\n // Appears to already be a group object\n return groupName;\n } else if (Array.isArray(groupName) &&\n groupName.length == 1 &&\n typeof(groupName[0]) === \"string\") {\n return group(groupName[0]);\n } else {\n throw new Error(\"Invalid groupName argument\");\n }\n}\n\nclass Group {\n constructor(name) {\n this.name = name;\n this._vars = {};\n }\n\n var(name) {\n if (!name || typeof(name) !== \"string\") {\n throw new Error(\"Invalid var name\");\n }\n\n if (!this._vars.hasOwnProperty(name))\n this._vars[name] = new Var(this, name);\n return this._vars[name];\n }\n\n has(name) {\n if (!name || typeof(name) !== \"string\") {\n throw new Error(\"Invalid var name\");\n }\n\n return this._vars.hasOwnProperty(name);\n }\n}\n", - "import group from \"./group\";\nimport { SelectionHandle } from \"./selection\";\nimport { FilterHandle } from \"./filter\";\nimport { bind } from 
\"./input\";\nimport \"./input_selectize\";\nimport \"./input_checkboxgroup\";\nimport \"./input_slider\";\n\nconst defaultGroup = group(\"default\");\n\nfunction var_(name) {\n return defaultGroup.var(name);\n}\n\nfunction has(name) {\n return defaultGroup.has(name);\n}\n\nif (global.Shiny) {\n global.Shiny.addCustomMessageHandler(\"update-client-value\", function(message) {\n if (typeof(message.group) === \"string\") {\n group(message.group).var(message.name).set(message.value);\n } else {\n var_(message.name).set(message.value);\n }\n });\n}\n\nconst crosstalk = {\n group: group,\n var: var_,\n has: has,\n SelectionHandle: SelectionHandle,\n FilterHandle: FilterHandle,\n bind: bind\n};\n\n/**\n * @namespace crosstalk\n */\nexport default crosstalk;\nglobal.crosstalk = crosstalk;\n", - "let $ = global.jQuery;\n\nlet bindings = {};\n\nexport function register(reg) {\n bindings[reg.className] = reg;\n if (global.document && global.document.readyState !== \"complete\") {\n $(() => {\n bind();\n });\n } else if (global.document) {\n setTimeout(bind, 100);\n }\n}\n\nexport function bind() {\n Object.keys(bindings).forEach(function(className) {\n let binding = bindings[className];\n $(\".\" + binding.className).not(\".crosstalk-input-bound\").each(function(i, el) {\n bindInstance(binding, el);\n });\n });\n}\n\n// Escape jQuery identifier\nfunction $escape(val) {\n return val.replace(/([!\"#$%&'()*+,./:;<=>?@[\\\\\\]^`{|}~])/g, \"\\\\$1\");\n}\n\nfunction bindEl(el) {\n let $el = $(el);\n Object.keys(bindings).forEach(function(className) {\n if ($el.hasClass(className) && !$el.hasClass(\"crosstalk-input-bound\")) {\n let binding = bindings[className];\n bindInstance(binding, el);\n }\n });\n}\n\nfunction bindInstance(binding, el) {\n let jsonEl = $(el).find(\"script[type='application/json'][data-for='\" + $escape(el.id) + \"']\");\n let data = JSON.parse(jsonEl[0].innerText);\n\n let instance = binding.factory(el, data);\n $(el).data(\"crosstalk-instance\", 
instance);\n $(el).addClass(\"crosstalk-input-bound\");\n}\n\nif (global.Shiny) {\n let inputBinding = new global.Shiny.InputBinding();\n let $ = global.jQuery;\n $.extend(inputBinding, {\n find: function(scope) {\n return $(scope).find(\".crosstalk-input\");\n },\n initialize: function(el) {\n if (!$(el).hasClass(\"crosstalk-input-bound\")) {\n bindEl(el);\n }\n },\n getId: function(el) {\n return el.id;\n },\n getValue: function(el) {\n\n },\n setValue: function(el, value) {\n\n },\n receiveMessage: function(el, data) {\n\n },\n subscribe: function(el, callback) {\n $(el).data(\"crosstalk-instance\").resume();\n },\n unsubscribe: function(el) {\n $(el).data(\"crosstalk-instance\").suspend();\n }\n });\n global.Shiny.inputBindings.register(inputBinding, \"crosstalk.inputBinding\");\n}\n", - "import * as input from \"./input\";\nimport { FilterHandle } from \"./filter\";\n\nlet $ = global.jQuery;\n\ninput.register({\n className: \"crosstalk-input-checkboxgroup\",\n\n factory: function(el, data) {\n /*\n * map: {\"groupA\": [\"keyA\", \"keyB\", ...], ...}\n * group: \"ct-groupname\"\n */\n let ctHandle = new FilterHandle(data.group);\n\n let lastKnownKeys;\n let $el = $(el);\n $el.on(\"change\", \"input[type='checkbox']\", function() {\n let checked = $el.find(\"input[type='checkbox']:checked\");\n if (checked.length === 0) {\n lastKnownKeys = null;\n ctHandle.clear();\n } else {\n let keys = {};\n checked.each(function() {\n data.map[this.value].forEach(function(key) {\n keys[key] = true;\n });\n });\n let keyArray = Object.keys(keys);\n keyArray.sort();\n lastKnownKeys = keyArray;\n ctHandle.set(keyArray);\n }\n });\n\n return {\n suspend: function() {\n ctHandle.clear();\n },\n resume: function() {\n if (lastKnownKeys)\n ctHandle.set(lastKnownKeys);\n }\n };\n }\n});\n", - "import * as input from \"./input\";\nimport * as util from \"./util\";\nimport { FilterHandle } from \"./filter\";\n\nlet $ = global.jQuery;\n\ninput.register({\n className: 
\"crosstalk-input-select\",\n\n factory: function(el, data) {\n /*\n * items: {value: [...], label: [...]}\n * map: {\"groupA\": [\"keyA\", \"keyB\", ...], ...}\n * group: \"ct-groupname\"\n */\n\n let first = [{value: \"\", label: \"(All)\"}];\n let items = util.dataframeToD3(data.items);\n let opts = {\n options: first.concat(items),\n valueField: \"value\",\n labelField: \"label\",\n searchField: \"label\"\n };\n\n let select = $(el).find(\"select\")[0];\n\n let selectize = $(select).selectize(opts)[0].selectize;\n\n let ctHandle = new FilterHandle(data.group);\n\n let lastKnownKeys;\n selectize.on(\"change\", function() {\n if (selectize.items.length === 0) {\n lastKnownKeys = null;\n ctHandle.clear();\n } else {\n let keys = {};\n selectize.items.forEach(function(group) {\n data.map[group].forEach(function(key) {\n keys[key] = true;\n });\n });\n let keyArray = Object.keys(keys);\n keyArray.sort();\n lastKnownKeys = keyArray;\n ctHandle.set(keyArray);\n }\n });\n\n return {\n suspend: function() {\n ctHandle.clear();\n },\n resume: function() {\n if (lastKnownKeys)\n ctHandle.set(lastKnownKeys);\n }\n };\n }\n});\n", - "import * as input from \"./input\";\nimport { FilterHandle } from \"./filter\";\n\nlet $ = global.jQuery;\nlet strftime = global.strftime;\n\ninput.register({\n className: \"crosstalk-input-slider\",\n\n factory: function(el, data) {\n /*\n * map: {\"groupA\": [\"keyA\", \"keyB\", ...], ...}\n * group: \"ct-groupname\"\n */\n let ctHandle = new FilterHandle(data.group);\n\n let opts = {};\n let $el = $(el).find(\"input\");\n let dataType = $el.data(\"data-type\");\n let timeFormat = $el.data(\"time-format\");\n let round = $el.data(\"round\");\n let timeFormatter;\n\n // Set up formatting functions\n if (dataType === \"date\") {\n timeFormatter = strftime.utc();\n opts.prettify = function(num) {\n return timeFormatter(timeFormat, new Date(num));\n };\n\n } else if (dataType === \"datetime\") {\n let timezone = $el.data(\"timezone\");\n if 
(timezone)\n timeFormatter = strftime.timezone(timezone);\n else\n timeFormatter = strftime;\n\n opts.prettify = function(num) {\n return timeFormatter(timeFormat, new Date(num));\n };\n } else if (dataType === \"number\") {\n if (typeof round !== \"undefined\")\n opts.prettify = function(num) {\n let factor = Math.pow(10, round);\n return Math.round(num * factor) / factor;\n };\n }\n\n $el.ionRangeSlider(opts);\n\n function getValue() {\n let result = $el.data(\"ionRangeSlider\").result;\n\n // Function for converting numeric value from slider to appropriate type.\n let convert;\n let dataType = $el.data(\"data-type\");\n if (dataType === \"date\") {\n convert = function(val) {\n return formatDateUTC(new Date(+val));\n };\n } else if (dataType === \"datetime\") {\n convert = function(val) {\n // Convert ms to s\n return +val / 1000;\n };\n } else {\n convert = function(val) { return +val; };\n }\n\n if ($el.data(\"ionRangeSlider\").options.type === \"double\") {\n return [convert(result.from), convert(result.to)];\n } else {\n return convert(result.from);\n }\n }\n\n let lastKnownKeys = null;\n\n $el.on(\"change.crosstalkSliderInput\", function(event) {\n if (!$el.data(\"updating\") && !$el.data(\"animating\")) {\n let [from, to] = getValue();\n let keys = [];\n for (let i = 0; i < data.values.length; i++) {\n let val = data.values[i];\n if (val >= from && val <= to) {\n keys.push(data.keys[i]);\n }\n }\n keys.sort();\n ctHandle.set(keys);\n lastKnownKeys = keys;\n }\n });\n\n\n // let $el = $(el);\n // $el.on(\"change\", \"input[type=\"checkbox\"]\", function() {\n // let checked = $el.find(\"input[type=\"checkbox\"]:checked\");\n // if (checked.length === 0) {\n // ctHandle.clear();\n // } else {\n // let keys = {};\n // checked.each(function() {\n // data.map[this.value].forEach(function(key) {\n // keys[key] = true;\n // });\n // });\n // let keyArray = Object.keys(keys);\n // keyArray.sort();\n // ctHandle.set(keyArray);\n // }\n // });\n\n return {\n 
suspend: function() {\n ctHandle.clear();\n },\n resume: function() {\n if (lastKnownKeys)\n ctHandle.set(lastKnownKeys);\n }\n };\n }\n});\n\n\n// Convert a number to a string with leading zeros\nfunction padZeros(n, digits) {\n let str = n.toString();\n while (str.length < digits)\n str = \"0\" + str;\n return str;\n}\n\n// Given a Date object, return a string in yyyy-mm-dd format, using the\n// UTC date. This may be a day off from the date in the local time zone.\nfunction formatDateUTC(date) {\n if (date instanceof Date) {\n return date.getUTCFullYear() + \"-\" +\n padZeros(date.getUTCMonth()+1, 2) + \"-\" +\n padZeros(date.getUTCDate(), 2);\n\n } else {\n return null;\n }\n}\n", - "import Events from \"./events\";\nimport grp from \"./group\";\nimport * as util from \"./util\";\n\n/**\n * Use this class to read and write (and listen for changes to) the selection\n * for a Crosstalk group. This is intended to be used for linked brushing.\n *\n * If two (or more) `SelectionHandle` instances in the same webpage share the\n * same group name, they will share the same state. Setting the selection using\n * one `SelectionHandle` instance will result in the `value` property instantly\n * changing across the others, and `\"change\"` event listeners on all instances\n * (including the one that initiated the sending) will fire.\n *\n * @param {string} [group] - The name of the Crosstalk group, or if none,\n * null or undefined (or any other falsy value). This can be changed later\n * via the [SelectionHandle#setGroup](#setGroup) method.\n * @param {Object} [extraInfo] - An object whose properties will be copied to\n * the event object whenever an event is emitted.\n */\nexport class SelectionHandle {\n\n constructor(group = null, extraInfo = null) {\n this._eventRelay = new Events();\n this._emitter = new util.SubscriptionTracker(this._eventRelay);\n\n // Name of the group we're currently tracking, if any. 
Can change over time.\n this._group = null;\n // The Var we're currently tracking, if any. Can change over time.\n this._var = null;\n // The event handler subscription we currently have on var.on(\"change\").\n this._varOnChangeSub = null;\n\n this._extraInfo = util.extend({ sender: this }, extraInfo);\n\n this.setGroup(group);\n }\n\n /**\n * Changes the Crosstalk group membership of this SelectionHandle. The group\n * being switched away from (if any) will not have its selection value\n * modified as a result of calling `setGroup`, even if this handle was the\n * most recent handle to set the selection of the group.\n *\n * The group being switched to (if any) will also not have its selection value\n * modified as a result of calling `setGroup`. If you want to set the\n * selection value of the new group, call `set` explicitly.\n *\n * @param {string} group - The name of the Crosstalk group, or null (or\n * undefined) to clear the group.\n */\n setGroup(group) {\n // If group is unchanged, do nothing\n if (this._group === group)\n return;\n // Treat null, undefined, and other falsy values the same\n if (!this._group && !group)\n return;\n\n if (this._var) {\n this._var.off(\"change\", this._varOnChangeSub);\n this._var = null;\n this._varOnChangeSub = null;\n }\n\n this._group = group;\n\n if (group) {\n this._var = grp(group).var(\"selection\");\n let sub = this._var.on(\"change\", (e) => {\n this._eventRelay.trigger(\"change\", e, this);\n });\n this._varOnChangeSub = sub;\n }\n }\n\n /**\n * Retrieves the current selection for the group represented by this\n * `SelectionHandle`.\n *\n * - If no selection is active, then this value will be falsy.\n * - If a selection is active, but no data points are selected, then this\n * value will be an empty array.\n * - If a selection is active, and data points are selected, then the keys\n * of the selected data points will be present in the array.\n */\n get value() {\n return this._var ? 
this._var.get() : null;\n }\n\n /**\n * Combines the given `extraInfo` (if any) with the handle's default\n * `_extraInfo` (if any).\n * @private\n */\n _mergeExtraInfo(extraInfo) {\n // Important incidental effect: shallow clone is returned\n return util.extend({},\n this._extraInfo ? this._extraInfo : null,\n extraInfo ? extraInfo : null);\n }\n\n /**\n * Overwrites the current selection for the group, and raises the `\"change\"`\n * event among all of the group's '`SelectionHandle` instances (including\n * this one).\n *\n * @fires SelectionHandle#change\n * @param {string[]} selectedKeys - Falsy, empty array, or array of keys (see\n * {@link SelectionHandle#value}).\n * @param {Object} [extraInfo] - Extra properties to be included on the event\n * object that's passed to listeners (in addition to any options that were\n * passed into the `SelectionHandle` constructor).\n */\n set(selectedKeys, extraInfo) {\n if (this._var)\n this._var.set(selectedKeys, this._mergeExtraInfo(extraInfo));\n }\n\n /**\n * Overwrites the current selection for the group, and raises the `\"change\"`\n * event among all of the group's '`SelectionHandle` instances (including\n * this one).\n *\n * @fires SelectionHandle#change\n * @param {Object} [extraInfo] - Extra properties to be included on the event\n * object that's passed to listeners (in addition to any that were passed\n * into the `SelectionHandle` constructor).\n */\n clear(extraInfo) {\n if (this._var)\n this.set(void 0, this._mergeExtraInfo(extraInfo));\n }\n\n /**\n * Subscribes to events on this `SelectionHandle`.\n *\n * @param {string} eventType - Indicates the type of events to listen to.\n * Currently, only `\"change\"` is supported.\n * @param {SelectionHandle~listener} listener - The callback function that\n * will be invoked when the event occurs.\n * @return {string} - A token to pass to {@link SelectionHandle#off} to cancel\n * this subscription.\n */\n on(eventType, listener) {\n return 
this._emitter.on(eventType, listener);\n }\n\n /**\n * Cancels event subscriptions created by {@link SelectionHandle#on}.\n *\n * @param {string} eventType - The type of event to unsubscribe.\n * @param {string|SelectionHandle~listener} listener - Either the callback\n * function previously passed into {@link SelectionHandle#on}, or the\n * string that was returned from {@link SelectionHandle#on}.\n */\n off(eventType, listener) {\n return this._emitter.off(eventType, listener);\n }\n\n /**\n * Shuts down the `SelectionHandle` object.\n *\n * Removes all event listeners that were added through this handle.\n */\n close() {\n this._emitter.removeAllListeners();\n this.setGroup(null);\n }\n}\n\n/**\n * @callback SelectionHandle~listener\n * @param {Object} event - An object containing details of the event. For\n * `\"change\"` events, this includes the properties `value` (the new\n * value of the selection, or `undefined` if no selection is active),\n * `oldValue` (the previous value of the selection), and `sender` (the\n * `SelectionHandle` instance that made the change).\n */\n\n/**\n * @event SelectionHandle#change\n * @type {object}\n * @property {object} value - The new value of the selection, or `undefined`\n * if no selection is active.\n * @property {object} oldValue - The previous value of the selection.\n * @property {SelectionHandle} sender - The `SelectionHandle` instance that\n * changed the value.\n */\n", - "export function extend(target, ...sources) {\n for (let i = 0; i < sources.length; i++) {\n let src = sources[i];\n if (typeof(src) === \"undefined\" || src === null)\n continue;\n\n for (let key in src) {\n if (src.hasOwnProperty(key)) {\n target[key] = src[key];\n }\n }\n }\n return target;\n}\n\nexport function checkSorted(list) {\n for (let i = 1; i < list.length; i++) {\n if (list[i] <= list[i-1]) {\n throw new Error(\"List is not sorted or contains duplicate\");\n }\n }\n}\n\nexport function diffSortedLists(a, b) {\n let i_a = 0;\n let i_b = 
0;\n\n if (!a) a = [];\n if (!b) b = [];\n\n let a_only = [];\n let b_only = [];\n\n checkSorted(a);\n checkSorted(b);\n\n while (i_a < a.length && i_b < b.length) {\n if (a[i_a] === b[i_b]) {\n i_a++;\n i_b++;\n } else if (a[i_a] < b[i_b]) {\n a_only.push(a[i_a++]);\n } else {\n b_only.push(b[i_b++]);\n }\n }\n\n if (i_a < a.length)\n a_only = a_only.concat(a.slice(i_a));\n if (i_b < b.length)\n b_only = b_only.concat(b.slice(i_b));\n return {\n removed: a_only,\n added: b_only\n };\n}\n\n// Convert from wide: { colA: [1,2,3], colB: [4,5,6], ... }\n// to long: [ {colA: 1, colB: 4}, {colA: 2, colB: 5}, ... ]\nexport function dataframeToD3(df) {\n let names = [];\n let length;\n for (let name in df) {\n if (df.hasOwnProperty(name))\n names.push(name);\n if (typeof(df[name]) !== \"object\" || typeof(df[name].length) === \"undefined\") {\n throw new Error(\"All fields must be arrays\");\n } else if (typeof(length) !== \"undefined\" && length !== df[name].length) {\n throw new Error(\"All fields must be arrays of the same length\");\n }\n length = df[name].length;\n }\n let results = [];\n let item;\n for (let row = 0; row < length; row++) {\n item = {};\n for (let col = 0; col < names.length; col++) {\n item[names[col]] = df[names[col]][row];\n }\n results.push(item);\n }\n return results;\n}\n\n/**\n * Keeps track of all event listener additions/removals and lets all active\n * listeners be removed with a single operation.\n *\n * @private\n */\nexport class SubscriptionTracker {\n constructor(emitter) {\n this._emitter = emitter;\n this._subs = {};\n }\n\n on(eventType, listener) {\n let sub = this._emitter.on(eventType, listener);\n this._subs[sub] = eventType;\n return sub;\n }\n\n off(eventType, listener) {\n let sub = this._emitter.off(eventType, listener);\n if (sub) {\n delete this._subs[sub];\n }\n return sub;\n }\n\n removeAllListeners() {\n let current_subs = this._subs;\n this._subs = {};\n Object.keys(current_subs).forEach((sub) => {\n 
this._emitter.off(current_subs[sub], sub);\n });\n }\n}\n", - "import Events from \"./events\";\n\nexport default class Var {\n constructor(group, name, /*optional*/ value) {\n this._group = group;\n this._name = name;\n this._value = value;\n this._events = new Events();\n }\n\n get() {\n return this._value;\n }\n\n set(value, /*optional*/ event) {\n if (this._value === value) {\n // Do nothing; the value hasn't changed\n return;\n }\n let oldValue = this._value;\n this._value = value;\n // Alert JavaScript listeners that the value has changed\n let evt = {};\n if (event && typeof(event) === \"object\") {\n for (let k in event) {\n if (event.hasOwnProperty(k))\n evt[k] = event[k];\n }\n }\n evt.oldValue = oldValue;\n evt.value = value;\n this._events.trigger(\"change\", evt, this);\n\n // TODO: Make this extensible, to let arbitrary back-ends know that\n // something has changed\n if (global.Shiny && global.Shiny.onInputChange) {\n global.Shiny.onInputChange(\n \".clientValue-\" +\n (this._group.name !== null ? this._group.name + \"-\" : \"\") +\n this._name,\n typeof(value) === \"undefined\" ? 
null : value\n );\n }\n }\n\n on(eventType, listener) {\n return this._events.on(eventType, listener);\n }\n\n off(eventType, listener) {\n return this._events.off(eventType, listener);\n }\n}\n" - ] -} \ No newline at end of file diff --git a/_freeze/site_libs/crosstalk-1.2.1/js/crosstalk.min.js b/_freeze/site_libs/crosstalk-1.2.1/js/crosstalk.min.js deleted file mode 100644 index b7ec0ac9f..000000000 --- a/_freeze/site_libs/crosstalk-1.2.1/js/crosstalk.min.js +++ /dev/null @@ -1,2 +0,0 @@ -!function o(u,a,l){function s(n,e){if(!a[n]){if(!u[n]){var t="function"==typeof require&&require;if(!e&&t)return t(n,!0);if(f)return f(n,!0);var r=new Error("Cannot find module '"+n+"'");throw r.code="MODULE_NOT_FOUND",r}var i=a[n]={exports:{}};u[n][0].call(i.exports,function(e){var t=u[n][1][e];return s(t||e)},i,i.exports,o,u,a,l)}return a[n].exports}for(var f="function"==typeof require&&require,e=0;e?@[\\\]^`{|}~])/g,"\\$1")+"']"),r=JSON.parse(n[0].innerText),i=e.factory(t,r);o(t).data("crosstalk-instance",i),o(t).addClass("crosstalk-input-bound")}if(t.Shiny){var e=new t.Shiny.InputBinding,u=t.jQuery;u.extend(e,{find:function(e){return u(e).find(".crosstalk-input")},initialize:function(e){var t,n;u(e).hasClass("crosstalk-input-bound")||(n=o(t=e),Object.keys(r).forEach(function(e){n.hasClass(e)&&!n.hasClass("crosstalk-input-bound")&&i(r[e],t)}))},getId:function(e){return e.id},getValue:function(e){},setValue:function(e,t){},receiveMessage:function(e,t){},subscribe:function(e,t){u(e).data("crosstalk-instance").resume()},unsubscribe:function(e){u(e).data("crosstalk-instance").suspend()}}),t.Shiny.inputBindings.register(e,"crosstalk.inputBinding")}}).call(this,"undefined"!=typeof global?global:"undefined"!=typeof self?self:"undefined"!=typeof window?window:{})},{}],7:[function(r,e,t){(function(e){"use strict";var t=function(e){{if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return 
t.default=e,t}}(r("./input")),n=r("./filter");var a=e.jQuery;t.register({className:"crosstalk-input-checkboxgroup",factory:function(e,r){var i=new n.FilterHandle(r.group),o=void 0,u=a(e);return u.on("change","input[type='checkbox']",function(){var e=u.find("input[type='checkbox']:checked");if(0===e.length)o=null,i.clear();else{var t={};e.each(function(){r.map[this.value].forEach(function(e){t[e]=!0})});var n=Object.keys(t);n.sort(),o=n,i.set(n)}}),{suspend:function(){i.clear()},resume:function(){o&&i.set(o)}}}})}).call(this,"undefined"!=typeof global?global:"undefined"!=typeof self?self:"undefined"!=typeof window?window:{})},{"./filter":2,"./input":6}],8:[function(r,e,t){(function(e){"use strict";var t=n(r("./input")),l=n(r("./util")),s=r("./filter");function n(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}var f=e.jQuery;t.register({className:"crosstalk-input-select",factory:function(e,n){var t=l.dataframeToD3(n.items),r={options:[{value:"",label:"(All)"}].concat(t),valueField:"value",labelField:"label",searchField:"label"},i=f(e).find("select")[0],o=f(i).selectize(r)[0].selectize,u=new s.FilterHandle(n.group),a=void 0;return o.on("change",function(){if(0===o.items.length)a=null,u.clear();else{var t={};o.items.forEach(function(e){n.map[e].forEach(function(e){t[e]=!0})});var e=Object.keys(t);e.sort(),a=e,u.set(e)}}),{suspend:function(){u.clear()},resume:function(){a&&u.set(a)}}}})}).call(this,"undefined"!=typeof global?global:"undefined"!=typeof self?self:"undefined"!=typeof window?window:{})},{"./filter":2,"./input":6,"./util":11}],9:[function(n,e,t){(function(e){"use strict";var d=function(e,t){if(Array.isArray(e))return e;if(Symbol.iterator in Object(e))return function(e,t){var n=[],r=!0,i=!1,o=void 0;try{for(var 
u,a=e[Symbol.iterator]();!(r=(u=a.next()).done)&&(n.push(u.value),!t||n.length!==t);r=!0);}catch(e){i=!0,o=e}finally{try{!r&&a.return&&a.return()}finally{if(i)throw o}}return n}(e,t);throw new TypeError("Invalid attempt to destructure non-iterable instance")},t=function(e){{if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}}(n("./input")),a=n("./filter");var v=e.jQuery,p=e.strftime;function y(e,t){for(var n=e.toString();n.length {\n this._eventRelay.trigger(\"change\", e, this);\n });\n this._varOnChangeSub = sub;\n }\n }\n\n /**\n * Combine the given `extraInfo` (if any) with the handle's default\n * `_extraInfo` (if any).\n * @private\n */\n _mergeExtraInfo(extraInfo) {\n return util.extend({},\n this._extraInfo ? this._extraInfo : null,\n extraInfo ? extraInfo : null);\n }\n\n /**\n * Close the handle. This clears this handle's contribution to the filter set,\n * and unsubscribes all event listeners.\n */\n close() {\n this._emitter.removeAllListeners();\n this.clear();\n this.setGroup(null);\n }\n\n /**\n * Clear this handle's contribution to the filter set.\n *\n * @param {Object} [extraInfo] - Extra properties to be included on the event\n * object that's passed to listeners (in addition to any options that were\n * passed into the `FilterHandle` constructor).\n * \n * @fires FilterHandle#change\n */\n clear(extraInfo) {\n if (!this._filterSet)\n return;\n this._filterSet.clear(this._id);\n this._onChange(extraInfo);\n }\n\n /**\n * Set this handle's contribution to the filter set. This array should consist\n * of the keys of the rows that _should_ be displayed; any keys that are not\n * present in the array will be considered _filtered out_. 
Note that multiple\n * `FilterHandle` instances in the group may each contribute an array of keys,\n * and only those keys that appear in _all_ of the arrays make it through the\n * filter.\n *\n * @param {string[]} keys - Empty array, or array of keys. To clear the\n * filter, don't pass an empty array; instead, use the\n * {@link FilterHandle#clear} method.\n * @param {Object} [extraInfo] - Extra properties to be included on the event\n * object that's passed to listeners (in addition to any options that were\n * passed into the `FilterHandle` constructor).\n * \n * @fires FilterHandle#change\n */\n set(keys, extraInfo) {\n if (!this._filterSet)\n return;\n this._filterSet.update(this._id, keys);\n this._onChange(extraInfo);\n }\n\n /**\n * @return {string[]|null} - Either: 1) an array of keys that made it through\n * all of the `FilterHandle` instances, or, 2) `null`, which means no filter\n * is being applied (all data should be displayed).\n */\n get filteredKeys() {\n return this._filterSet ? 
this._filterSet.value : null;\n }\n\n /**\n * Subscribe to events on this `FilterHandle`.\n *\n * @param {string} eventType - Indicates the type of events to listen to.\n * Currently, only `\"change\"` is supported.\n * @param {FilterHandle~listener} listener - The callback function that\n * will be invoked when the event occurs.\n * @return {string} - A token to pass to {@link FilterHandle#off} to cancel\n * this subscription.\n */\n on(eventType, listener) {\n return this._emitter.on(eventType, listener);\n }\n\n /**\n * Cancel event subscriptions created by {@link FilterHandle#on}.\n *\n * @param {string} eventType - The type of event to unsubscribe.\n * @param {string|FilterHandle~listener} listener - Either the callback\n * function previously passed into {@link FilterHandle#on}, or the\n * string that was returned from {@link FilterHandle#on}.\n */\n off(eventType, listener) {\n return this._emitter.off(eventType, listener);\n }\n\n _onChange(extraInfo) {\n if (!this._filterSet)\n return;\n this._filterVar.set(this._filterSet.value, this._mergeExtraInfo(extraInfo));\n }\n\n /**\n * @callback FilterHandle~listener\n * @param {Object} event - An object containing details of the event. 
For\n * `\"change\"` events, this includes the properties `value` (the new\n * value of the filter set, or `null` if no filter set is active),\n * `oldValue` (the previous value of the filter set), and `sender` (the\n * `FilterHandle` instance that made the change).\n */\n\n}\n\n/**\n * @event FilterHandle#change\n * @type {object}\n * @property {object} value - The new value of the filter set, or `null`\n * if no filter set is active.\n * @property {object} oldValue - The previous value of the filter set.\n * @property {FilterHandle} sender - The `FilterHandle` instance that\n * changed the value.\n */\n","import { diffSortedLists } from \"./util\";\n\nfunction naturalComparator(a, b) {\n if (a === b) {\n return 0;\n } else if (a < b) {\n return -1;\n } else if (a > b) {\n return 1;\n }\n}\n\n/**\n * @private\n */\nexport default class FilterSet {\n constructor() {\n this.reset();\n }\n\n reset() {\n // Key: handle ID, Value: array of selected keys, or null\n this._handles = {};\n // Key: key string, Value: count of handles that include it\n this._keys = {};\n this._value = null;\n this._activeHandles = 0;\n }\n\n get value() {\n return this._value;\n }\n\n update(handleId, keys) {\n if (keys !== null) {\n keys = keys.slice(0); // clone before sorting\n keys.sort(naturalComparator);\n }\n\n let {added, removed} = diffSortedLists(this._handles[handleId], keys);\n this._handles[handleId] = keys;\n\n for (let i = 0; i < added.length; i++) {\n this._keys[added[i]] = (this._keys[added[i]] || 0) + 1;\n }\n for (let i = 0; i < removed.length; i++) {\n this._keys[removed[i]]--;\n }\n\n this._updateValue(keys);\n }\n\n /**\n * @param {string[]} keys Sorted array of strings that indicate\n * a superset of possible keys.\n * @private\n */\n _updateValue(keys = this._allKeys) {\n let handleCount = Object.keys(this._handles).length;\n if (handleCount === 0) {\n this._value = null;\n } else {\n this._value = [];\n for (let i = 0; i < keys.length; i++) {\n let count = 
this._keys[keys[i]];\n if (count === handleCount) {\n this._value.push(keys[i]);\n }\n }\n }\n }\n\n clear(handleId) {\n if (typeof(this._handles[handleId]) === \"undefined\") {\n return;\n }\n\n let keys = this._handles[handleId];\n if (!keys) {\n keys = [];\n }\n\n for (let i = 0; i < keys.length; i++) {\n this._keys[keys[i]]--;\n }\n delete this._handles[handleId];\n\n this._updateValue();\n }\n\n get _allKeys() {\n let allKeys = Object.keys(this._keys);\n allKeys.sort(naturalComparator);\n return allKeys;\n }\n}\n","import Var from \"./var\";\n\n// Use a global so that multiple copies of crosstalk.js can be loaded and still\n// have groups behave as singletons across all copies.\nglobal.__crosstalk_groups = global.__crosstalk_groups || {};\nlet groups = global.__crosstalk_groups;\n\nexport default function group(groupName) {\n if (groupName && typeof(groupName) === \"string\") {\n if (!groups.hasOwnProperty(groupName)) {\n groups[groupName] = new Group(groupName);\n }\n return groups[groupName];\n } else if (typeof(groupName) === \"object\" && groupName._vars && groupName.var) {\n // Appears to already be a group object\n return groupName;\n } else if (Array.isArray(groupName) &&\n groupName.length == 1 &&\n typeof(groupName[0]) === \"string\") {\n return group(groupName[0]);\n } else {\n throw new Error(\"Invalid groupName argument\");\n }\n}\n\nclass Group {\n constructor(name) {\n this.name = name;\n this._vars = {};\n }\n\n var(name) {\n if (!name || typeof(name) !== \"string\") {\n throw new Error(\"Invalid var name\");\n }\n\n if (!this._vars.hasOwnProperty(name))\n this._vars[name] = new Var(this, name);\n return this._vars[name];\n }\n\n has(name) {\n if (!name || typeof(name) !== \"string\") {\n throw new Error(\"Invalid var name\");\n }\n\n return this._vars.hasOwnProperty(name);\n }\n}\n","import group from \"./group\";\nimport { SelectionHandle } from \"./selection\";\nimport { FilterHandle } from \"./filter\";\nimport { bind } from 
\"./input\";\nimport \"./input_selectize\";\nimport \"./input_checkboxgroup\";\nimport \"./input_slider\";\n\nconst defaultGroup = group(\"default\");\n\nfunction var_(name) {\n return defaultGroup.var(name);\n}\n\nfunction has(name) {\n return defaultGroup.has(name);\n}\n\nif (global.Shiny) {\n global.Shiny.addCustomMessageHandler(\"update-client-value\", function(message) {\n if (typeof(message.group) === \"string\") {\n group(message.group).var(message.name).set(message.value);\n } else {\n var_(message.name).set(message.value);\n }\n });\n}\n\nconst crosstalk = {\n group: group,\n var: var_,\n has: has,\n SelectionHandle: SelectionHandle,\n FilterHandle: FilterHandle,\n bind: bind\n};\n\n/**\n * @namespace crosstalk\n */\nexport default crosstalk;\nglobal.crosstalk = crosstalk;\n","let $ = global.jQuery;\n\nlet bindings = {};\n\nexport function register(reg) {\n bindings[reg.className] = reg;\n if (global.document && global.document.readyState !== \"complete\") {\n $(() => {\n bind();\n });\n } else if (global.document) {\n setTimeout(bind, 100);\n }\n}\n\nexport function bind() {\n Object.keys(bindings).forEach(function(className) {\n let binding = bindings[className];\n $(\".\" + binding.className).not(\".crosstalk-input-bound\").each(function(i, el) {\n bindInstance(binding, el);\n });\n });\n}\n\n// Escape jQuery identifier\nfunction $escape(val) {\n return val.replace(/([!\"#$%&'()*+,./:;<=>?@[\\\\\\]^`{|}~])/g, \"\\\\$1\");\n}\n\nfunction bindEl(el) {\n let $el = $(el);\n Object.keys(bindings).forEach(function(className) {\n if ($el.hasClass(className) && !$el.hasClass(\"crosstalk-input-bound\")) {\n let binding = bindings[className];\n bindInstance(binding, el);\n }\n });\n}\n\nfunction bindInstance(binding, el) {\n let jsonEl = $(el).find(\"script[type='application/json'][data-for='\" + $escape(el.id) + \"']\");\n let data = JSON.parse(jsonEl[0].innerText);\n\n let instance = binding.factory(el, data);\n $(el).data(\"crosstalk-instance\", instance);\n 
$(el).addClass(\"crosstalk-input-bound\");\n}\n\nif (global.Shiny) {\n let inputBinding = new global.Shiny.InputBinding();\n let $ = global.jQuery;\n $.extend(inputBinding, {\n find: function(scope) {\n return $(scope).find(\".crosstalk-input\");\n },\n initialize: function(el) {\n if (!$(el).hasClass(\"crosstalk-input-bound\")) {\n bindEl(el);\n }\n },\n getId: function(el) {\n return el.id;\n },\n getValue: function(el) {\n\n },\n setValue: function(el, value) {\n\n },\n receiveMessage: function(el, data) {\n\n },\n subscribe: function(el, callback) {\n $(el).data(\"crosstalk-instance\").resume();\n },\n unsubscribe: function(el) {\n $(el).data(\"crosstalk-instance\").suspend();\n }\n });\n global.Shiny.inputBindings.register(inputBinding, \"crosstalk.inputBinding\");\n}\n","import * as input from \"./input\";\nimport { FilterHandle } from \"./filter\";\n\nlet $ = global.jQuery;\n\ninput.register({\n className: \"crosstalk-input-checkboxgroup\",\n\n factory: function(el, data) {\n /*\n * map: {\"groupA\": [\"keyA\", \"keyB\", ...], ...}\n * group: \"ct-groupname\"\n */\n let ctHandle = new FilterHandle(data.group);\n\n let lastKnownKeys;\n let $el = $(el);\n $el.on(\"change\", \"input[type='checkbox']\", function() {\n let checked = $el.find(\"input[type='checkbox']:checked\");\n if (checked.length === 0) {\n lastKnownKeys = null;\n ctHandle.clear();\n } else {\n let keys = {};\n checked.each(function() {\n data.map[this.value].forEach(function(key) {\n keys[key] = true;\n });\n });\n let keyArray = Object.keys(keys);\n keyArray.sort();\n lastKnownKeys = keyArray;\n ctHandle.set(keyArray);\n }\n });\n\n return {\n suspend: function() {\n ctHandle.clear();\n },\n resume: function() {\n if (lastKnownKeys)\n ctHandle.set(lastKnownKeys);\n }\n };\n }\n});\n","import * as input from \"./input\";\nimport * as util from \"./util\";\nimport { FilterHandle } from \"./filter\";\n\nlet $ = global.jQuery;\n\ninput.register({\n className: \"crosstalk-input-select\",\n\n 
factory: function(el, data) {\n /*\n * items: {value: [...], label: [...]}\n * map: {\"groupA\": [\"keyA\", \"keyB\", ...], ...}\n * group: \"ct-groupname\"\n */\n\n let first = [{value: \"\", label: \"(All)\"}];\n let items = util.dataframeToD3(data.items);\n let opts = {\n options: first.concat(items),\n valueField: \"value\",\n labelField: \"label\",\n searchField: \"label\"\n };\n\n let select = $(el).find(\"select\")[0];\n\n let selectize = $(select).selectize(opts)[0].selectize;\n\n let ctHandle = new FilterHandle(data.group);\n\n let lastKnownKeys;\n selectize.on(\"change\", function() {\n if (selectize.items.length === 0) {\n lastKnownKeys = null;\n ctHandle.clear();\n } else {\n let keys = {};\n selectize.items.forEach(function(group) {\n data.map[group].forEach(function(key) {\n keys[key] = true;\n });\n });\n let keyArray = Object.keys(keys);\n keyArray.sort();\n lastKnownKeys = keyArray;\n ctHandle.set(keyArray);\n }\n });\n\n return {\n suspend: function() {\n ctHandle.clear();\n },\n resume: function() {\n if (lastKnownKeys)\n ctHandle.set(lastKnownKeys);\n }\n };\n }\n});\n","import * as input from \"./input\";\nimport { FilterHandle } from \"./filter\";\n\nlet $ = global.jQuery;\nlet strftime = global.strftime;\n\ninput.register({\n className: \"crosstalk-input-slider\",\n\n factory: function(el, data) {\n /*\n * map: {\"groupA\": [\"keyA\", \"keyB\", ...], ...}\n * group: \"ct-groupname\"\n */\n let ctHandle = new FilterHandle(data.group);\n\n let opts = {};\n let $el = $(el).find(\"input\");\n let dataType = $el.data(\"data-type\");\n let timeFormat = $el.data(\"time-format\");\n let round = $el.data(\"round\");\n let timeFormatter;\n\n // Set up formatting functions\n if (dataType === \"date\") {\n timeFormatter = strftime.utc();\n opts.prettify = function(num) {\n return timeFormatter(timeFormat, new Date(num));\n };\n\n } else if (dataType === \"datetime\") {\n let timezone = $el.data(\"timezone\");\n if (timezone)\n timeFormatter = 
strftime.timezone(timezone);\n else\n timeFormatter = strftime;\n\n opts.prettify = function(num) {\n return timeFormatter(timeFormat, new Date(num));\n };\n } else if (dataType === \"number\") {\n if (typeof round !== \"undefined\")\n opts.prettify = function(num) {\n let factor = Math.pow(10, round);\n return Math.round(num * factor) / factor;\n };\n }\n\n $el.ionRangeSlider(opts);\n\n function getValue() {\n let result = $el.data(\"ionRangeSlider\").result;\n\n // Function for converting numeric value from slider to appropriate type.\n let convert;\n let dataType = $el.data(\"data-type\");\n if (dataType === \"date\") {\n convert = function(val) {\n return formatDateUTC(new Date(+val));\n };\n } else if (dataType === \"datetime\") {\n convert = function(val) {\n // Convert ms to s\n return +val / 1000;\n };\n } else {\n convert = function(val) { return +val; };\n }\n\n if ($el.data(\"ionRangeSlider\").options.type === \"double\") {\n return [convert(result.from), convert(result.to)];\n } else {\n return convert(result.from);\n }\n }\n\n let lastKnownKeys = null;\n\n $el.on(\"change.crosstalkSliderInput\", function(event) {\n if (!$el.data(\"updating\") && !$el.data(\"animating\")) {\n let [from, to] = getValue();\n let keys = [];\n for (let i = 0; i < data.values.length; i++) {\n let val = data.values[i];\n if (val >= from && val <= to) {\n keys.push(data.keys[i]);\n }\n }\n keys.sort();\n ctHandle.set(keys);\n lastKnownKeys = keys;\n }\n });\n\n\n // let $el = $(el);\n // $el.on(\"change\", \"input[type=\"checkbox\"]\", function() {\n // let checked = $el.find(\"input[type=\"checkbox\"]:checked\");\n // if (checked.length === 0) {\n // ctHandle.clear();\n // } else {\n // let keys = {};\n // checked.each(function() {\n // data.map[this.value].forEach(function(key) {\n // keys[key] = true;\n // });\n // });\n // let keyArray = Object.keys(keys);\n // keyArray.sort();\n // ctHandle.set(keyArray);\n // }\n // });\n\n return {\n suspend: function() {\n 
ctHandle.clear();\n },\n resume: function() {\n if (lastKnownKeys)\n ctHandle.set(lastKnownKeys);\n }\n };\n }\n});\n\n\n// Convert a number to a string with leading zeros\nfunction padZeros(n, digits) {\n let str = n.toString();\n while (str.length < digits)\n str = \"0\" + str;\n return str;\n}\n\n// Given a Date object, return a string in yyyy-mm-dd format, using the\n// UTC date. This may be a day off from the date in the local time zone.\nfunction formatDateUTC(date) {\n if (date instanceof Date) {\n return date.getUTCFullYear() + \"-\" +\n padZeros(date.getUTCMonth()+1, 2) + \"-\" +\n padZeros(date.getUTCDate(), 2);\n\n } else {\n return null;\n }\n}\n","import Events from \"./events\";\nimport grp from \"./group\";\nimport * as util from \"./util\";\n\n/**\n * Use this class to read and write (and listen for changes to) the selection\n * for a Crosstalk group. This is intended to be used for linked brushing.\n *\n * If two (or more) `SelectionHandle` instances in the same webpage share the\n * same group name, they will share the same state. Setting the selection using\n * one `SelectionHandle` instance will result in the `value` property instantly\n * changing across the others, and `\"change\"` event listeners on all instances\n * (including the one that initiated the sending) will fire.\n *\n * @param {string} [group] - The name of the Crosstalk group, or if none,\n * null or undefined (or any other falsy value). This can be changed later\n * via the [SelectionHandle#setGroup](#setGroup) method.\n * @param {Object} [extraInfo] - An object whose properties will be copied to\n * the event object whenever an event is emitted.\n */\nexport class SelectionHandle {\n\n constructor(group = null, extraInfo = null) {\n this._eventRelay = new Events();\n this._emitter = new util.SubscriptionTracker(this._eventRelay);\n\n // Name of the group we're currently tracking, if any. Can change over time.\n this._group = null;\n // The Var we're currently tracking, if any. 
Can change over time.\n this._var = null;\n // The event handler subscription we currently have on var.on(\"change\").\n this._varOnChangeSub = null;\n\n this._extraInfo = util.extend({ sender: this }, extraInfo);\n\n this.setGroup(group);\n }\n\n /**\n * Changes the Crosstalk group membership of this SelectionHandle. The group\n * being switched away from (if any) will not have its selection value\n * modified as a result of calling `setGroup`, even if this handle was the\n * most recent handle to set the selection of the group.\n *\n * The group being switched to (if any) will also not have its selection value\n * modified as a result of calling `setGroup`. If you want to set the\n * selection value of the new group, call `set` explicitly.\n *\n * @param {string} group - The name of the Crosstalk group, or null (or\n * undefined) to clear the group.\n */\n setGroup(group) {\n // If group is unchanged, do nothing\n if (this._group === group)\n return;\n // Treat null, undefined, and other falsy values the same\n if (!this._group && !group)\n return;\n\n if (this._var) {\n this._var.off(\"change\", this._varOnChangeSub);\n this._var = null;\n this._varOnChangeSub = null;\n }\n\n this._group = group;\n\n if (group) {\n this._var = grp(group).var(\"selection\");\n let sub = this._var.on(\"change\", (e) => {\n this._eventRelay.trigger(\"change\", e, this);\n });\n this._varOnChangeSub = sub;\n }\n }\n\n /**\n * Retrieves the current selection for the group represented by this\n * `SelectionHandle`.\n *\n * - If no selection is active, then this value will be falsy.\n * - If a selection is active, but no data points are selected, then this\n * value will be an empty array.\n * - If a selection is active, and data points are selected, then the keys\n * of the selected data points will be present in the array.\n */\n get value() {\n return this._var ? 
this._var.get() : null;\n }\n\n /**\n * Combines the given `extraInfo` (if any) with the handle's default\n * `_extraInfo` (if any).\n * @private\n */\n _mergeExtraInfo(extraInfo) {\n // Important incidental effect: shallow clone is returned\n return util.extend({},\n this._extraInfo ? this._extraInfo : null,\n extraInfo ? extraInfo : null);\n }\n\n /**\n * Overwrites the current selection for the group, and raises the `\"change\"`\n * event among all of the group's '`SelectionHandle` instances (including\n * this one).\n *\n * @fires SelectionHandle#change\n * @param {string[]} selectedKeys - Falsy, empty array, or array of keys (see\n * {@link SelectionHandle#value}).\n * @param {Object} [extraInfo] - Extra properties to be included on the event\n * object that's passed to listeners (in addition to any options that were\n * passed into the `SelectionHandle` constructor).\n */\n set(selectedKeys, extraInfo) {\n if (this._var)\n this._var.set(selectedKeys, this._mergeExtraInfo(extraInfo));\n }\n\n /**\n * Overwrites the current selection for the group, and raises the `\"change\"`\n * event among all of the group's '`SelectionHandle` instances (including\n * this one).\n *\n * @fires SelectionHandle#change\n * @param {Object} [extraInfo] - Extra properties to be included on the event\n * object that's passed to listeners (in addition to any that were passed\n * into the `SelectionHandle` constructor).\n */\n clear(extraInfo) {\n if (this._var)\n this.set(void 0, this._mergeExtraInfo(extraInfo));\n }\n\n /**\n * Subscribes to events on this `SelectionHandle`.\n *\n * @param {string} eventType - Indicates the type of events to listen to.\n * Currently, only `\"change\"` is supported.\n * @param {SelectionHandle~listener} listener - The callback function that\n * will be invoked when the event occurs.\n * @return {string} - A token to pass to {@link SelectionHandle#off} to cancel\n * this subscription.\n */\n on(eventType, listener) {\n return 
this._emitter.on(eventType, listener);\n }\n\n /**\n * Cancels event subscriptions created by {@link SelectionHandle#on}.\n *\n * @param {string} eventType - The type of event to unsubscribe.\n * @param {string|SelectionHandle~listener} listener - Either the callback\n * function previously passed into {@link SelectionHandle#on}, or the\n * string that was returned from {@link SelectionHandle#on}.\n */\n off(eventType, listener) {\n return this._emitter.off(eventType, listener);\n }\n\n /**\n * Shuts down the `SelectionHandle` object.\n *\n * Removes all event listeners that were added through this handle.\n */\n close() {\n this._emitter.removeAllListeners();\n this.setGroup(null);\n }\n}\n\n/**\n * @callback SelectionHandle~listener\n * @param {Object} event - An object containing details of the event. For\n * `\"change\"` events, this includes the properties `value` (the new\n * value of the selection, or `undefined` if no selection is active),\n * `oldValue` (the previous value of the selection), and `sender` (the\n * `SelectionHandle` instance that made the change).\n */\n\n/**\n * @event SelectionHandle#change\n * @type {object}\n * @property {object} value - The new value of the selection, or `undefined`\n * if no selection is active.\n * @property {object} oldValue - The previous value of the selection.\n * @property {SelectionHandle} sender - The `SelectionHandle` instance that\n * changed the value.\n */\n","export function extend(target, ...sources) {\n for (let i = 0; i < sources.length; i++) {\n let src = sources[i];\n if (typeof(src) === \"undefined\" || src === null)\n continue;\n\n for (let key in src) {\n if (src.hasOwnProperty(key)) {\n target[key] = src[key];\n }\n }\n }\n return target;\n}\n\nexport function checkSorted(list) {\n for (let i = 1; i < list.length; i++) {\n if (list[i] <= list[i-1]) {\n throw new Error(\"List is not sorted or contains duplicate\");\n }\n }\n}\n\nexport function diffSortedLists(a, b) {\n let i_a = 0;\n let i_b = 
0;\n\n if (!a) a = [];\n if (!b) b = [];\n\n let a_only = [];\n let b_only = [];\n\n checkSorted(a);\n checkSorted(b);\n\n while (i_a < a.length && i_b < b.length) {\n if (a[i_a] === b[i_b]) {\n i_a++;\n i_b++;\n } else if (a[i_a] < b[i_b]) {\n a_only.push(a[i_a++]);\n } else {\n b_only.push(b[i_b++]);\n }\n }\n\n if (i_a < a.length)\n a_only = a_only.concat(a.slice(i_a));\n if (i_b < b.length)\n b_only = b_only.concat(b.slice(i_b));\n return {\n removed: a_only,\n added: b_only\n };\n}\n\n// Convert from wide: { colA: [1,2,3], colB: [4,5,6], ... }\n// to long: [ {colA: 1, colB: 4}, {colA: 2, colB: 5}, ... ]\nexport function dataframeToD3(df) {\n let names = [];\n let length;\n for (let name in df) {\n if (df.hasOwnProperty(name))\n names.push(name);\n if (typeof(df[name]) !== \"object\" || typeof(df[name].length) === \"undefined\") {\n throw new Error(\"All fields must be arrays\");\n } else if (typeof(length) !== \"undefined\" && length !== df[name].length) {\n throw new Error(\"All fields must be arrays of the same length\");\n }\n length = df[name].length;\n }\n let results = [];\n let item;\n for (let row = 0; row < length; row++) {\n item = {};\n for (let col = 0; col < names.length; col++) {\n item[names[col]] = df[names[col]][row];\n }\n results.push(item);\n }\n return results;\n}\n\n/**\n * Keeps track of all event listener additions/removals and lets all active\n * listeners be removed with a single operation.\n *\n * @private\n */\nexport class SubscriptionTracker {\n constructor(emitter) {\n this._emitter = emitter;\n this._subs = {};\n }\n\n on(eventType, listener) {\n let sub = this._emitter.on(eventType, listener);\n this._subs[sub] = eventType;\n return sub;\n }\n\n off(eventType, listener) {\n let sub = this._emitter.off(eventType, listener);\n if (sub) {\n delete this._subs[sub];\n }\n return sub;\n }\n\n removeAllListeners() {\n let current_subs = this._subs;\n this._subs = {};\n Object.keys(current_subs).forEach((sub) => {\n 
this._emitter.off(current_subs[sub], sub);\n });\n }\n}\n","import Events from \"./events\";\n\nexport default class Var {\n constructor(group, name, /*optional*/ value) {\n this._group = group;\n this._name = name;\n this._value = value;\n this._events = new Events();\n }\n\n get() {\n return this._value;\n }\n\n set(value, /*optional*/ event) {\n if (this._value === value) {\n // Do nothing; the value hasn't changed\n return;\n }\n let oldValue = this._value;\n this._value = value;\n // Alert JavaScript listeners that the value has changed\n let evt = {};\n if (event && typeof(event) === \"object\") {\n for (let k in event) {\n if (event.hasOwnProperty(k))\n evt[k] = event[k];\n }\n }\n evt.oldValue = oldValue;\n evt.value = value;\n this._events.trigger(\"change\", evt, this);\n\n // TODO: Make this extensible, to let arbitrary back-ends know that\n // something has changed\n if (global.Shiny && global.Shiny.onInputChange) {\n global.Shiny.onInputChange(\n \".clientValue-\" +\n (this._group.name !== null ? this._group.name + \"-\" : \"\") +\n this._name,\n typeof(value) === \"undefined\" ? null : value\n );\n }\n }\n\n on(eventType, listener) {\n return this._events.on(eventType, listener);\n }\n\n off(eventType, listener) {\n return this._events.off(eventType, listener);\n }\n}\n"]} \ No newline at end of file diff --git a/_freeze/site_libs/crosstalk-1.2.1/scss/crosstalk.scss b/_freeze/site_libs/crosstalk-1.2.1/scss/crosstalk.scss deleted file mode 100644 index 35665616f..000000000 --- a/_freeze/site_libs/crosstalk-1.2.1/scss/crosstalk.scss +++ /dev/null @@ -1,75 +0,0 @@ -/* Adjust margins outwards, so column contents line up with the edges of the - parent of container-fluid. */ -.container-fluid.crosstalk-bscols { - margin-left: -30px; - margin-right: -30px; - white-space: normal; -} - -/* But don't adjust the margins outwards if we're directly under the body, - i.e. we were the top-level of something at the console. 
*/ -body > .container-fluid.crosstalk-bscols { - margin-left: auto; - margin-right: auto; -} - -.crosstalk-input-checkboxgroup .crosstalk-options-group .crosstalk-options-column { - display: inline-block; - padding-right: 12px; - vertical-align: top; -} - -@media only screen and (max-width:480px) { - .crosstalk-input-checkboxgroup .crosstalk-options-group .crosstalk-options-column { - display: block; - padding-right: inherit; - } -} - -/* Relevant BS3 styles to make filter_checkbox() look reasonable without Bootstrap */ -.crosstalk-input { - margin-bottom: 15px; /* a la .form-group */ - .control-label { - margin-bottom: 0; - vertical-align: middle; - } - input[type="checkbox"] { - margin: 4px 0 0; - margin-top: 1px; - line-height: normal; - } - .checkbox { - position: relative; - display: block; - margin-top: 10px; - margin-bottom: 10px; - } - .checkbox > label{ - padding-left: 20px; - margin-bottom: 0; - font-weight: 400; - cursor: pointer; - } - .checkbox input[type="checkbox"], - .checkbox-inline input[type="checkbox"] { - position: absolute; - margin-top: 2px; - margin-left: -20px; - } - .checkbox + .checkbox { - margin-top: -5px; - } - .checkbox-inline { - position: relative; - display: inline-block; - padding-left: 20px; - margin-bottom: 0; - font-weight: 400; - vertical-align: middle; - cursor: pointer; - } - .checkbox-inline + .checkbox-inline { - margin-top: 0; - margin-left: 10px; - } -} diff --git a/_freeze/site_libs/htmltools-fill-0.5.8.1/fill.css b/_freeze/site_libs/htmltools-fill-0.5.8.1/fill.css deleted file mode 100644 index 841ea9d59..000000000 --- a/_freeze/site_libs/htmltools-fill-0.5.8.1/fill.css +++ /dev/null @@ -1,21 +0,0 @@ -@layer htmltools { - .html-fill-container { - display: flex; - flex-direction: column; - /* Prevent the container from expanding vertically or horizontally beyond its - parent's constraints. 
*/ - min-height: 0; - min-width: 0; - } - .html-fill-container > .html-fill-item { - /* Fill items can grow and shrink freely within - available vertical space in fillable container */ - flex: 1 1 auto; - min-height: 0; - min-width: 0; - } - .html-fill-container > :not(.html-fill-item) { - /* Prevent shrinking or growing of non-fill items */ - flex: 0 0 auto; - } -} diff --git a/_freeze/site_libs/plotly-binding-4.10.4/plotly.js b/_freeze/site_libs/plotly-binding-4.10.4/plotly.js deleted file mode 100644 index 7a2a143b6..000000000 --- a/_freeze/site_libs/plotly-binding-4.10.4/plotly.js +++ /dev/null @@ -1,941 +0,0 @@ - -HTMLWidgets.widget({ - name: "plotly", - type: "output", - - initialize: function(el, width, height) { - return {}; - }, - - resize: function(el, width, height, instance) { - if (instance.autosize) { - var width = instance.width || width; - var height = instance.height || height; - Plotly.relayout(el.id, {width: width, height: height}); - } - }, - - renderValue: function(el, x, instance) { - - // Plotly.relayout() mutates the plot input object, so make sure to - // keep a reference to the user-supplied width/height *before* - // we call Plotly.plot(); - var lay = x.layout || {}; - instance.width = lay.width; - instance.height = lay.height; - instance.autosize = lay.autosize || true; - - /* - / 'inform the world' about highlighting options this is so other - / crosstalk libraries have a chance to respond to special settings - / such as persistent selection. 
- / AFAIK, leaflet is the only library with such intergration - / https://github.com/rstudio/leaflet/pull/346/files#diff-ad0c2d51ce5fdf8c90c7395b102f4265R154 - */ - var ctConfig = crosstalk.var('plotlyCrosstalkOpts').set(x.highlight); - - if (typeof(window) !== "undefined") { - // make sure plots don't get created outside the network (for on-prem) - window.PLOTLYENV = window.PLOTLYENV || {}; - window.PLOTLYENV.BASE_URL = x.base_url; - - // Enable persistent selection when shift key is down - // https://stackoverflow.com/questions/1828613/check-if-a-key-is-down - var persistOnShift = function(e) { - if (!e) window.event; - if (e.shiftKey) { - x.highlight.persistent = true; - x.highlight.persistentShift = true; - } else { - x.highlight.persistent = false; - x.highlight.persistentShift = false; - } - }; - - // Only relevant if we haven't forced persistent mode at command line - if (!x.highlight.persistent) { - window.onmousemove = persistOnShift; - } - } - - var graphDiv = document.getElementById(el.id); - - // TODO: move the control panel injection strategy inside here... - HTMLWidgets.addPostRenderHandler(function() { - - // lower the z-index of the modebar to prevent it from highjacking hover - // (TODO: do this via CSS?) 
- // https://github.com/ropensci/plotly/issues/956 - // https://www.w3schools.com/jsref/prop_style_zindex.asp - var modebars = document.querySelectorAll(".js-plotly-plot .plotly .modebar"); - for (var i = 0; i < modebars.length; i++) { - modebars[i].style.zIndex = 1; - } - }); - - // inject a "control panel" holding selectize/dynamic color widget(s) - if ((x.selectize || x.highlight.dynamic) && !instance.plotly) { - var flex = document.createElement("div"); - flex.class = "plotly-crosstalk-control-panel"; - flex.style = "display: flex; flex-wrap: wrap"; - - // inject the colourpicker HTML container into the flexbox - if (x.highlight.dynamic) { - var pickerDiv = document.createElement("div"); - - var pickerInput = document.createElement("input"); - pickerInput.id = el.id + "-colourpicker"; - pickerInput.placeholder = "asdasd"; - - var pickerLabel = document.createElement("label"); - pickerLabel.for = pickerInput.id; - pickerLabel.innerHTML = "Brush color  "; - - pickerDiv.appendChild(pickerLabel); - pickerDiv.appendChild(pickerInput); - flex.appendChild(pickerDiv); - } - - // inject selectize HTML containers (one for every crosstalk group) - if (x.selectize) { - var ids = Object.keys(x.selectize); - - for (var i = 0; i < ids.length; i++) { - var container = document.createElement("div"); - container.id = ids[i]; - container.style = "width: 80%; height: 10%"; - container.class = "form-group crosstalk-input-plotly-highlight"; - - var label = document.createElement("label"); - label.for = ids[i]; - label.innerHTML = x.selectize[ids[i]].group; - label.class = "control-label"; - - var selectDiv = document.createElement("div"); - var select = document.createElement("select"); - select.multiple = true; - - selectDiv.appendChild(select); - container.appendChild(label); - container.appendChild(selectDiv); - flex.appendChild(container); - } - } - - // finally, insert the flexbox inside the htmlwidget container, - // but before the plotly graph div - 
graphDiv.parentElement.insertBefore(flex, graphDiv); - - if (x.highlight.dynamic) { - var picker = $("#" + pickerInput.id); - var colors = x.highlight.color || []; - // TODO: let users specify options? - var opts = { - value: colors[0], - showColour: "both", - palette: "limited", - allowedCols: colors.join(" "), - width: "20%", - height: "10%" - }; - picker.colourpicker({changeDelay: 0}); - picker.colourpicker("settings", opts); - picker.colourpicker("value", opts.value); - // inform crosstalk about a change in the current selection colour - var grps = x.highlight.ctGroups || []; - for (var i = 0; i < grps.length; i++) { - crosstalk.group(grps[i]).var('plotlySelectionColour') - .set(picker.colourpicker('value')); - } - picker.on("change", function() { - for (var i = 0; i < grps.length; i++) { - crosstalk.group(grps[i]).var('plotlySelectionColour') - .set(picker.colourpicker('value')); - } - }); - } - } - - // if no plot exists yet, create one with a particular configuration - if (!instance.plotly) { - - var plot = Plotly.newPlot(graphDiv, x); - instance.plotly = true; - - } else if (x.layout.transition) { - - var plot = Plotly.react(graphDiv, x); - - } else { - - // this is essentially equivalent to Plotly.newPlot(), but avoids creating - // a new webgl context - // https://github.com/plotly/plotly.js/blob/2b24f9def901831e61282076cf3f835598d56f0e/src/plot_api/plot_api.js#L531-L532 - - // TODO: restore crosstalk selections? - Plotly.purge(graphDiv); - // TODO: why is this necessary to get crosstalk working? 
- graphDiv.data = undefined; - graphDiv.layout = undefined; - var plot = Plotly.newPlot(graphDiv, x); - } - - // Trigger plotly.js calls defined via `plotlyProxy()` - plot.then(function() { - if (HTMLWidgets.shinyMode) { - Shiny.addCustomMessageHandler("plotly-calls", function(msg) { - var gd = document.getElementById(msg.id); - if (!gd) { - throw new Error("Couldn't find plotly graph with id: " + msg.id); - } - // This isn't an official plotly.js method, but it's the only current way to - // change just the configuration of a plot - // https://community.plot.ly/t/update-config-function/9057 - if (msg.method == "reconfig") { - Plotly.react(gd, gd.data, gd.layout, msg.args); - return; - } - if (!Plotly[msg.method]) { - throw new Error("Unknown method " + msg.method); - } - var args = [gd].concat(msg.args); - Plotly[msg.method].apply(null, args); - }); - } - - // plotly's mapbox API doesn't currently support setting bounding boxes - // https://www.mapbox.com/mapbox-gl-js/example/fitbounds/ - // so we do this manually... - // TODO: make sure this triggers on a redraw and relayout as well as on initial draw - var mapboxIDs = graphDiv._fullLayout._subplots.mapbox || []; - for (var i = 0; i < mapboxIDs.length; i++) { - var id = mapboxIDs[i]; - var mapOpts = x.layout[id] || {}; - var args = mapOpts._fitBounds || {}; - if (!args) { - continue; - } - var mapObj = graphDiv._fullLayout[id]._subplot.map; - mapObj.fitBounds(args.bounds, args.options); - } - - }); - - // Attach attributes (e.g., "key", "z") to plotly event data - function eventDataWithKey(eventData) { - if (eventData === undefined || !eventData.hasOwnProperty("points")) { - return null; - } - return eventData.points.map(function(pt) { - var obj = { - curveNumber: pt.curveNumber, - pointNumber: pt.pointNumber, - x: pt.x, - y: pt.y - }; - - // If 'z' is reported with the event data, then use it! 
- if (pt.hasOwnProperty("z")) { - obj.z = pt.z; - } - - if (pt.hasOwnProperty("customdata")) { - obj.customdata = pt.customdata; - } - - /* - TL;DR: (I think) we have to select the graph div (again) to attach keys... - - Why? Remember that crosstalk will dynamically add/delete traces - (see traceManager.prototype.updateSelection() below) - For this reason, we can't simply grab keys from x.data (like we did previously) - Moreover, we can't use _fullData, since that doesn't include - unofficial attributes. It's true that click/hover events fire with - pt.data, but drag events don't... - */ - var gd = document.getElementById(el.id); - var trace = gd.data[pt.curveNumber]; - - if (!trace._isSimpleKey) { - var attrsToAttach = ["key"]; - } else { - // simple keys fire the whole key - obj.key = trace.key; - var attrsToAttach = []; - } - - for (var i = 0; i < attrsToAttach.length; i++) { - var attr = trace[attrsToAttach[i]]; - if (Array.isArray(attr)) { - if (typeof pt.pointNumber === "number") { - obj[attrsToAttach[i]] = attr[pt.pointNumber]; - } else if (Array.isArray(pt.pointNumber)) { - obj[attrsToAttach[i]] = attr[pt.pointNumber[0]][pt.pointNumber[1]]; - } else if (Array.isArray(pt.pointNumbers)) { - obj[attrsToAttach[i]] = pt.pointNumbers.map(function(idx) { return attr[idx]; }); - } - } - } - return obj; - }); - } - - - var legendEventData = function(d) { - // if legendgroup is not relevant just return the trace - var trace = d.data[d.curveNumber]; - if (!trace.legendgroup) return trace; - - // if legendgroup was specified, return all traces that match the group - var legendgrps = d.data.map(function(trace){ return trace.legendgroup; }); - var traces = []; - for (i = 0; i < legendgrps.length; i++) { - if (legendgrps[i] == trace.legendgroup) { - traces.push(d.data[i]); - } - } - - return traces; - }; - - - // send user input event data to shiny - if (HTMLWidgets.shinyMode && Shiny.setInputValue) { - - // Some events clear other input values - // TODO: always register 
these? - var eventClearMap = { - plotly_deselect: ["plotly_selected", "plotly_selecting", "plotly_brushed", "plotly_brushing", "plotly_click"], - plotly_unhover: ["plotly_hover"], - plotly_doubleclick: ["plotly_click"] - }; - - Object.keys(eventClearMap).map(function(evt) { - graphDiv.on(evt, function() { - var inputsToClear = eventClearMap[evt]; - inputsToClear.map(function(input) { - Shiny.setInputValue(input + "-" + x.source, null, {priority: "event"}); - }); - }); - }); - - var eventDataFunctionMap = { - plotly_click: eventDataWithKey, - plotly_sunburstclick: eventDataWithKey, - plotly_hover: eventDataWithKey, - plotly_unhover: eventDataWithKey, - // If 'plotly_selected' has already been fired, and you click - // on the plot afterwards, this event fires `undefined`?!? - // That might be considered a plotly.js bug, but it doesn't make - // sense for this input change to occur if `d` is falsy because, - // even in the empty selection case, `d` is truthy (an object), - // and the 'plotly_deselect' event will reset this input - plotly_selected: function(d) { if (d) { return eventDataWithKey(d); } }, - plotly_selecting: function(d) { if (d) { return eventDataWithKey(d); } }, - plotly_brushed: function(d) { - if (d) { return d.range ? d.range : d.lassoPoints; } - }, - plotly_brushing: function(d) { - if (d) { return d.range ? d.range : d.lassoPoints; } - }, - plotly_legendclick: legendEventData, - plotly_legenddoubleclick: legendEventData, - plotly_clickannotation: function(d) { return d.fullAnnotation } - }; - - var registerShinyValue = function(event) { - var eventDataPreProcessor = eventDataFunctionMap[event] || function(d) { return d ? d : el.id }; - // some events are unique to the R package - var plotlyJSevent = (event == "plotly_brushed") ? "plotly_selected" : (event == "plotly_brushing") ? 
"plotly_selecting" : event; - // register the event - graphDiv.on(plotlyJSevent, function(d) { - Shiny.setInputValue( - event + "-" + x.source, - JSON.stringify(eventDataPreProcessor(d)), - {priority: "event"} - ); - }); - } - - var shinyEvents = x.shinyEvents || []; - shinyEvents.map(registerShinyValue); - } - - // Given an array of {curveNumber: x, pointNumber: y} objects, - // return a hash of { - // set1: {value: [key1, key2, ...], _isSimpleKey: false}, - // set2: {value: [key3, key4, ...], _isSimpleKey: false} - // } - function pointsToKeys(points) { - var keysBySet = {}; - for (var i = 0; i < points.length; i++) { - - var trace = graphDiv.data[points[i].curveNumber]; - if (!trace.key || !trace.set) { - continue; - } - - // set defaults for this keySet - // note that we don't track the nested property (yet) since we always - // emit the union -- http://cpsievert.github.io/talks/20161212b/#21 - keysBySet[trace.set] = keysBySet[trace.set] || { - value: [], - _isSimpleKey: trace._isSimpleKey - }; - - // Use pointNumber by default, but aggregated traces should emit pointNumbers - var ptNum = points[i].pointNumber; - var hasPtNum = typeof ptNum === "number"; - var ptNum = hasPtNum ? ptNum : points[i].pointNumbers; - - // selecting a point of a "simple" trace means: select the - // entire key attached to this trace, which is useful for, - // say clicking on a fitted line to select corresponding observations - var key = trace._isSimpleKey ? trace.key : Array.isArray(ptNum) ? ptNum.map(function(idx) { return trace.key[idx]; }) : trace.key[ptNum]; - // http://stackoverflow.com/questions/10865025/merge-flatten-an-array-of-arrays-in-javascript - var keyFlat = trace._isNestedKey ? [].concat.apply([], key) : key; - - // TODO: better to only add new values? 
- keysBySet[trace.set].value = keysBySet[trace.set].value.concat(keyFlat); - } - - return keysBySet; - } - - - x.highlight.color = x.highlight.color || []; - // make sure highlight color is an array - if (!Array.isArray(x.highlight.color)) { - x.highlight.color = [x.highlight.color]; - } - - var traceManager = new TraceManager(graphDiv, x.highlight); - - // Gather all *unique* sets. - var allSets = []; - for (var curveIdx = 0; curveIdx < x.data.length; curveIdx++) { - var newSet = x.data[curveIdx].set; - if (newSet) { - if (allSets.indexOf(newSet) === -1) { - allSets.push(newSet); - } - } - } - - // register event listeners for all sets - for (var i = 0; i < allSets.length; i++) { - - var set = allSets[i]; - var selection = new crosstalk.SelectionHandle(set); - var filter = new crosstalk.FilterHandle(set); - - var filterChange = function(e) { - removeBrush(el); - traceManager.updateFilter(set, e.value); - }; - filter.on("change", filterChange); - - - var selectionChange = function(e) { - - // Workaround for 'plotly_selected' now firing previously selected - // points (in addition to new ones) when holding shift key. In our case, - // we just want the new keys - if (x.highlight.on === "plotly_selected" && x.highlight.persistentShift) { - // https://stackoverflow.com/questions/1187518/how-to-get-the-difference-between-two-arrays-in-javascript - Array.prototype.diff = function(a) { - return this.filter(function(i) {return a.indexOf(i) < 0;}); - }; - e.value = e.value.diff(e.oldValue); - } - - // array of "event objects" tracking the selection history - // this is used to avoid adding redundant selections - var selectionHistory = crosstalk.var("plotlySelectionHistory").get() || []; - - // Construct an event object "defining" the current event. - var event = { - receiverID: traceManager.gd.id, - plotlySelectionColour: crosstalk.group(set).var("plotlySelectionColour").get() - }; - event[set] = e.value; - // TODO: is there a smarter way to check object equality? 
- if (selectionHistory.length > 0) { - var ev = JSON.stringify(event); - for (var i = 0; i < selectionHistory.length; i++) { - var sel = JSON.stringify(selectionHistory[i]); - if (sel == ev) { - return; - } - } - } - - // accumulate history for persistent selection - if (!x.highlight.persistent) { - selectionHistory = [event]; - } else { - selectionHistory.push(event); - } - crosstalk.var("plotlySelectionHistory").set(selectionHistory); - - // do the actual updating of traces, frames, and the selectize widget - traceManager.updateSelection(set, e.value); - // https://github.com/selectize/selectize.js/blob/master/docs/api.md#methods_items - if (x.selectize) { - if (!x.highlight.persistent || e.value === null) { - selectize.clear(true); - } - selectize.addItems(e.value, true); - selectize.close(); - } - } - selection.on("change", selectionChange); - - // Set a crosstalk variable selection value, triggering an update - var turnOn = function(e) { - if (e) { - var selectedKeys = pointsToKeys(e.points); - // Keys are group names, values are array of selected keys from group. 
- for (var set in selectedKeys) { - if (selectedKeys.hasOwnProperty(set)) { - selection.set(selectedKeys[set].value, {sender: el}); - } - } - } - }; - if (x.highlight.debounce > 0) { - turnOn = debounce(turnOn, x.highlight.debounce); - } - graphDiv.on(x.highlight.on, turnOn); - - graphDiv.on(x.highlight.off, function turnOff(e) { - // remove any visual clues - removeBrush(el); - // remove any selection history - crosstalk.var("plotlySelectionHistory").set(null); - // trigger the actual removal of selection traces - selection.set(null, {sender: el}); - }); - - // register a callback for selectize so that there is bi-directional - // communication between the widget and direct manipulation events - if (x.selectize) { - var selectizeID = Object.keys(x.selectize)[i]; - var options = x.selectize[selectizeID]; - var first = [{value: "", label: "(All)"}]; - var opts = $.extend({ - options: first.concat(options.items), - searchField: "label", - valueField: "value", - labelField: "label", - maxItems: 50 - }, - options - ); - var select = $("#" + selectizeID).find("select")[0]; - var selectize = $(select).selectize(opts)[0].selectize; - // NOTE: this callback is triggered when *directly* altering - // dropdown items - selectize.on("change", function() { - var currentItems = traceManager.groupSelections[set] || []; - if (!x.highlight.persistent) { - removeBrush(el); - for (var i = 0; i < currentItems.length; i++) { - selectize.removeItem(currentItems[i], true); - } - } - var newItems = selectize.items.filter(function(idx) { - return currentItems.indexOf(idx) < 0; - }); - if (newItems.length > 0) { - traceManager.updateSelection(set, newItems); - } else { - // Item has been removed... 
- // TODO: this logic won't work for dynamically changing palette - traceManager.updateSelection(set, null); - traceManager.updateSelection(set, selectize.items); - } - }); - } - } // end of selectionChange - - } // end of renderValue -}); // end of widget definition - -/** - * @param graphDiv The Plotly graph div - * @param highlight An object with options for updating selection(s) - */ -function TraceManager(graphDiv, highlight) { - // The Plotly graph div - this.gd = graphDiv; - - // Preserve the original data. - // TODO: try using Lib.extendFlat() as done in - // https://github.com/plotly/plotly.js/pull/1136 - this.origData = JSON.parse(JSON.stringify(graphDiv.data)); - - // avoid doing this over and over - this.origOpacity = []; - for (var i = 0; i < this.origData.length; i++) { - this.origOpacity[i] = this.origData[i].opacity === 0 ? 0 : (this.origData[i].opacity || 1); - } - - // key: group name, value: null or array of keys representing the - // most recently received selection for that group. 
- this.groupSelections = {}; - - // selection parameters (e.g., transient versus persistent selection) - this.highlight = highlight; -} - -TraceManager.prototype.close = function() { - // TODO: Unhook all event handlers -}; - -TraceManager.prototype.updateFilter = function(group, keys) { - - if (typeof(keys) === "undefined" || keys === null) { - - this.gd.data = JSON.parse(JSON.stringify(this.origData)); - - } else { - - var traces = []; - for (var i = 0; i < this.origData.length; i++) { - var trace = this.origData[i]; - if (!trace.key || trace.set !== group) { - continue; - } - var matchFunc = getMatchFunc(trace); - var matches = matchFunc(trace.key, keys); - - if (matches.length > 0) { - if (!trace._isSimpleKey) { - // subsetArrayAttrs doesn't mutate trace (it makes a modified clone) - trace = subsetArrayAttrs(trace, matches); - } - traces.push(trace); - } - } - this.gd.data = traces; - } - - Plotly.redraw(this.gd); - - // NOTE: we purposely do _not_ restore selection(s), since on filter, - // axis likely will update, changing the pixel -> data mapping, leading - // to a likely mismatch in the brush outline and highlighted marks - -}; - -TraceManager.prototype.updateSelection = function(group, keys) { - - if (keys !== null && !Array.isArray(keys)) { - throw new Error("Invalid keys argument; null or array expected"); - } - - // if selection has been cleared, or if this is transient - // selection, delete the "selection traces" - var nNewTraces = this.gd.data.length - this.origData.length; - if (keys === null || !this.highlight.persistent && nNewTraces > 0) { - var tracesToRemove = []; - for (var i = 0; i < this.gd.data.length; i++) { - if (this.gd.data[i]._isCrosstalkTrace) tracesToRemove.push(i); - } - Plotly.deleteTraces(this.gd, tracesToRemove); - this.groupSelections[group] = keys; - } else { - // add to the groupSelection, rather than overwriting it - // TODO: can this be removed? 
- this.groupSelections[group] = this.groupSelections[group] || []; - for (var i = 0; i < keys.length; i++) { - var k = keys[i]; - if (this.groupSelections[group].indexOf(k) < 0) { - this.groupSelections[group].push(k); - } - } - } - - if (keys === null) { - - Plotly.restyle(this.gd, {"opacity": this.origOpacity}); - - } else if (keys.length >= 1) { - - // placeholder for new "selection traces" - var traces = []; - // this variable is set in R/highlight.R - var selectionColour = crosstalk.group(group).var("plotlySelectionColour").get() || - this.highlight.color[0]; - - for (var i = 0; i < this.origData.length; i++) { - // TODO: try using Lib.extendFlat() as done in - // https://github.com/plotly/plotly.js/pull/1136 - var trace = JSON.parse(JSON.stringify(this.gd.data[i])); - if (!trace.key || trace.set !== group) { - continue; - } - // Get sorted array of matching indices in trace.key - var matchFunc = getMatchFunc(trace); - var matches = matchFunc(trace.key, keys); - - if (matches.length > 0) { - // If this is a "simple" key, that means select the entire trace - if (!trace._isSimpleKey) { - trace = subsetArrayAttrs(trace, matches); - } - // reach into the full trace object so we can properly reflect the - // selection attributes in every view - var d = this.gd._fullData[i]; - - /* - / Recursively inherit selection attributes from various sources, - / in order of preference: - / (1) official plotly.js selected attribute - / (2) highlight(selected = attrs_selected(...)) - */ - // TODO: it would be neat to have a dropdown to dynamically specify these! 
- $.extend(true, trace, this.highlight.selected); - - // if it is defined, override color with the "dynamic brush color"" - if (d.marker) { - trace.marker = trace.marker || {}; - trace.marker.color = selectionColour || trace.marker.color || d.marker.color; - } - if (d.line) { - trace.line = trace.line || {}; - trace.line.color = selectionColour || trace.line.color || d.line.color; - } - if (d.textfont) { - trace.textfont = trace.textfont || {}; - trace.textfont.color = selectionColour || trace.textfont.color || d.textfont.color; - } - if (d.fillcolor) { - // TODO: should selectionColour inherit alpha from the existing fillcolor? - trace.fillcolor = selectionColour || trace.fillcolor || d.fillcolor; - } - // attach a sensible name/legendgroup - trace.name = trace.name || keys.join("
"); - trace.legendgroup = trace.legendgroup || keys.join("
"); - - // keep track of mapping between this new trace and the trace it targets - // (necessary for updating frames to reflect the selection traces) - trace._originalIndex = i; - trace._newIndex = this.gd._fullData.length + traces.length; - trace._isCrosstalkTrace = true; - traces.push(trace); - } - } - - if (traces.length > 0) { - - Plotly.addTraces(this.gd, traces).then(function(gd) { - // incrementally add selection traces to frames - // (this is heavily inspired by Plotly.Plots.modifyFrames() - // in src/plots/plots.js) - var _hash = gd._transitionData._frameHash; - var _frames = gd._transitionData._frames || []; - - for (var i = 0; i < _frames.length; i++) { - - // add to _frames[i].traces *if* this frame references selected trace(s) - var newIndices = []; - for (var j = 0; j < traces.length; j++) { - var tr = traces[j]; - if (_frames[i].traces.indexOf(tr._originalIndex) > -1) { - newIndices.push(tr._newIndex); - _frames[i].traces.push(tr._newIndex); - } - } - - // nothing to do... 
- if (newIndices.length === 0) { - continue; - } - - var ctr = 0; - var nFrameTraces = _frames[i].data.length; - - for (var j = 0; j < nFrameTraces; j++) { - var frameTrace = _frames[i].data[j]; - if (!frameTrace.key || frameTrace.set !== group) { - continue; - } - - var matchFunc = getMatchFunc(frameTrace); - var matches = matchFunc(frameTrace.key, keys); - - if (matches.length > 0) { - if (!trace._isSimpleKey) { - frameTrace = subsetArrayAttrs(frameTrace, matches); - } - var d = gd._fullData[newIndices[ctr]]; - if (d.marker) { - frameTrace.marker = d.marker; - } - if (d.line) { - frameTrace.line = d.line; - } - if (d.textfont) { - frameTrace.textfont = d.textfont; - } - ctr = ctr + 1; - _frames[i].data.push(frameTrace); - } - } - - // update gd._transitionData._frameHash - _hash[_frames[i].name] = _frames[i]; - } - - }); - - // dim traces that have a set matching the set of selection sets - var tracesToDim = [], - opacities = [], - sets = Object.keys(this.groupSelections), - n = this.origData.length; - - for (var i = 0; i < n; i++) { - var opacity = this.origOpacity[i] || 1; - // have we already dimmed this trace? Or is this even worth doing? - if (opacity !== this.gd._fullData[i].opacity || this.highlight.opacityDim === 1) { - continue; - } - // is this set an element of the set of selection sets? - var matches = findMatches(sets, [this.gd.data[i].set]); - if (matches.length) { - tracesToDim.push(i); - opacities.push(opacity * this.highlight.opacityDim); - } - } - - if (tracesToDim.length > 0) { - Plotly.restyle(this.gd, {"opacity": opacities}, tracesToDim); - // turn off the selected/unselected API - Plotly.restyle(this.gd, {"selectedpoints": null}); - } - - } - - } -}; - -/* -Note: in all of these match functions, we assume needleSet (i.e. the selected keys) -is a 1D (or flat) array. The real difference is the meaning of haystack. -findMatches() does the usual thing you'd expect for -linked brushing on a scatterplot matrix. 
findSimpleMatches() returns a match iff -haystack is a subset of the needleSet. findNestedMatches() returns -*/ - -function getMatchFunc(trace) { - return (trace._isNestedKey) ? findNestedMatches : - (trace._isSimpleKey) ? findSimpleMatches : findMatches; -} - -// find matches for "flat" keys -function findMatches(haystack, needleSet) { - var matches = []; - haystack.forEach(function(obj, i) { - if (obj === null || needleSet.indexOf(obj) >= 0) { - matches.push(i); - } - }); - return matches; -} - -// find matches for "simple" keys -function findSimpleMatches(haystack, needleSet) { - var match = haystack.every(function(val) { - return val === null || needleSet.indexOf(val) >= 0; - }); - // yes, this doesn't make much sense other than conforming - // to the output type of the other match functions - return (match) ? [0] : [] -} - -// find matches for a "nested" haystack (2D arrays) -function findNestedMatches(haystack, needleSet) { - var matches = []; - for (var i = 0; i < haystack.length; i++) { - var hay = haystack[i]; - var match = hay.every(function(val) { - return val === null || needleSet.indexOf(val) >= 0; - }); - if (match) { - matches.push(i); - } - } - return matches; -} - -function isPlainObject(obj) { - return ( - Object.prototype.toString.call(obj) === '[object Object]' && - Object.getPrototypeOf(obj) === Object.prototype - ); -} - -function subsetArrayAttrs(obj, indices) { - var newObj = {}; - Object.keys(obj).forEach(function(k) { - var val = obj[k]; - - if (k.charAt(0) === "_") { - newObj[k] = val; - } else if (k === "transforms" && Array.isArray(val)) { - newObj[k] = val.map(function(transform) { - return subsetArrayAttrs(transform, indices); - }); - } else if (k === "colorscale" && Array.isArray(val)) { - newObj[k] = val; - } else if (isPlainObject(val)) { - newObj[k] = subsetArrayAttrs(val, indices); - } else if (Array.isArray(val)) { - newObj[k] = subsetArray(val, indices); - } else { - newObj[k] = val; - } - }); - return newObj; -} - -function 
subsetArray(arr, indices) { - var result = []; - for (var i = 0; i < indices.length; i++) { - result.push(arr[indices[i]]); - } - return result; -} - -// Convenience function for removing plotly's brush -function removeBrush(el) { - var outlines = el.querySelectorAll(".select-outline"); - for (var i = 0; i < outlines.length; i++) { - outlines[i].remove(); - } -} - - -// https://davidwalsh.name/javascript-debounce-function - -// Returns a function, that, as long as it continues to be invoked, will not -// be triggered. The function will be called after it stops being called for -// N milliseconds. If `immediate` is passed, trigger the function on the -// leading edge, instead of the trailing. -function debounce(func, wait, immediate) { - var timeout; - return function() { - var context = this, args = arguments; - var later = function() { - timeout = null; - if (!immediate) func.apply(context, args); - }; - var callNow = immediate && !timeout; - clearTimeout(timeout); - timeout = setTimeout(later, wait); - if (callNow) func.apply(context, args); - }; -}; diff --git a/_freeze/site_libs/plotly-htmlwidgets-css-2.11.1/plotly-htmlwidgets.css b/_freeze/site_libs/plotly-htmlwidgets-css-2.11.1/plotly-htmlwidgets.css deleted file mode 100644 index f35906d52..000000000 --- a/_freeze/site_libs/plotly-htmlwidgets-css-2.11.1/plotly-htmlwidgets.css +++ /dev/null @@ -1,9 +0,0 @@ -/* -just here so that plotly works -correctly with ioslides. -see https://github.com/ropensci/plotly/issues/463 -*/ - -slide:not(.current) .plotly.html-widget{ - display: none; -} diff --git a/_freeze/site_libs/plotly-main-2.11.1/plotly-latest.min.js b/_freeze/site_libs/plotly-main-2.11.1/plotly-latest.min.js deleted file mode 100644 index 27bfdaa93..000000000 --- a/_freeze/site_libs/plotly-main-2.11.1/plotly-latest.min.js +++ /dev/null @@ -1,69 +0,0 @@ -/** -* plotly.js v2.11.1 -* Copyright 2012-2022, Plotly, Inc. -* All rights reserved. 
-* Licensed under the MIT license -*/ -!function(t){if("object"==typeof exports&&"undefined"!=typeof module)module.exports=t();else if("function"==typeof define&&define.amd)define([],t);else{("undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:this).Plotly=t()}}((function(){return function t(e,r,n){function i(o,s){if(!r[o]){if(!e[o]){var l="function"==typeof require&&require;if(!s&&l)return l(o,!0);if(a)return a(o,!0);var c=new Error("Cannot find module '"+o+"'");throw c.code="MODULE_NOT_FOUND",c}var u=r[o]={exports:{}};e[o][0].call(u.exports,(function(t){return i(e[o][1][t]||t)}),u,u.exports,t,e,r,n)}return r[o].exports}for(var a="function"==typeof require&&require,o=0;o:not(.watermark)":"opacity:0;-webkit-transition:opacity .3s ease 0s;-moz-transition:opacity .3s ease 0s;-ms-transition:opacity .3s ease 0s;-o-transition:opacity .3s ease 0s;transition:opacity .3s ease 0s;","X:hover .modebar--hover .modebar-group":"opacity:1;","X .modebar-group":"float:left;display:inline-block;box-sizing:border-box;padding-left:8px;position:relative;vertical-align:middle;white-space:nowrap;","X .modebar-btn":"position:relative;font-size:16px;padding:3px 4px;height:22px;cursor:pointer;line-height:normal;box-sizing:border-box;","X .modebar-btn svg":"position:relative;top:2px;","X .modebar.vertical":"display:flex;flex-direction:column;flex-wrap:wrap;align-content:flex-end;max-height:100%;","X .modebar.vertical svg":"top:-1px;","X .modebar.vertical .modebar-group":"display:block;float:none;padding-left:0px;padding-bottom:8px;","X .modebar.vertical .modebar-group .modebar-btn":"display:block;text-align:center;","X [data-title]:before,X [data-title]:after":"position:absolute;-webkit-transform:translate3d(0, 0, 0);-moz-transform:translate3d(0, 0, 0);-ms-transform:translate3d(0, 0, 0);-o-transform:translate3d(0, 0, 0);transform:translate3d(0, 0, 0);display:none;opacity:0;z-index:1001;pointer-events:none;top:110%;right:50%;","X 
[data-title]:hover:before,X [data-title]:hover:after":"display:block;opacity:1;","X [data-title]:before":'content:"";position:absolute;background:transparent;border:6px solid transparent;z-index:1002;margin-top:-12px;border-bottom-color:#69738a;margin-right:-6px;',"X [data-title]:after":"content:attr(data-title);background:#69738a;color:#fff;padding:8px 10px;font-size:12px;line-height:12px;white-space:nowrap;margin-right:-18px;border-radius:2px;","X .vertical [data-title]:before,X .vertical [data-title]:after":"top:0%;right:200%;","X .vertical [data-title]:before":"border:6px solid transparent;border-left-color:#69738a;margin-top:8px;margin-right:-30px;","X .select-outline":"fill:none;stroke-width:1;shape-rendering:crispEdges;","X .select-outline-1":"stroke:#fff;","X .select-outline-2":"stroke:#000;stroke-dasharray:2px 2px;",Y:'font-family:"Open Sans",verdana,arial,sans-serif;position:fixed;top:50px;right:20px;z-index:10000;font-size:10pt;max-width:180px;',"Y p":"margin:0;","Y .notifier-note":"min-width:180px;max-width:250px;border:1px solid #fff;z-index:3000;margin:0;background-color:#8c97af;background-color:rgba(140,151,175,.9);color:#fff;padding:10px;overflow-wrap:break-word;word-wrap:break-word;-ms-hyphens:auto;-webkit-hyphens:auto;hyphens:auto;","Y .notifier-close":"color:#fff;opacity:.8;float:right;padding:0 5px;background:none;border:none;font-size:20px;font-weight:bold;line-height:20px;","Y .notifier-close:hover":"color:#444;text-decoration:none;cursor:pointer;"};for(var a in i){var o=a.replace(/^,/," ,").replace(/X/g,".js-plotly-plot .plotly").replace(/Y/g,".plotly-notifier");n.addStyleRule(o,i[a])}},{"../src/lib":498}],2:[function(t,e,r){"use strict";e.exports=t("../src/transforms/aggregate")},{"../src/transforms/aggregate":1113}],3:[function(t,e,r){"use strict";e.exports=t("../src/traces/bar")},{"../src/traces/bar":651}],4:[function(t,e,r){"use strict";e.exports=t("../src/traces/barpolar")},{"../src/traces/barpolar":664}],5:[function(t,e,r){"use 
strict";e.exports=t("../src/traces/box")},{"../src/traces/box":674}],6:[function(t,e,r){"use strict";e.exports=t("../src/components/calendars")},{"../src/components/calendars":359}],7:[function(t,e,r){"use strict";e.exports=t("../src/traces/candlestick")},{"../src/traces/candlestick":683}],8:[function(t,e,r){"use strict";e.exports=t("../src/traces/carpet")},{"../src/traces/carpet":702}],9:[function(t,e,r){"use strict";e.exports=t("../src/traces/choropleth")},{"../src/traces/choropleth":716}],10:[function(t,e,r){"use strict";e.exports=t("../src/traces/choroplethmapbox")},{"../src/traces/choroplethmapbox":723}],11:[function(t,e,r){"use strict";e.exports=t("../src/traces/cone")},{"../src/traces/cone":729}],12:[function(t,e,r){"use strict";e.exports=t("../src/traces/contour")},{"../src/traces/contour":744}],13:[function(t,e,r){"use strict";e.exports=t("../src/traces/contourcarpet")},{"../src/traces/contourcarpet":755}],14:[function(t,e,r){"use strict";e.exports=t("../src/core")},{"../src/core":476}],15:[function(t,e,r){"use strict";e.exports=t("../src/traces/densitymapbox")},{"../src/traces/densitymapbox":763}],16:[function(t,e,r){"use strict";e.exports=t("../src/transforms/filter")},{"../src/transforms/filter":1114}],17:[function(t,e,r){"use strict";e.exports=t("../src/traces/funnel")},{"../src/traces/funnel":773}],18:[function(t,e,r){"use strict";e.exports=t("../src/traces/funnelarea")},{"../src/traces/funnelarea":782}],19:[function(t,e,r){"use strict";e.exports=t("../src/transforms/groupby")},{"../src/transforms/groupby":1115}],20:[function(t,e,r){"use strict";e.exports=t("../src/traces/heatmap")},{"../src/traces/heatmap":795}],21:[function(t,e,r){"use strict";e.exports=t("../src/traces/heatmapgl")},{"../src/traces/heatmapgl":806}],22:[function(t,e,r){"use strict";e.exports=t("../src/traces/histogram")},{"../src/traces/histogram":818}],23:[function(t,e,r){"use 
strict";e.exports=t("../src/traces/histogram2d")},{"../src/traces/histogram2d":824}],24:[function(t,e,r){"use strict";e.exports=t("../src/traces/histogram2dcontour")},{"../src/traces/histogram2dcontour":828}],25:[function(t,e,r){"use strict";e.exports=t("../src/traces/icicle")},{"../src/traces/icicle":834}],26:[function(t,e,r){"use strict";e.exports=t("../src/traces/image")},{"../src/traces/image":847}],27:[function(t,e,r){"use strict";var n=t("./core");n.register([t("./bar"),t("./box"),t("./heatmap"),t("./histogram"),t("./histogram2d"),t("./histogram2dcontour"),t("./contour"),t("./scatterternary"),t("./violin"),t("./funnel"),t("./waterfall"),t("./image"),t("./pie"),t("./sunburst"),t("./treemap"),t("./icicle"),t("./funnelarea"),t("./scatter3d"),t("./surface"),t("./isosurface"),t("./volume"),t("./mesh3d"),t("./cone"),t("./streamtube"),t("./scattergeo"),t("./choropleth"),t("./scattergl"),t("./splom"),t("./pointcloud"),t("./heatmapgl"),t("./parcoords"),t("./parcats"),t("./scattermapbox"),t("./choroplethmapbox"),t("./densitymapbox"),t("./sankey"),t("./indicator"),t("./table"),t("./carpet"),t("./scattercarpet"),t("./contourcarpet"),t("./ohlc"),t("./candlestick"),t("./scatterpolar"),t("./scatterpolargl"),t("./barpolar"),t("./scattersmith"),t("./aggregate"),t("./filter"),t("./groupby"),t("./sort"),t("./calendars")]),e.exports=n},{"./aggregate":2,"./bar":3,"./barpolar":4,"./box":5,"./calendars":6,"./candlestick":7,"./carpet":8,"./choropleth":9,"./choroplethmapbox":10,"./cone":11,"./contour":12,"./contourcarpet":13,"./core":14,"./densitymapbox":15,"./filter":16,"./funnel":17,"./funnelarea":18,"./groupby":19,"./heatmap":20,"./heatmapgl":21,"./histogram":22,"./histogram2d":23,"./histogram2dcontour":24,"./icicle":25,"./image":26,"./indicator":28,"./isosurface":29,"./mesh3d":30,"./ohlc":31,"./parcats":32,"./parcoords":33,"./pie":34,"./pointcloud":35,"./sankey":36,"./scatter3d":37,"./scattercarpet":38,"./scattergeo":39,"./scattergl":40,"./scattermapbox":41,"./scatterpolar":42,"./
scatterpolargl":43,"./scattersmith":44,"./scatterternary":45,"./sort":46,"./splom":47,"./streamtube":48,"./sunburst":49,"./surface":50,"./table":51,"./treemap":52,"./violin":53,"./volume":54,"./waterfall":55}],28:[function(t,e,r){"use strict";e.exports=t("../src/traces/indicator")},{"../src/traces/indicator":855}],29:[function(t,e,r){"use strict";e.exports=t("../src/traces/isosurface")},{"../src/traces/isosurface":861}],30:[function(t,e,r){"use strict";e.exports=t("../src/traces/mesh3d")},{"../src/traces/mesh3d":866}],31:[function(t,e,r){"use strict";e.exports=t("../src/traces/ohlc")},{"../src/traces/ohlc":871}],32:[function(t,e,r){"use strict";e.exports=t("../src/traces/parcats")},{"../src/traces/parcats":880}],33:[function(t,e,r){"use strict";e.exports=t("../src/traces/parcoords")},{"../src/traces/parcoords":891}],34:[function(t,e,r){"use strict";e.exports=t("../src/traces/pie")},{"../src/traces/pie":902}],35:[function(t,e,r){"use strict";e.exports=t("../src/traces/pointcloud")},{"../src/traces/pointcloud":911}],36:[function(t,e,r){"use strict";e.exports=t("../src/traces/sankey")},{"../src/traces/sankey":917}],37:[function(t,e,r){"use strict";e.exports=t("../src/traces/scatter3d")},{"../src/traces/scatter3d":955}],38:[function(t,e,r){"use strict";e.exports=t("../src/traces/scattercarpet")},{"../src/traces/scattercarpet":962}],39:[function(t,e,r){"use strict";e.exports=t("../src/traces/scattergeo")},{"../src/traces/scattergeo":970}],40:[function(t,e,r){"use strict";e.exports=t("../src/traces/scattergl")},{"../src/traces/scattergl":984}],41:[function(t,e,r){"use strict";e.exports=t("../src/traces/scattermapbox")},{"../src/traces/scattermapbox":994}],42:[function(t,e,r){"use strict";e.exports=t("../src/traces/scatterpolar")},{"../src/traces/scatterpolar":1002}],43:[function(t,e,r){"use strict";e.exports=t("../src/traces/scatterpolargl")},{"../src/traces/scatterpolargl":1010}],44:[function(t,e,r){"use 
strict";e.exports=t("../src/traces/scattersmith")},{"../src/traces/scattersmith":1017}],45:[function(t,e,r){"use strict";e.exports=t("../src/traces/scatterternary")},{"../src/traces/scatterternary":1025}],46:[function(t,e,r){"use strict";e.exports=t("../src/transforms/sort")},{"../src/transforms/sort":1117}],47:[function(t,e,r){"use strict";e.exports=t("../src/traces/splom")},{"../src/traces/splom":1035}],48:[function(t,e,r){"use strict";e.exports=t("../src/traces/streamtube")},{"../src/traces/streamtube":1043}],49:[function(t,e,r){"use strict";e.exports=t("../src/traces/sunburst")},{"../src/traces/sunburst":1051}],50:[function(t,e,r){"use strict";e.exports=t("../src/traces/surface")},{"../src/traces/surface":1060}],51:[function(t,e,r){"use strict";e.exports=t("../src/traces/table")},{"../src/traces/table":1068}],52:[function(t,e,r){"use strict";e.exports=t("../src/traces/treemap")},{"../src/traces/treemap":1079}],53:[function(t,e,r){"use strict";e.exports=t("../src/traces/violin")},{"../src/traces/violin":1092}],54:[function(t,e,r){"use strict";e.exports=t("../src/traces/volume")},{"../src/traces/volume":1100}],55:[function(t,e,r){"use strict";e.exports=t("../src/traces/waterfall")},{"../src/traces/waterfall":1108}],56:[function(t,e,r){!function(n,i){"object"==typeof r&&void 0!==e?i(r,t("d3-array"),t("d3-collection"),t("d3-shape"),t("elementary-circuits-directed-graph")):i(n.d3=n.d3||{},n.d3,n.d3,n.d3,null)}(this,(function(t,e,r,n,i){"use strict";function a(t){return t.target.depth}function o(t,e){return t.sourceLinks.length?t.depth:e-1}function s(t){return function(){return t}}i=i&&i.hasOwnProperty("default")?i.default:i;var l="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t};function c(t,e){return f(t.source,e.source)||t.index-e.index}function u(t,e){return f(t.target,e.target)||t.index-e.index}function 
f(t,e){return t.partOfCycle===e.partOfCycle?t.y0-e.y0:"top"===t.circularLinkType||"bottom"===e.circularLinkType?-1:1}function h(t){return t.value}function p(t){return(t.y0+t.y1)/2}function d(t){return p(t.source)}function m(t){return p(t.target)}function g(t){return t.index}function v(t){return t.nodes}function y(t){return t.links}function x(t,e){var r=t.get(e);if(!r)throw new Error("missing: "+e);return r}function b(t,e){return e(t)}function _(t,e,r){var n=0;if(null===r){for(var a=[],o=0;o1||i>1)}function M(t,e,r){return t.sort(E),t.forEach((function(n,i){var a,o,s=0;if(q(n,r)&&A(n))n.circularPathData.verticalBuffer=s+n.width/2;else{for(var l=0;lo.source.column)){var c=t[l].circularPathData.verticalBuffer+t[l].width/2+e;s=c>s?c:s}n.circularPathData.verticalBuffer=s+n.width/2}})),t}function S(t,r,i,a){var o=e.min(t.links,(function(t){return t.source.y0}));t.links.forEach((function(t){t.circular&&(t.circularPathData={})})),M(t.links.filter((function(t){return"top"==t.circularLinkType})),r,a),M(t.links.filter((function(t){return"bottom"==t.circularLinkType})),r,a),t.links.forEach((function(e){if(e.circular){if(e.circularPathData.arcRadius=e.width+10,e.circularPathData.leftNodeBuffer=5,e.circularPathData.rightNodeBuffer=5,e.circularPathData.sourceWidth=e.source.x1-e.source.x0,e.circularPathData.sourceX=e.source.x0+e.circularPathData.sourceWidth,e.circularPathData.targetX=e.target.x0,e.circularPathData.sourceY=e.y0,e.circularPathData.targetY=e.y1,q(e,a)&&A(e))e.circularPathData.leftSmallArcRadius=10+e.width/2,e.circularPathData.leftLargeArcRadius=10+e.width/2,e.circularPathData.rightSmallArcRadius=10+e.width/2,e.circularPathData.rightLargeArcRadius=10+e.width/2,"bottom"==e.circularLinkType?(e.circularPathData.verticalFullExtent=e.source.y1+25+e.circularPathData.verticalBuffer,e.circularPathData.verticalLeftInnerExtent=e.circularPathData.verticalFullExtent-e.circularPathData.leftLargeArcRadius,e.circularPathData.verticalRightInnerExtent=e.circularPathData.verticalFullExt
ent-e.circularPathData.rightLargeArcRadius):(e.circularPathData.verticalFullExtent=e.source.y0-25-e.circularPathData.verticalBuffer,e.circularPathData.verticalLeftInnerExtent=e.circularPathData.verticalFullExtent+e.circularPathData.leftLargeArcRadius,e.circularPathData.verticalRightInnerExtent=e.circularPathData.verticalFullExtent+e.circularPathData.rightLargeArcRadius);else{var s=e.source.column,l=e.circularLinkType,c=t.links.filter((function(t){return t.source.column==s&&t.circularLinkType==l}));"bottom"==e.circularLinkType?c.sort(C):c.sort(L);var u=0;c.forEach((function(t,n){t.circularLinkID==e.circularLinkID&&(e.circularPathData.leftSmallArcRadius=10+e.width/2+u,e.circularPathData.leftLargeArcRadius=10+e.width/2+n*r+u),u+=t.width})),s=e.target.column,c=t.links.filter((function(t){return t.target.column==s&&t.circularLinkType==l})),"bottom"==e.circularLinkType?c.sort(I):c.sort(P),u=0,c.forEach((function(t,n){t.circularLinkID==e.circularLinkID&&(e.circularPathData.rightSmallArcRadius=10+e.width/2+u,e.circularPathData.rightLargeArcRadius=10+e.width/2+n*r+u),u+=t.width})),"bottom"==e.circularLinkType?(e.circularPathData.verticalFullExtent=Math.max(i,e.source.y1,e.target.y1)+25+e.circularPathData.verticalBuffer,e.circularPathData.verticalLeftInnerExtent=e.circularPathData.verticalFullExtent-e.circularPathData.leftLargeArcRadius,e.circularPathData.verticalRightInnerExtent=e.circularPathData.verticalFullExtent-e.circularPathData.rightLargeArcRadius):(e.circularPathData.verticalFullExtent=o-25-e.circularPathData.verticalBuffer,e.circularPathData.verticalLeftInnerExtent=e.circularPathData.verticalFullExtent+e.circularPathData.leftLargeArcRadius,e.circularPathData.verticalRightInnerExtent=e.circularPathData.verticalFullExtent+e.circularPathData.rightLargeArcRadius)}e.circularPathData.leftInnerExtent=e.circularPathData.sourceX+e.circularPathData.leftNodeBuffer,e.circularPathData.rightInnerExtent=e.circularPathData.targetX-e.circularPathData.rightNodeBuffer,e.circularPathDa
ta.leftFullExtent=e.circularPathData.sourceX+e.circularPathData.leftLargeArcRadius+e.circularPathData.leftNodeBuffer,e.circularPathData.rightFullExtent=e.circularPathData.targetX-e.circularPathData.rightLargeArcRadius-e.circularPathData.rightNodeBuffer}if(e.circular)e.path=function(t){var e="";e="top"==t.circularLinkType?"M"+t.circularPathData.sourceX+" "+t.circularPathData.sourceY+" L"+t.circularPathData.leftInnerExtent+" "+t.circularPathData.sourceY+" A"+t.circularPathData.leftLargeArcRadius+" "+t.circularPathData.leftSmallArcRadius+" 0 0 0 "+t.circularPathData.leftFullExtent+" "+(t.circularPathData.sourceY-t.circularPathData.leftSmallArcRadius)+" L"+t.circularPathData.leftFullExtent+" "+t.circularPathData.verticalLeftInnerExtent+" A"+t.circularPathData.leftLargeArcRadius+" "+t.circularPathData.leftLargeArcRadius+" 0 0 0 "+t.circularPathData.leftInnerExtent+" "+t.circularPathData.verticalFullExtent+" L"+t.circularPathData.rightInnerExtent+" "+t.circularPathData.verticalFullExtent+" A"+t.circularPathData.rightLargeArcRadius+" "+t.circularPathData.rightLargeArcRadius+" 0 0 0 "+t.circularPathData.rightFullExtent+" "+t.circularPathData.verticalRightInnerExtent+" L"+t.circularPathData.rightFullExtent+" "+(t.circularPathData.targetY-t.circularPathData.rightSmallArcRadius)+" A"+t.circularPathData.rightLargeArcRadius+" "+t.circularPathData.rightSmallArcRadius+" 0 0 0 "+t.circularPathData.rightInnerExtent+" "+t.circularPathData.targetY+" L"+t.circularPathData.targetX+" "+t.circularPathData.targetY:"M"+t.circularPathData.sourceX+" "+t.circularPathData.sourceY+" L"+t.circularPathData.leftInnerExtent+" "+t.circularPathData.sourceY+" A"+t.circularPathData.leftLargeArcRadius+" "+t.circularPathData.leftSmallArcRadius+" 0 0 1 "+t.circularPathData.leftFullExtent+" "+(t.circularPathData.sourceY+t.circularPathData.leftSmallArcRadius)+" L"+t.circularPathData.leftFullExtent+" "+t.circularPathData.verticalLeftInnerExtent+" A"+t.circularPathData.leftLargeArcRadius+" 
"+t.circularPathData.leftLargeArcRadius+" 0 0 1 "+t.circularPathData.leftInnerExtent+" "+t.circularPathData.verticalFullExtent+" L"+t.circularPathData.rightInnerExtent+" "+t.circularPathData.verticalFullExtent+" A"+t.circularPathData.rightLargeArcRadius+" "+t.circularPathData.rightLargeArcRadius+" 0 0 1 "+t.circularPathData.rightFullExtent+" "+t.circularPathData.verticalRightInnerExtent+" L"+t.circularPathData.rightFullExtent+" "+(t.circularPathData.targetY+t.circularPathData.rightSmallArcRadius)+" A"+t.circularPathData.rightLargeArcRadius+" "+t.circularPathData.rightSmallArcRadius+" 0 0 1 "+t.circularPathData.rightInnerExtent+" "+t.circularPathData.targetY+" L"+t.circularPathData.targetX+" "+t.circularPathData.targetY;return e}(e);else{var f=n.linkHorizontal().source((function(t){return[t.source.x0+(t.source.x1-t.source.x0),t.y0]})).target((function(t){return[t.target.x0,t.y1]}));e.path=f(e)}}))}function E(t,e){return O(t)==O(e)?"bottom"==t.circularLinkType?C(t,e):L(t,e):O(e)-O(t)}function L(t,e){return t.y0-e.y0}function C(t,e){return e.y0-t.y0}function P(t,e){return t.y1-e.y1}function I(t,e){return e.y1-t.y1}function O(t){return t.target.column-t.source.column}function z(t){return t.target.x0-t.source.x1}function D(t,e){var r=T(t),n=z(e)/Math.tan(r);return"up"==H(t)?t.y1+n:t.y1-n}function R(t,e){var r=T(t),n=z(e)/Math.tan(r);return"up"==H(t)?t.y1-n:t.y1+n}function F(t,e,r,n){t.links.forEach((function(i){if(!i.circular&&i.target.column-i.source.column>1){var a=i.source.column+1,o=i.target.column-1,s=1,l=o-a+1;for(s=1;a<=o;a++,s++)t.nodes.forEach((function(o){if(o.column==a){var c,u=s/(l+1),f=Math.pow(1-u,3),h=3*u*Math.pow(1-u,2),p=3*Math.pow(u,2)*(1-u),d=Math.pow(u,3),m=f*i.y0+h*i.y0+p*i.y1+d*i.y1,g=m-i.width/2,v=m+i.width/2;g>o.y0&&go.y0&&vo.y1)&&(c=v-o.y0+10,o=N(o,c,e,r),t.nodes.forEach((function(t){b(t,n)!=b(o,n)&&t.column==o.column&&t.y0o.y1&&N(t,c,e,r)})))}}))}}))}function B(t,e){return t.y0>e.y0&&t.y0e.y0&&t.y1e.y1)}function N(t,e,r,n){return 
t.y0+e>=r&&t.y1+e<=n&&(t.y0=t.y0+e,t.y1=t.y1+e,t.targetLinks.forEach((function(t){t.y1=t.y1+e})),t.sourceLinks.forEach((function(t){t.y0=t.y0+e}))),t}function j(t,e,r,n){t.nodes.forEach((function(i){n&&i.y+(i.y1-i.y0)>e&&(i.y=i.y-(i.y+(i.y1-i.y0)-e));var a=t.links.filter((function(t){return b(t.source,r)==b(i,r)})),o=a.length;o>1&&a.sort((function(t,e){if(!t.circular&&!e.circular){if(t.target.column==e.target.column)return t.y1-e.y1;if(!V(t,e))return t.y1-e.y1;if(t.target.column>e.target.column){var r=R(e,t);return t.y1-r}if(e.target.column>t.target.column)return R(t,e)-e.y1}return t.circular&&!e.circular?"top"==t.circularLinkType?-1:1:e.circular&&!t.circular?"top"==e.circularLinkType?1:-1:t.circular&&e.circular?t.circularLinkType===e.circularLinkType&&"top"==t.circularLinkType?t.target.column===e.target.column?t.target.y1-e.target.y1:e.target.column-t.target.column:t.circularLinkType===e.circularLinkType&&"bottom"==t.circularLinkType?t.target.column===e.target.column?e.target.y1-t.target.y1:t.target.column-e.target.column:"top"==t.circularLinkType?-1:1:void 0}));var s=i.y0;a.forEach((function(t){t.y0=s+t.width/2,s+=t.width})),a.forEach((function(t,e){if("bottom"==t.circularLinkType){for(var r=e+1,n=0;r1&&n.sort((function(t,e){if(!t.circular&&!e.circular){if(t.source.column==e.source.column)return t.y0-e.y0;if(!V(t,e))return t.y0-e.y0;if(e.source.column0?"up":"down"}function q(t,e){return b(t.source,e)==b(t.target,e)}function G(t,r,n){var i=t.nodes,a=t.links,o=!1,s=!1;if(a.forEach((function(t){"top"==t.circularLinkType?o=!0:"bottom"==t.circularLinkType&&(s=!0)})),0==o||0==s){var l=e.min(i,(function(t){return t.y0})),c=(n-r)/(e.max(i,(function(t){return t.y1}))-l);i.forEach((function(t){var e=(t.y1-t.y0)*c;t.y0=(t.y0-l)*c,t.y1=t.y0+e})),a.forEach((function(t){t.y0=(t.y0-l)*c,t.y1=(t.y1-l)*c,t.width=t.width*c}))}}t.sankeyCircular=function(){var t,n,i=0,a=0,b=1,T=1,A=24,M=g,E=o,L=v,C=y,P=32,I=2,O=null;function z(){var 
t={nodes:L.apply(null,arguments),links:C.apply(null,arguments)};D(t),_(t,M,O),R(t),B(t),w(t,M),N(t,P,M),V(t);for(var e=4,r=0;r0?r+25+10:r,bottom:n=n>0?n+25+10:n,left:a=a>0?a+25+10:a,right:i=i>0?i+25+10:i}}(o),f=function(t,r){var n=e.max(t.nodes,(function(t){return t.column})),o=b-i,s=T-a,l=o/(o+r.right+r.left),c=s/(s+r.top+r.bottom);return i=i*l+r.left,b=0==r.right?b:b*l,a=a*c+r.top,T*=c,t.nodes.forEach((function(t){t.x0=i+t.column*((b-i-A)/n),t.x1=t.x0+A})),c}(o,u);l*=f,o.links.forEach((function(t){t.width=t.value*l})),c.forEach((function(t){var e=t.length;t.forEach((function(t,n){t.depth==c.length-1&&1==e||0==t.depth&&1==e?(t.y0=T/2-t.value*l,t.y1=t.y0+t.value*l):t.partOfCycle?0==k(t,r)?(t.y0=T/2+n,t.y1=t.y0+t.value*l):"top"==t.circularLinkType?(t.y0=a+n,t.y1=t.y0+t.value*l):(t.y0=T-t.value*l-n,t.y1=t.y0+t.value*l):0==u.top||0==u.bottom?(t.y0=(T-a)/e*n,t.y1=t.y0+t.value*l):(t.y0=(T-a)/2-e/2+n,t.y1=t.y0+t.value*l)}))}))}(l),y();for(var u=1,g=s;g>0;--g)v(u*=.99,l),y();function v(t,r){var n=c.length;c.forEach((function(i){var a=i.length,o=i[0].depth;i.forEach((function(i){var s;if(i.sourceLinks.length||i.targetLinks.length)if(i.partOfCycle&&k(i,r)>0);else if(0==o&&1==a)s=i.y1-i.y0,i.y0=T/2-s/2,i.y1=T/2+s/2;else if(o==n-1&&1==a)s=i.y1-i.y0,i.y0=T/2-s/2,i.y1=T/2+s/2;else{var l=e.mean(i.sourceLinks,m),c=e.mean(i.targetLinks,d),u=((l&&c?(l+c)/2:l||c)-p(i))*t;i.y0+=u,i.y1+=u}}))}))}function y(){c.forEach((function(e){var r,n,i,o=a,s=e.length;for(e.sort(f),i=0;i0&&(r.y0+=n,r.y1+=n),o=r.y1+t;if((n=o-t-T)>0)for(o=r.y0-=n,r.y1-=n,i=s-2;i>=0;--i)(n=(r=e[i]).y1+t-o)>0&&(r.y0-=n,r.y1-=n),o=r.y0}))}}function V(t){t.nodes.forEach((function(t){t.sourceLinks.sort(u),t.targetLinks.sort(c)})),t.nodes.forEach((function(t){var e=t.y0,r=e,n=t.y1,i=n;t.sourceLinks.forEach((function(t){t.circular?(t.y0=n-t.width/2,n-=t.width):(t.y0=e+t.width/2,e+=t.width)})),t.targetLinks.forEach((function(t){t.circular?(t.y1=i-t.width/2,i-=t.width):(t.y1=r+t.width/2,r+=t.width)}))}))}return 
z.nodeId=function(t){return arguments.length?(M="function"==typeof t?t:s(t),z):M},z.nodeAlign=function(t){return arguments.length?(E="function"==typeof t?t:s(t),z):E},z.nodeWidth=function(t){return arguments.length?(A=+t,z):A},z.nodePadding=function(e){return arguments.length?(t=+e,z):t},z.nodes=function(t){return arguments.length?(L="function"==typeof t?t:s(t),z):L},z.links=function(t){return arguments.length?(C="function"==typeof t?t:s(t),z):C},z.size=function(t){return arguments.length?(i=a=0,b=+t[0],T=+t[1],z):[b-i,T-a]},z.extent=function(t){return arguments.length?(i=+t[0][0],b=+t[1][0],a=+t[0][1],T=+t[1][1],z):[[i,a],[b,T]]},z.iterations=function(t){return arguments.length?(P=+t,z):P},z.circularLinkGap=function(t){return arguments.length?(I=+t,z):I},z.nodePaddingRatio=function(t){return arguments.length?(n=+t,z):n},z.sortNodes=function(t){return arguments.length?(O=t,z):O},z.update=function(t){return w(t,M),V(t),t.links.forEach((function(t){t.circular&&(t.circularLinkType=t.y0+t.y1a&&(b=a);var o=e.min(i,(function(t){return(y-n-(t.length-1)*b)/e.sum(t,u)}));i.forEach((function(t){t.forEach((function(t,e){t.y1=(t.y0=e)+t.value*o}))})),t.links.forEach((function(t){t.width=t.value*o}))}(),d();for(var a=1,o=A;o>0;--o)l(a*=.99),d(),s(a),d();function s(t){i.forEach((function(r){r.forEach((function(r){if(r.targetLinks.length){var n=(e.sum(r.targetLinks,h)/e.sum(r.targetLinks,u)-f(r))*t;r.y0+=n,r.y1+=n}}))}))}function l(t){i.slice().reverse().forEach((function(r){r.forEach((function(r){if(r.sourceLinks.length){var n=(e.sum(r.sourceLinks,p)/e.sum(r.sourceLinks,u)-f(r))*t;r.y0+=n,r.y1+=n}}))}))}function d(){i.forEach((function(t){var e,r,i,a=n,o=t.length;for(t.sort(c),i=0;i0&&(e.y0+=r,e.y1+=r),a=e.y1+b;if((r=a-b-y)>0)for(a=e.y0-=r,e.y1-=r,i=o-2;i>=0;--i)(r=(e=t[i]).y1+b-a)>0&&(e.y0-=r,e.y1-=r),a=e.y0}))}}function P(t){t.nodes.forEach((function(t){t.sourceLinks.sort(l),t.targetLinks.sort(s)})),t.nodes.forEach((function(t){var 
e=t.y0,r=e;t.sourceLinks.forEach((function(t){t.y0=e+t.width/2,e+=t.width})),t.targetLinks.forEach((function(t){t.y1=r+t.width/2,r+=t.width}))}))}return M.update=function(t){return P(t),t},M.nodeId=function(t){return arguments.length?(_="function"==typeof t?t:o(t),M):_},M.nodeAlign=function(t){return arguments.length?(w="function"==typeof t?t:o(t),M):w},M.nodeWidth=function(t){return arguments.length?(x=+t,M):x},M.nodePadding=function(t){return arguments.length?(b=+t,M):b},M.nodes=function(t){return arguments.length?(T="function"==typeof t?t:o(t),M):T},M.links=function(t){return arguments.length?(k="function"==typeof t?t:o(t),M):k},M.size=function(e){return arguments.length?(t=n=0,i=+e[0],y=+e[1],M):[i-t,y-n]},M.extent=function(e){return arguments.length?(t=+e[0][0],i=+e[1][0],n=+e[0][1],y=+e[1][1],M):[[t,n],[i,y]]},M.iterations=function(t){return arguments.length?(A=+t,M):A},M},t.sankeyCenter=function(t){return t.targetLinks.length?t.depth:t.sourceLinks.length?e.min(t.sourceLinks,i)-1:0},t.sankeyLeft=function(t){return t.depth},t.sankeyRight=function(t,e){return e-1-t.height},t.sankeyJustify=a,t.sankeyLinkHorizontal=function(){return n.linkHorizontal().source(y).target(x)},Object.defineProperty(t,"__esModule",{value:!0})}))},{"d3-array":102,"d3-collection":103,"d3-shape":114}],58:[function(t,e,r){(function(){var t={version:"3.8.0"},r=[].slice,n=function(t){return r.call(t)},i=self.document;function a(t){return t&&(t.ownerDocument||t.document||t).documentElement}function o(t){return t&&(t.ownerDocument&&t.ownerDocument.defaultView||t.document&&t||t.defaultView)}if(i)try{n(i.documentElement.childNodes)[0].nodeType}catch(t){n=function(t){for(var e=t.length,r=new Array(e);e--;)r[e]=t[e];return r}}if(Date.now||(Date.now=function(){return+new Date}),i)try{i.createElement("DIV").style.setProperty("opacity",0,"")}catch(t){var 
s=this.Element.prototype,l=s.setAttribute,c=s.setAttributeNS,u=this.CSSStyleDeclaration.prototype,f=u.setProperty;s.setAttribute=function(t,e){l.call(this,t,e+"")},s.setAttributeNS=function(t,e,r){c.call(this,t,e,r+"")},u.setProperty=function(t,e,r){f.call(this,t,e+"",r)}}function h(t,e){return te?1:t>=e?0:NaN}function p(t){return null===t?NaN:+t}function d(t){return!isNaN(t)}function m(t){return{left:function(e,r,n,i){for(arguments.length<3&&(n=0),arguments.length<4&&(i=e.length);n>>1;t(e[a],r)<0?n=a+1:i=a}return n},right:function(e,r,n,i){for(arguments.length<3&&(n=0),arguments.length<4&&(i=e.length);n>>1;t(e[a],r)>0?i=a:n=a+1}return n}}}t.ascending=h,t.descending=function(t,e){return et?1:e>=t?0:NaN},t.min=function(t,e){var r,n,i=-1,a=t.length;if(1===arguments.length){for(;++i=n){r=n;break}for(;++in&&(r=n)}else{for(;++i=n){r=n;break}for(;++in&&(r=n)}return r},t.max=function(t,e){var r,n,i=-1,a=t.length;if(1===arguments.length){for(;++i=n){r=n;break}for(;++ir&&(r=n)}else{for(;++i=n){r=n;break}for(;++ir&&(r=n)}return r},t.extent=function(t,e){var r,n,i,a=-1,o=t.length;if(1===arguments.length){for(;++a=n){r=i=n;break}for(;++an&&(r=n),i=n){r=i=n;break}for(;++an&&(r=n),i1)return o/(l-1)},t.deviation=function(){var e=t.variance.apply(this,arguments);return e?Math.sqrt(e):e};var g=m(h);function v(t){return t.length}t.bisectLeft=g.left,t.bisect=t.bisectRight=g.right,t.bisector=function(t){return m(1===t.length?function(e,r){return h(t(e),r)}:t)},t.shuffle=function(t,e,r){(a=arguments.length)<3&&(r=t.length,a<2&&(e=0));for(var n,i,a=r-e;a;)i=Math.random()*a--|0,n=t[a+e],t[a+e]=t[i+e],t[i+e]=n;return t},t.permute=function(t,e){for(var r=e.length,n=new Array(r);r--;)n[r]=t[e[r]];return n},t.pairs=function(t){for(var e=0,r=t.length-1,n=t[0],i=new Array(r<0?0:r);e=0;)for(e=(n=t[i]).length;--e>=0;)r[--o]=n[e];return r};var y=Math.abs;function x(t){for(var e=1;t*e%1;)e*=10;return e}function b(t,e){for(var r in 
e)Object.defineProperty(t.prototype,r,{value:e[r],enumerable:!1})}function _(){this._=Object.create(null)}t.range=function(t,e,r){if(arguments.length<3&&(r=1,arguments.length<2&&(e=t,t=0)),(e-t)/r==1/0)throw new Error("infinite range");var n,i=[],a=x(y(r)),o=-1;if(t*=a,e*=a,(r*=a)<0)for(;(n=t+r*++o)>e;)i.push(n/a);else for(;(n=t+r*++o)=i.length)return r?r.call(n,a):e?a.sort(e):a;for(var l,c,u,f,h=-1,p=a.length,d=i[s++],m=new _;++h=i.length)return e;var n=[],o=a[r++];return e.forEach((function(e,i){n.push({key:e,values:t(i,r)})})),o?n.sort((function(t,e){return o(t.key,e.key)})):n}(o(t.map,e,0),0)},n.key=function(t){return i.push(t),n},n.sortKeys=function(t){return a[i.length-1]=t,n},n.sortValues=function(t){return e=t,n},n.rollup=function(t){return r=t,n},n},t.set=function(t){var e=new L;if(t)for(var r=0,n=t.length;r=0&&(n=t.slice(r+1),t=t.slice(0,r)),t)return arguments.length<2?this[t].on(n):this[t].on(n,e);if(2===arguments.length){if(null==e)for(t in this)this.hasOwnProperty(t)&&this[t].on(n,null);return this}},t.event=null,t.requote=function(t){return t.replace(j,"\\$&")};var j=/[\\\^\$\*\+\?\|\[\]\(\)\.\{\}]/g,U={}.__proto__?function(t,e){t.__proto__=e}:function(t,e){for(var r in e)t[r]=e[r]};function V(t){return U(t,Y),t}var H=function(t,e){return e.querySelector(t)},q=function(t,e){return e.querySelectorAll(t)},G=function(t,e){var r=t.matches||t[I(t,"matchesSelector")];return(G=function(t,e){return r.call(t,e)})(t,e)};"function"==typeof Sizzle&&(H=function(t,e){return Sizzle(t,e)[0]||null},q=Sizzle,G=Sizzle.matchesSelector),t.selection=function(){return t.select(i.documentElement)};var Y=t.selection.prototype=[];function W(t){return"function"==typeof t?t:function(){return H(t,this)}}function X(t){return"function"==typeof t?t:function(){return q(t,this)}}Y.select=function(t){var e,r,n,i,a=[];t=W(t);for(var 
o=-1,s=this.length;++o=0&&"xmlns"!==(r=t.slice(0,e))&&(t=t.slice(e+1)),J.hasOwnProperty(r)?{space:J[r],local:t}:t}},Y.attr=function(e,r){if(arguments.length<2){if("string"==typeof e){var n=this.node();return(e=t.ns.qualify(e)).local?n.getAttributeNS(e.space,e.local):n.getAttribute(e)}for(r in e)this.each(K(r,e[r]));return this}return this.each(K(e,r))},Y.classed=function(t,e){if(arguments.length<2){if("string"==typeof t){var r=this.node(),n=(t=tt(t)).length,i=-1;if(e=r.classList){for(;++i=0;)(r=n[i])&&(a&&a!==r.nextSibling&&a.parentNode.insertBefore(r,a),a=r);return this},Y.sort=function(t){t=ct.apply(this,arguments);for(var e=-1,r=this.length;++e=e&&(e=i+1);!(o=s[e])&&++e0&&(e=e.slice(0,o));var l=mt.get(e);function c(){var t=this[a];t&&(this.removeEventListener(e,t,t.$),delete this[a])}return l&&(e=l,s=vt),o?r?function(){var t=s(r,n(arguments));c.call(this),this.addEventListener(e,this[a]=t,t.$=i),t._=r}:c:r?z:function(){var r,n=new RegExp("^__on([^.]+)"+t.requote(e)+"$");for(var i in this)if(r=i.match(n)){var a=this[i];this.removeEventListener(r[1],a,a.$),delete this[i]}}}t.selection.enter=ft,t.selection.enter.prototype=ht,ht.append=Y.append,ht.empty=Y.empty,ht.node=Y.node,ht.call=Y.call,ht.size=Y.size,ht.select=function(t){for(var e,r,n,i,a,o=[],s=-1,l=this.length;++s1?Et:t<-1?-Et:Math.asin(t)}function It(t){return((t=Math.exp(t))+1/t)/2}var Ot=Math.SQRT2;t.interpolateZoom=function(t,e){var r,n,i=t[0],a=t[1],o=t[2],s=e[0],l=e[1],c=e[2],u=s-i,f=l-a,h=u*u+f*f;if(h<1e-12)n=Math.log(c/o)/Ot,r=function(t){return[i+t*u,a+t*f,o*Math.exp(Ot*t*n)]};else{var p=Math.sqrt(h),d=(c*c-o*o+4*h)/(2*o*2*p),m=(c*c-o*o-4*h)/(2*c*2*p),g=Math.log(Math.sqrt(d*d+1)-d),v=Math.log(Math.sqrt(m*m+1)-m);n=(v-g)/Ot,r=function(t){var e,r=t*n,s=It(g),l=o/(2*p)*(s*(e=Ot*r+g,((e=Math.exp(2*e))-1)/(e+1))-function(t){return((t=Math.exp(t))-1/t)/2}(g));return[i+l*u,a+l*f,o*s/It(Ot*r+g)]}}return r.duration=1e3*n,r},t.behavior.zoom=function(){var 
e,r,n,a,s,l,c,u,f,h={x:0,y:0,k:1},p=[960,500],d=Rt,m=250,g=0,v="mousedown.zoom",y="mousemove.zoom",x="mouseup.zoom",b="touchstart.zoom",_=N(w,"zoomstart","zoom","zoomend");function w(t){t.on(v,P).on(Dt+".zoom",O).on("dblclick.zoom",z).on(b,I)}function T(t){return[(t[0]-h.x)/h.k,(t[1]-h.y)/h.k]}function k(t){h.k=Math.max(d[0],Math.min(d[1],t))}function A(t,e){e=function(t){return[t[0]*h.k+h.x,t[1]*h.k+h.y]}(e),h.x+=t[0]-e[0],h.y+=t[1]-e[1]}function M(e,n,i,a){e.__chart__={x:h.x,y:h.y,k:h.k},k(Math.pow(2,a)),A(r=n,i),e=t.select(e),m>0&&(e=e.transition().duration(m)),e.call(w.event)}function S(){c&&c.domain(l.range().map((function(t){return(t-h.x)/h.k})).map(l.invert)),f&&f.domain(u.range().map((function(t){return(t-h.y)/h.k})).map(u.invert))}function E(t){g++||t({type:"zoomstart"})}function L(t){S(),t({type:"zoom",scale:h.k,translate:[h.x,h.y]})}function C(t){--g||(t({type:"zoomend"}),r=null)}function P(){var e=this,r=_.of(e,arguments),n=0,i=t.select(o(e)).on(y,l).on(x,c),a=T(t.mouse(e)),s=bt(e);function l(){n=1,A(t.mouse(e),a),L(r)}function c(){i.on(y,null).on(x,null),s(n),C(r)}Di.call(e),E(r)}function I(){var e,r=this,n=_.of(r,arguments),i={},a=0,o=".zoom-"+t.event.changedTouches[0].identifier,l="touchmove"+o,c="touchend"+o,u=[],f=t.select(r),p=bt(r);function d(){var n=t.touches(r);return e=h.k,n.forEach((function(t){t.identifier in i&&(i[t.identifier]=T(t))})),n}function m(){var e=t.event.target;t.select(e).on(l,g).on(c,y),u.push(e);for(var n=t.event.changedTouches,o=0,f=n.length;o1){v=p[0];var x=p[1],b=v[0]-x[0],_=v[1]-x[1];a=b*b+_*_}}function g(){var o,l,c,u,f=t.touches(r);Di.call(r);for(var h=0,p=f.length;h360?t-=360:t<0&&(t+=360),t<60?n+(i-n)*t/60:t<180?i:t<240?n+(i-n)*(240-t)/60:n}(t))}return t=isNaN(t)?0:(t%=360)<0?t+360:t,e=isNaN(e)||e<0?0:e>1?1:e,n=2*(r=r<0?0:r>1?1:r)-(i=r<=.5?r*(1+e):r+e-r*e),new Qt(a(t+120),a(t),a(t-120))}function Ut(e,r,n){return this instanceof Ut?(this.h=+e,this.c=+r,void(this.l=+n)):arguments.length<2?e instanceof Ut?new 
Ut(e.h,e.c,e.l):Xt(e instanceof qt?e.l:(e=ae((e=t.rgb(e)).r,e.g,e.b)).l,e.a,e.b):new Ut(e,r,n)}Nt.brighter=function(t){return t=Math.pow(.7,arguments.length?t:1),new Bt(this.h,this.s,this.l/t)},Nt.darker=function(t){return t=Math.pow(.7,arguments.length?t:1),new Bt(this.h,this.s,t*this.l)},Nt.rgb=function(){return jt(this.h,this.s,this.l)},t.hcl=Ut;var Vt=Ut.prototype=new Ft;function Ht(t,e,r){return isNaN(t)&&(t=0),isNaN(e)&&(e=0),new qt(r,Math.cos(t*=Lt)*e,Math.sin(t)*e)}function qt(t,e,r){return this instanceof qt?(this.l=+t,this.a=+e,void(this.b=+r)):arguments.length<2?t instanceof qt?new qt(t.l,t.a,t.b):t instanceof Ut?Ht(t.h,t.c,t.l):ae((t=Qt(t)).r,t.g,t.b):new qt(t,e,r)}Vt.brighter=function(t){return new Ut(this.h,this.c,Math.min(100,this.l+Gt*(arguments.length?t:1)))},Vt.darker=function(t){return new Ut(this.h,this.c,Math.max(0,this.l-Gt*(arguments.length?t:1)))},Vt.rgb=function(){return Ht(this.h,this.c,this.l).rgb()},t.lab=qt;var Gt=18,Yt=qt.prototype=new Ft;function Wt(t,e,r){var n=(t+16)/116,i=n+e/500,a=n-r/200;return new Qt(Kt(3.2404542*(i=.95047*Zt(i))-1.5371385*(n=1*Zt(n))-.4985314*(a=1.08883*Zt(a))),Kt(-.969266*i+1.8760108*n+.041556*a),Kt(.0556434*i-.2040259*n+1.0572252*a))}function Xt(t,e,r){return t>0?new Ut(Math.atan2(r,e)*Ct,Math.sqrt(e*e+r*r),t):new Ut(NaN,NaN,t)}function Zt(t){return t>.206893034?t*t*t:(t-4/29)/7.787037}function Jt(t){return t>.008856?Math.pow(t,1/3):7.787037*t+4/29}function Kt(t){return Math.round(255*(t<=.00304?12.92*t:1.055*Math.pow(t,1/2.4)-.055))}function Qt(t,e,r){return this instanceof Qt?(this.r=~~t,this.g=~~e,void(this.b=~~r)):arguments.length<2?t instanceof Qt?new Qt(t.r,t.g,t.b):ne(""+t,Qt,jt):new Qt(t,e,r)}function $t(t){return new Qt(t>>16,t>>8&255,255&t)}function te(t){return $t(t)+""}Yt.brighter=function(t){return new qt(Math.min(100,this.l+Gt*(arguments.length?t:1)),this.a,this.b)},Yt.darker=function(t){return new qt(Math.max(0,this.l-Gt*(arguments.length?t:1)),this.a,this.b)},Yt.rgb=function(){return 
Wt(this.l,this.a,this.b)},t.rgb=Qt;var ee=Qt.prototype=new Ft;function re(t){return t<16?"0"+Math.max(0,t).toString(16):Math.min(255,t).toString(16)}function ne(t,e,r){var n,i,a,o=0,s=0,l=0;if(n=/([a-z]+)\((.*)\)/.exec(t=t.toLowerCase()))switch(i=n[2].split(","),n[1]){case"hsl":return r(parseFloat(i[0]),parseFloat(i[1])/100,parseFloat(i[2])/100);case"rgb":return e(se(i[0]),se(i[1]),se(i[2]))}return(a=le.get(t))?e(a.r,a.g,a.b):(null==t||"#"!==t.charAt(0)||isNaN(a=parseInt(t.slice(1),16))||(4===t.length?(o=(3840&a)>>4,o|=o>>4,s=240&a,s|=s>>4,l=15&a,l|=l<<4):7===t.length&&(o=(16711680&a)>>16,s=(65280&a)>>8,l=255&a)),e(o,s,l))}function ie(t,e,r){var n,i,a=Math.min(t/=255,e/=255,r/=255),o=Math.max(t,e,r),s=o-a,l=(o+a)/2;return s?(i=l<.5?s/(o+a):s/(2-o-a),n=t==o?(e-r)/s+(e0&&l<1?0:n),new Bt(n,i,l)}function ae(t,e,r){var n=Jt((.4124564*(t=oe(t))+.3575761*(e=oe(e))+.1804375*(r=oe(r)))/.95047),i=Jt((.2126729*t+.7151522*e+.072175*r)/1);return qt(116*i-16,500*(n-i),200*(i-Jt((.0193339*t+.119192*e+.9503041*r)/1.08883)))}function oe(t){return(t/=255)<=.04045?t/12.92:Math.pow((t+.055)/1.055,2.4)}function se(t){var e=parseFloat(t);return"%"===t.charAt(t.length-1)?Math.round(2.55*e):e}ee.brighter=function(t){t=Math.pow(.7,arguments.length?t:1);var e=this.r,r=this.g,n=this.b,i=30;return e||r||n?(e&&e=200&&e<300||304===e){try{t=i.call(o,c)}catch(t){return void s.error.call(o,t)}s.load.call(o,t)}else s.error.call(o,c)}return self.XDomainRequest&&!("withCredentials"in c)&&/^(http(s)?:)?\/\//.test(e)&&(c=new XDomainRequest),"onload"in c?c.onload=c.onerror=f:c.onreadystatechange=function(){c.readyState>3&&f()},c.onprogress=function(e){var r=t.event;t.event=e;try{s.progress.call(o,c)}finally{t.event=r}},o.header=function(t,e){return t=(t+"").toLowerCase(),arguments.length<2?l[t]:(null==e?delete l[t]:l[t]=e+"",o)},o.mimeType=function(t){return arguments.length?(r=null==t?null:t+"",o):r},o.responseType=function(t){return arguments.length?(u=t,o):u},o.response=function(t){return 
i=t,o},["get","post"].forEach((function(t){o[t]=function(){return o.send.apply(o,[t].concat(n(arguments)))}})),o.send=function(t,n,i){if(2===arguments.length&&"function"==typeof n&&(i=n,n=null),c.open(t,e,!0),null==r||"accept"in l||(l.accept=r+",*/*"),c.setRequestHeader)for(var a in l)c.setRequestHeader(a,l[a]);return null!=r&&c.overrideMimeType&&c.overrideMimeType(r),null!=u&&(c.responseType=u),null!=i&&o.on("error",i).on("load",(function(t){i(null,t)})),s.beforesend.call(o,c),c.send(null==n?null:n),o},o.abort=function(){return c.abort(),o},t.rebind(o,s,"on"),null==a?o:o.get(function(t){return 1===t.length?function(e,r){t(null==e?r:null)}:t}(a))}le.forEach((function(t,e){le.set(t,$t(e))})),t.functor=ce,t.xhr=ue(C),t.dsv=function(t,e){var r=new RegExp('["'+t+"\n]"),n=t.charCodeAt(0);function i(t,r,n){arguments.length<3&&(n=r,r=null);var i=fe(t,e,null==r?a:o(r),n);return i.row=function(t){return arguments.length?i.response(null==(r=t)?a:o(t)):r},i}function a(t){return i.parse(t.responseText)}function o(t){return function(e){return i.parse(e.responseText,t)}}function s(e){return e.map(l).join(t)}function l(t){return r.test(t)?'"'+t.replace(/\"/g,'""')+'"':t}return i.parse=function(t,e){var r;return i.parseRows(t,(function(t,n){if(r)return r(t,n-1);var i=function(e){for(var r={},n=t.length,i=0;i=l)return o;if(i)return i=!1,a;var e=c;if(34===t.charCodeAt(e)){for(var r=e;r++24?(isFinite(e)&&(clearTimeout(me),me=setTimeout(ye,e)),de=0):(de=1,ge(ye))}function xe(){for(var t=Date.now(),e=he;e;)t>=e.t&&e.c(t-e.t)&&(e.c=null),e=e.n;return t}function be(){for(var t,e=he,r=1/0;e;)e.c?(e.t1&&(e=t[a[o-2]],r=t[a[o-1]],n=t[s],(r[0]-e[0])*(n[1]-e[1])-(r[1]-e[1])*(n[0]-e[0])<=0);)--o;a[o++]=s}return a.slice(0,o)}function ke(t,e){return t[0]-e[0]||t[1]-e[1]}t.timer=function(){ve.apply(this,arguments)},t.timer.flush=function(){xe(),be()},t.round=function(t,e){return e?Math.round(t*(e=Math.pow(10,e)))/e:Math.round(t)},t.geom={},t.geom.hull=function(t){var 
e=_e,r=we;if(arguments.length)return n(t);function n(t){if(t.length<3)return[];var n,i=ce(e),a=ce(r),o=t.length,s=[],l=[];for(n=0;n=0;--n)p.push(t[s[c[n]][2]]);for(n=+f;nkt)s=s.L;else{if(!((i=a-Ve(s,o))>kt)){n>-kt?(e=s.P,r=s):i>-kt?(e=s,r=s.N):e=r=s;break}if(!s.R){e=s;break}s=s.R}var l=Fe(t);if(Pe.insert(e,l),e||r){if(e===r)return We(e),r=Fe(e.site),Pe.insert(l,r),l.edge=r.edge=Je(e.site,l.site),Ye(e),void Ye(r);if(r){We(e),We(r);var c=e.site,u=c.x,f=c.y,h=t.x-u,p=t.y-f,d=r.site,m=d.x-u,g=d.y-f,v=2*(h*g-p*m),y=h*h+p*p,x=m*m+g*g,b={x:(g*y-p*x)/v+u,y:(h*x-m*y)/v+f};Qe(r.edge,c,d,b),l.edge=Je(c,t,null,b),r.edge=Je(t,d,null,b),Ye(e),Ye(r)}else l.edge=Je(e.site,l.site)}}function Ue(t,e){var r=t.site,n=r.x,i=r.y,a=i-e;if(!a)return n;var o=t.P;if(!o)return-1/0;var s=(r=o.site).x,l=r.y,c=l-e;if(!c)return s;var u=s-n,f=1/a-1/c,h=u/c;return f?(-h+Math.sqrt(h*h-2*f*(u*u/(-2*c)-l+c/2+i-a/2)))/f+n:(n+s)/2}function Ve(t,e){var r=t.N;if(r)return Ue(r,e);var n=t.site;return n.y===e?n.x:1/0}function He(t){this.site=t,this.edges=[]}function qe(t,e){return e.angle-t.angle}function Ge(){er(this),this.x=this.y=this.arc=this.site=this.cy=null}function Ye(t){var e=t.P,r=t.N;if(e&&r){var n=e.site,i=t.site,a=r.site;if(n!==a){var o=i.x,s=i.y,l=n.x-o,c=n.y-s,u=a.x-o,f=2*(l*(g=a.y-s)-c*u);if(!(f>=-1e-12)){var h=l*l+c*c,p=u*u+g*g,d=(g*h-c*p)/f,m=(l*p-u*h)/f,g=m+s,v=De.pop()||new Ge;v.arc=t,v.site=i,v.x=d+o,v.y=g+Math.sqrt(d*d+m*m),v.cy=g,t.circle=v;for(var y=null,x=Oe._;x;)if(v.y=s)return;if(h>d){if(a){if(a.y>=c)return}else a={x:g,y:l};r={x:g,y:c}}else{if(a){if(a.y1)if(h>d){if(a){if(a.y>=c)return}else a={x:(l-i)/n,y:l};r={x:(c-i)/n,y:c}}else{if(a){if(a.y=s)return}else a={x:o,y:n*o+i};r={x:s,y:n*s+i}}else{if(a){if(a.x0)){if(e/=h,h<0){if(e0){if(e>f)return;e>u&&(u=e)}if(e=i-l,h||!(e<0)){if(e/=h,h<0){if(e>f)return;e>u&&(u=e)}else if(h>0){if(e0)){if(e/=p,p<0){if(e0){if(e>f)return;e>u&&(u=e)}if(e=a-c,p||!(e<0)){if(e/=p,p<0){if(e>f)return;e>u&&(u=e)}else 
if(p>0){if(e0&&(t.a={x:l+u*h,y:c+u*p}),f<1&&(t.b={x:l+f*h,y:c+f*p}),t}}}}}),l=o.length;l--;)(!Xe(e=o[l],t)||!s(e)||y(e.a.x-e.b.x)kt||y(i-r)>kt)&&(s.splice(o,0,new $e(Ke(a.site,u,y(n-f)kt?{x:f,y:y(e-f)kt?{x:y(r-d)kt?{x:h,y:y(e-h)kt?{x:y(r-p)=r&&c.x<=i&&c.y>=n&&c.y<=o?[[r,o],[i,o],[i,n],[r,n]]:[]).point=t[s]})),e}function s(t){return t.map((function(t,e){return{x:Math.round(n(t,e)/kt)*kt,y:Math.round(i(t,e)/kt)*kt,i:e}}))}return o.links=function(t){return ar(s(t)).edges.filter((function(t){return t.l&&t.r})).map((function(e){return{source:t[e.l.i],target:t[e.r.i]}}))},o.triangles=function(t){var e=[];return ar(s(t)).cells.forEach((function(r,n){for(var i,a,o,s,l=r.site,c=r.edges.sort(qe),u=-1,f=c.length,h=c[f-1].edge,p=h.l===l?h.r:h.l;++ua||f>o||h=_)<<1|e>=b,T=w+4;wa&&(i=e.slice(a,i),s[o]?s[o]+=i:s[++o]=i),(r=r[0])===(n=n[0])?s[o]?s[o]+=n:s[++o]=n:(s[++o]=null,l.push({i:o,x:dr(r,n)})),a=vr.lastIndex;return am&&(m=l.x),l.y>g&&(g=l.y),c.push(l.x),u.push(l.y);else for(f=0;fm&&(m=b),_>g&&(g=_),c.push(b),u.push(_)}var w=m-p,T=g-d;function k(t,e,r,n,i,a,o,s){if(!isNaN(r)&&!isNaN(n))if(t.leaf){var l=t.x,c=t.y;if(null!=l)if(y(l-r)+y(c-n)<.01)A(t,e,r,n,i,a,o,s);else{var u=t.point;t.x=t.y=t.point=null,A(t,u,l,c,i,a,o,s),A(t,e,r,n,i,a,o,s)}else t.x=r,t.y=n,t.point=e}else A(t,e,r,n,i,a,o,s)}function A(t,e,r,n,i,a,o,s){var l=.5*(i+o),c=.5*(a+s),u=r>=l,f=n>=c,h=f<<1|u;t.leaf=!1,u?i=l:o=l,f?a=c:s=c,k(t=t.nodes[h]||(t.nodes[h]={leaf:!0,nodes:[],point:null,x:null,y:null}),e,r,n,i,a,o,s)}w>T?g=d+w:m=p+T;var M={leaf:!0,nodes:[],point:null,x:null,y:null,add:function(t){k(M,t,+v(t,++f),+x(t,f),p,d,m,g)},visit:function(t){ur(t,M,p,d,m,g)},find:function(t){return fr(M,t[0],t[1],p,d,m,g)}};if(f=-1,null==e){for(;++f=0&&!(n=t.interpolators[i](e,r)););return n}function xr(t,e){var r,n=[],i=[],a=t.length,o=e.length,s=Math.min(t.length,e.length);for(r=0;r=1?1:t(e)}}function kr(t){return function(e){return 1-t(1-e)}}function Ar(t){return function(e){return.5*(e<.5?t(2*e):2-t(2-2*e))}}function 
Mr(t){return t*t}function Sr(t){return t*t*t}function Er(t){if(t<=0)return 0;if(t>=1)return 1;var e=t*t,r=e*t;return 4*(t<.5?r:3*(t-e)+r-.75)}function Lr(t){return 1-Math.cos(t*Et)}function Cr(t){return Math.pow(2,10*(t-1))}function Pr(t){return 1-Math.sqrt(1-t*t)}function Ir(t){return t<1/2.75?7.5625*t*t:t<2/2.75?7.5625*(t-=1.5/2.75)*t+.75:t<2.5/2.75?7.5625*(t-=2.25/2.75)*t+.9375:7.5625*(t-=2.625/2.75)*t+.984375}function Or(t,e){return e-=t,function(r){return Math.round(t+e*r)}}function zr(t){var e,r,n,i=[t.a,t.b],a=[t.c,t.d],o=Rr(i),s=Dr(i,a),l=Rr(((e=a)[0]+=(n=-s)*(r=i)[0],e[1]+=n*r[1],e))||0;i[0]*a[1]=0?t.slice(0,e):t,i=e>=0?t.slice(e+1):"in";return n=_r.get(n)||br,Tr((i=wr.get(i)||C)(n.apply(null,r.call(arguments,1))))},t.interpolateHcl=function(e,r){e=t.hcl(e),r=t.hcl(r);var n=e.h,i=e.c,a=e.l,o=r.h-n,s=r.c-i,l=r.l-a;isNaN(s)&&(s=0,i=isNaN(i)?r.c:i);isNaN(o)?(o=0,n=isNaN(n)?r.h:n):o>180?o-=360:o<-180&&(o+=360);return function(t){return Ht(n+o*t,i+s*t,a+l*t)+""}},t.interpolateHsl=function(e,r){e=t.hsl(e),r=t.hsl(r);var n=e.h,i=e.s,a=e.l,o=r.h-n,s=r.s-i,l=r.l-a;isNaN(s)&&(s=0,i=isNaN(i)?r.s:i);isNaN(o)?(o=0,n=isNaN(n)?r.h:n):o>180?o-=360:o<-180&&(o+=360);return function(t){return jt(n+o*t,i+s*t,a+l*t)+""}},t.interpolateLab=function(e,r){e=t.lab(e),r=t.lab(r);var n=e.l,i=e.a,a=e.b,o=r.l-n,s=r.a-i,l=r.b-a;return function(t){return Wt(n+o*t,i+s*t,a+l*t)+""}},t.interpolateRound=Or,t.transform=function(e){var r=i.createElementNS(t.ns.prefix.svg,"g");return(t.transform=function(t){if(null!=t){r.setAttribute("transform",t);var e=r.transform.baseVal.consolidate()}return new zr(e?e.matrix:Fr)})(e)},zr.prototype.toString=function(){return"translate("+this.translate+")rotate("+this.rotate+")skewX("+this.skew+")scale("+this.scale+")"};var Fr={a:1,b:0,c:0,d:1,e:0,f:0};function Br(t){return t.length?t.pop()+",":""}function Nr(e,r){var n=[],i=[];return e=t.transform(e),r=t.transform(r),function(t,e,r,n){if(t[0]!==e[0]||t[1]!==e[1]){var 
i=r.push("translate(",null,",",null,")");n.push({i:i-4,x:dr(t[0],e[0])},{i:i-2,x:dr(t[1],e[1])})}else(e[0]||e[1])&&r.push("translate("+e+")")}(e.translate,r.translate,n,i),function(t,e,r,n){t!==e?(t-e>180?e+=360:e-t>180&&(t+=360),n.push({i:r.push(Br(r)+"rotate(",null,")")-2,x:dr(t,e)})):e&&r.push(Br(r)+"rotate("+e+")")}(e.rotate,r.rotate,n,i),function(t,e,r,n){t!==e?n.push({i:r.push(Br(r)+"skewX(",null,")")-2,x:dr(t,e)}):e&&r.push(Br(r)+"skewX("+e+")")}(e.skew,r.skew,n,i),function(t,e,r,n){if(t[0]!==e[0]||t[1]!==e[1]){var i=r.push(Br(r)+"scale(",null,",",null,")");n.push({i:i-4,x:dr(t[0],e[0])},{i:i-2,x:dr(t[1],e[1])})}else 1===e[0]&&1===e[1]||r.push(Br(r)+"scale("+e+")")}(e.scale,r.scale,n,i),e=r=null,function(t){for(var e,r=-1,a=i.length;++r0?n=t:(e.c=null,e.t=NaN,e=null,l.end({type:"end",alpha:n=0})):t>0&&(l.start({type:"start",alpha:n=t}),e=ve(s.tick)),s):n},s.start=function(){var t,e,r,n=v.length,l=y.length,u=c[0],d=c[1];for(t=0;t=0;)r.push(i[n])}function $r(t,e){for(var r=[t],n=[];null!=(t=r.pop());)if(n.push(t),(a=t.children)&&(i=a.length))for(var i,a,o=-1;++o=0;)o.push(u=c[l]),u.parent=a,u.depth=a.depth+1;r&&(a.value=0),a.children=c}else r&&(a.value=+r.call(n,a,a.depth)||0),delete a.children;return $r(i,(function(e){var n,i;t&&(n=e.children)&&n.sort(t),r&&(i=e.parent)&&(i.value+=e.value)})),s}return n.sort=function(e){return arguments.length?(t=e,n):t},n.children=function(t){return arguments.length?(e=t,n):e},n.value=function(t){return arguments.length?(r=t,n):r},n.revalue=function(t){return r&&(Qr(t,(function(t){t.children&&(t.value=0)})),$r(t,(function(t){var e;t.children||(t.value=+r.call(n,t,t.depth)||0),(e=t.parent)&&(e.value+=t.value)}))),t},n},t.layout.partition=function(){var e=t.layout.hierarchy(),r=[1,1];function n(t,n){var i=e.call(this,t,n);return function t(e,r,n,i){var a=e.children;if(e.x=r,e.y=e.depth*i,e.dx=n,e.dy=i,a&&(o=a.length)){var o,s,l,c=-1;for(n=e.value?n/e.value:0;++cs&&(s=n),o.push(n)}for(r=0;ri&&(n=r,i=e);return n}function 
dn(t){return t.reduce(mn,0)}function mn(t,e){return t+e[1]}function gn(t,e){return vn(t,Math.ceil(Math.log(e.length)/Math.LN2+1))}function vn(t,e){for(var r=-1,n=+t[0],i=(t[1]-n)/e,a=[];++r<=e;)a[r]=i*r+n;return a}function yn(e){return[t.min(e),t.max(e)]}function xn(t,e){return t.value-e.value}function bn(t,e){var r=t._pack_next;t._pack_next=e,e._pack_prev=t,e._pack_next=r,r._pack_prev=e}function _n(t,e){t._pack_next=e,e._pack_prev=t}function wn(t,e){var r=e.x-t.x,n=e.y-t.y,i=t.r+e.r;return.999*i*i>r*r+n*n}function Tn(t){if((e=t.children)&&(l=e.length)){var e,r,n,i,a,o,s,l,c=1/0,u=-1/0,f=1/0,h=-1/0;if(e.forEach(kn),(r=e[0]).x=-r.r,r.y=0,x(r),l>1&&((n=e[1]).x=n.r,n.y=0,x(n),l>2))for(Mn(r,n,i=e[2]),x(i),bn(r,i),r._pack_prev=i,bn(i,n),n=r._pack_next,a=3;a0)for(o=-1;++o=f[0]&&l<=f[1]&&((s=c[t.bisect(h,l,1,d)-1]).y+=m,s.push(a[o]));return c}return a.value=function(t){return arguments.length?(r=t,a):r},a.range=function(t){return arguments.length?(n=ce(t),a):n},a.bins=function(t){return arguments.length?(i="number"==typeof t?function(e){return vn(e,t)}:ce(t),a):i},a.frequency=function(t){return arguments.length?(e=!!t,a):e},a},t.layout.pack=function(){var e,r=t.layout.hierarchy().sort(xn),n=0,i=[1,1];function a(t,a){var o=r.call(this,t,a),s=o[0],l=i[0],c=i[1],u=null==e?Math.sqrt:"function"==typeof e?e:function(){return e};if(s.x=s.y=0,$r(s,(function(t){t.r=+u(t.value)})),$r(s,Tn),n){var f=n*(e?1:Math.max(2*s.r/l,2*s.r/c))/2;$r(s,(function(t){t.r+=f})),$r(s,Tn),$r(s,(function(t){t.r-=f}))}return function t(e,r,n,i){var a=e.children;if(e.x=r+=i*e.x,e.y=n+=i*e.y,e.r*=i,a)for(var o=-1,s=a.length;++op.x&&(p=t),t.depth>d.depth&&(d=t)}));var m=r(h,p)/2-h.x,g=n[0]/(p.x+r(p,h)/2+m),v=n[1]/(d.depth||1);Qr(u,(function(t){t.x=(t.x+m)*g,t.y=t.depth*v}))}return c}function o(t){var e=t.children,n=t.parent.children,i=t.i?n[t.i-1]:null;if(e.length){!function(t){var e,r=0,n=0,i=t.children,a=i.length;for(;--a>=0;)(e=i[a]).z+=r,e.m+=r,r+=e.s+(n+=e.c)}(t);var 
a=(e[0].z+e[e.length-1].z)/2;i?(t.z=i.z+r(t._,i._),t.m=t.z-a):t.z=a}else i&&(t.z=i.z+r(t._,i._));t.parent.A=function(t,e,n){if(e){for(var i,a=t,o=t,s=e,l=a.parent.children[0],c=a.m,u=o.m,f=s.m,h=l.m;s=Ln(s),a=En(a),s&&a;)l=En(l),(o=Ln(o)).a=t,(i=s.z+f-a.z-c+r(s._,a._))>0&&(Cn(Pn(s,t,n),t,i),c+=i,u+=i),f+=s.m,c+=a.m,h+=l.m,u+=o.m;s&&!Ln(o)&&(o.t=s,o.m+=f-u),a&&!En(l)&&(l.t=a,l.m+=c-h,n=t)}return n}(t,i,t.parent.A||n[0])}function s(t){t._.x=t.z+t.parent.m,t.m+=t.parent.m}function l(t){t.x*=n[0],t.y=t.depth*n[1]}return a.separation=function(t){return arguments.length?(r=t,a):r},a.size=function(t){return arguments.length?(i=null==(n=t)?l:null,a):i?null:n},a.nodeSize=function(t){return arguments.length?(i=null==(n=t)?null:l,a):i?n:null},Kr(a,e)},t.layout.cluster=function(){var e=t.layout.hierarchy().sort(null).value(null),r=Sn,n=[1,1],i=!1;function a(a,o){var s,l=e.call(this,a,o),c=l[0],u=0;$r(c,(function(e){var n=e.children;n&&n.length?(e.x=function(t){return t.reduce((function(t,e){return t+e.x}),0)/t.length}(n),e.y=function(e){return 1+t.max(e,(function(t){return t.y}))}(n)):(e.x=s?u+=r(e,s):0,e.y=0,s=e)}));var f=function t(e){var r=e.children;return r&&r.length?t(r[0]):e}(c),h=function t(e){var r,n=e.children;return n&&(r=n.length)?t(n[r-1]):e}(c),p=f.x-r(f,h)/2,d=h.x+r(h,f)/2;return $r(c,i?function(t){t.x=(t.x-c.x)*n[0],t.y=(c.y-t.y)*n[1]}:function(t){t.x=(t.x-p)/(d-p)*n[0],t.y=(1-(c.y?t.y/c.y:1))*n[1]}),l}return a.separation=function(t){return arguments.length?(r=t,a):r},a.size=function(t){return arguments.length?(i=null==(n=t),a):i?null:n},a.nodeSize=function(t){return arguments.length?(i=null!=(n=t),a):i?n:null},Kr(a,e)},t.layout.treemap=function(){var e,r=t.layout.hierarchy(),n=Math.round,i=[1,1],a=null,o=In,s=!1,l="squarify",c=.5*(1+Math.sqrt(5));function u(t,e){for(var 
r,n,i=-1,a=t.length;++i0;)s.push(r=c[i-1]),s.area+=r.area,"squarify"!==l||(n=p(s,m))<=h?(c.pop(),h=n):(s.area-=s.pop().area,d(s,m,a,!1),m=Math.min(a.dx,a.dy),s.length=s.area=0,h=1/0);s.length&&(d(s,m,a,!0),s.length=s.area=0),e.forEach(f)}}function h(t){var e=t.children;if(e&&e.length){var r,n=o(t),i=e.slice(),a=[];for(u(i,n.dx*n.dy/t.value),a.area=0;r=i.pop();)a.push(r),a.area+=r.area,null!=r.z&&(d(a,r.z?n.dx:n.dy,n,!i.length),a.length=a.area=0);e.forEach(h)}}function p(t,e){for(var r,n=t.area,i=0,a=1/0,o=-1,s=t.length;++oi&&(i=r));return e*=e,(n*=n)?Math.max(e*i*c/n,n/(e*a*c)):1/0}function d(t,e,r,i){var a,o=-1,s=t.length,l=r.x,c=r.y,u=e?n(t.area/e):0;if(e==r.dx){for((i||u>r.dy)&&(u=r.dy);++or.dx)&&(u=r.dx);++o1);return t+e*r*Math.sqrt(-2*Math.log(i)/i)}},logNormal:function(){var e=t.random.normal.apply(t,arguments);return function(){return Math.exp(e())}},bates:function(e){var r=t.random.irwinHall(e);return function(){return r()/e}},irwinHall:function(t){return function(){for(var e=0,r=0;r2?jn:Rn,s=i?Ur:jr;return a=t(e,r,s,n),o=t(r,e,s,yr),l}function l(t){return a(t)}return l.invert=function(t){return o(t)},l.domain=function(t){return arguments.length?(e=t.map(Number),s()):e},l.range=function(t){return arguments.length?(r=t,s()):r},l.rangeRound=function(t){return l.range(t).interpolate(Or)},l.clamp=function(t){return arguments.length?(i=t,s()):i},l.interpolate=function(t){return arguments.length?(n=t,s()):n},l.ticks=function(t){return qn(e,t)},l.tickFormat=function(t,r){return d3_scale_linearTickFormat(e,t,r)},l.nice=function(t){return Vn(e,t),s()},l.copy=function(){return t(e,r,n,i)},s()}([0,1],[0,1],yr,!1)};t.scale.log=function(){return function t(e,r,n,i){function a(t){return(n?Math.log(t<0?0:t):-Math.log(t>0?0:-t))/Math.log(r)}function o(t){return n?Math.pow(r,t):-Math.pow(r,-t)}function s(t){return e(a(t))}return s.invert=function(t){return o(e.invert(t))},s.domain=function(t){return 
arguments.length?(n=t[0]>=0,e.domain((i=t.map(Number)).map(a)),s):i},s.base=function(t){return arguments.length?(r=+t,e.domain(i.map(a)),s):r},s.nice=function(){var t=Fn(i.map(a),n?Math:Gn);return e.domain(t),i=t.map(o),s},s.ticks=function(){var t=zn(i),e=[],s=t[0],l=t[1],c=Math.floor(a(s)),u=Math.ceil(a(l)),f=r%1?2:r;if(isFinite(u-c)){if(n){for(;c0;h--)e.push(o(c)*h);for(c=0;e[c]l;u--);e=e.slice(c,u)}return e},s.copy=function(){return t(e.copy(),r,n,i)},Un(s,e)}(t.scale.linear().domain([0,1]),10,!0,[1,10])};var Gn={floor:function(t){return-Math.ceil(-t)},ceil:function(t){return-Math.floor(-t)}};function Yn(t){return function(e){return e<0?-Math.pow(-e,t):Math.pow(e,t)}}t.scale.pow=function(){return function t(e,r,n){var i=Yn(r),a=Yn(1/r);function o(t){return e(i(t))}return o.invert=function(t){return a(e.invert(t))},o.domain=function(t){return arguments.length?(e.domain((n=t.map(Number)).map(i)),o):n},o.ticks=function(t){return qn(n,t)},o.tickFormat=function(t,e){return d3_scale_linearTickFormat(n,t,e)},o.nice=function(t){return o.domain(Vn(n,t))},o.exponent=function(t){return arguments.length?(i=Yn(r=t),a=Yn(1/r),e.domain(n.map(i)),o):r},o.copy=function(){return t(e.copy(),r,n)},Un(o,e)}(t.scale.linear(),1,[0,1])},t.scale.sqrt=function(){return t.scale.pow().exponent(.5)},t.scale.ordinal=function(){return function e(r,n){var i,a,o;function s(t){return a[((i.get(t)||("range"===n.t?i.set(t,r.push(t)):NaN))-1)%a.length]}function l(e,n){return t.range(r.length).map((function(t){return e+n*t}))}return s.domain=function(t){if(!arguments.length)return r;r=[],i=new _;for(var e,a=-1,o=t.length;++a0?i[t-1]:r[0],tf?0:1;if(c=St)return l(c,p)+(s?l(s,1-p):"")+"Z";var d,m,g,v,y,x,b,_,w,T,k,A,M=0,S=0,E=[];if((v=(+o.apply(this,arguments)||0)/2)&&(g=n===Qn?Math.sqrt(s*s+c*c):+n.apply(this,arguments),p||(S*=-1),c&&(S=Pt(g/c*Math.sin(v))),s&&(M=Pt(g/s*Math.sin(v)))),c){y=c*Math.cos(u+S),x=c*Math.sin(u+S),b=c*Math.cos(f-S),_=c*Math.sin(f-S);var 
L=Math.abs(f-u-2*S)<=At?0:1;if(S&&ii(y,x,b,_)===p^L){var C=(u+f)/2;y=c*Math.cos(C),x=c*Math.sin(C),b=_=null}}else y=x=0;if(s){w=s*Math.cos(f-M),T=s*Math.sin(f-M),k=s*Math.cos(u+M),A=s*Math.sin(u+M);var P=Math.abs(u-f+2*M)<=At?0:1;if(M&&ii(w,T,k,A)===1-p^P){var I=(u+f)/2;w=s*Math.cos(I),T=s*Math.sin(I),k=A=null}}else w=T=0;if(h>kt&&(d=Math.min(Math.abs(c-s)/2,+r.apply(this,arguments)))>.001){m=s0?0:1}function ai(t,e,r,n,i){var a=t[0]-e[0],o=t[1]-e[1],s=(i?n:-n)/Math.sqrt(a*a+o*o),l=s*o,c=-s*a,u=t[0]+l,f=t[1]+c,h=e[0]+l,p=e[1]+c,d=(u+h)/2,m=(f+p)/2,g=h-u,v=p-f,y=g*g+v*v,x=r-n,b=u*p-h*f,_=(v<0?-1:1)*Math.sqrt(Math.max(0,x*x*y-b*b)),w=(b*v-g*_)/y,T=(-b*g-v*_)/y,k=(b*v+g*_)/y,A=(-b*g+v*_)/y,M=w-d,S=T-m,E=k-d,L=A-m;return M*M+S*S>E*E+L*L&&(w=k,T=A),[[w-l,T-c],[w*r/x,T*r/x]]}function oi(){return!0}function si(t){var e=_e,r=we,n=oi,i=ci,a=i.key,o=.7;function s(a){var s,l=[],c=[],u=-1,f=a.length,h=ce(e),p=ce(r);function d(){l.push("M",i(t(c),o))}for(;++u1&&i.push("H",n[0]);return i.join("")},"step-before":fi,"step-after":hi,basis:mi,"basis-open":function(t){if(t.length<4)return ci(t);var e,r=[],n=-1,i=t.length,a=[0],o=[0];for(;++n<3;)e=t[n],a.push(e[0]),o.push(e[1]);r.push(gi(xi,a)+","+gi(xi,o)),--n;for(;++n9&&(i=3*e/Math.sqrt(i),o[s]=i*r,o[s+1]=i*n));s=-1;for(;++s<=l;)i=(t[Math.min(l,s+1)][0]-t[Math.max(0,s-1)][0])/(6*(1+o[s]*o[s])),a.push([i||0,o[s]*i||0]);return a}(t))}});function ci(t){return t.length>1?t.join("L"):t+"Z"}function ui(t){return t.join("L")+"Z"}function fi(t){for(var e=0,r=t.length,n=t[0],i=[n[0],",",n[1]];++e1){s=e[1],a=t[l],l++,n+="C"+(i[0]+o[0])+","+(i[1]+o[1])+","+(a[0]-s[0])+","+(a[1]-s[1])+","+a[0]+","+a[1];for(var c=2;cAt)+",1 "+e}function l(t,e,r,n){return"Q 0,0 "+n}return a.radius=function(t){return arguments.length?(r=ce(t),a):r},a.source=function(e){return arguments.length?(t=ce(e),a):t},a.target=function(t){return arguments.length?(e=ce(t),a):e},a.startAngle=function(t){return arguments.length?(n=ce(t),a):n},a.endAngle=function(t){return 
arguments.length?(i=ce(t),a):i},a},t.svg.diagonal=function(){var t=ki,e=Ai,r=Si;function n(n,i){var a=t.call(this,n,i),o=e.call(this,n,i),s=(a.y+o.y)/2,l=[a,{x:a.x,y:s},{x:o.x,y:s},o];return"M"+(l=l.map(r))[0]+"C"+l[1]+" "+l[2]+" "+l[3]}return n.source=function(e){return arguments.length?(t=ce(e),n):t},n.target=function(t){return arguments.length?(e=ce(t),n):e},n.projection=function(t){return arguments.length?(r=t,n):r},n},t.svg.diagonal.radial=function(){var e=t.svg.diagonal(),r=Si,n=e.projection;return e.projection=function(t){return arguments.length?n(Ei(r=t)):r},e},t.svg.symbol=function(){var t=Ci,e=Li;function r(r,n){return(Ii.get(t.call(this,r,n))||Pi)(e.call(this,r,n))}return r.type=function(e){return arguments.length?(t=ce(e),r):t},r.size=function(t){return arguments.length?(e=ce(t),r):e},r};var Ii=t.map({circle:Pi,cross:function(t){var e=Math.sqrt(t/5)/2;return"M"+-3*e+","+-e+"H"+-e+"V"+-3*e+"H"+e+"V"+-e+"H"+3*e+"V"+e+"H"+e+"V"+3*e+"H"+-e+"V"+e+"H"+-3*e+"Z"},diamond:function(t){var e=Math.sqrt(t/(2*zi)),r=e*zi;return"M0,"+-e+"L"+r+",0 0,"+e+" "+-r+",0Z"},square:function(t){var e=Math.sqrt(t)/2;return"M"+-e+","+-e+"L"+e+","+-e+" "+e+","+e+" "+-e+","+e+"Z"},"triangle-down":function(t){var e=Math.sqrt(t/Oi),r=e*Oi/2;return"M0,"+r+"L"+e+","+-r+" "+-e+","+-r+"Z"},"triangle-up":function(t){var e=Math.sqrt(t/Oi),r=e*Oi/2;return"M0,"+-r+"L"+e+","+r+" "+-e+","+r+"Z"}});t.svg.symbolTypes=Ii.keys();var Oi=Math.sqrt(3),zi=Math.tan(30*Lt);Y.transition=function(t){for(var e,r,n=Bi||++Ui,i=qi(t),a=[],o=Ni||{time:Date.now(),ease:Er,delay:0,duration:250},s=-1,l=this.length;++s0;)c[--h].call(t,o);if(a>=1)return f.event&&f.event.end.call(t,t.__data__,e),--u.count?delete u[n]:delete t[r],1}f||(a=i.time,o=ve((function(t){var e=f.delay;if(o.t=e+a,e<=t)return h(t-e);o.c=h}),0,a),f=u[n]={tween:new _,time:a,timer:o,delay:i.delay,duration:i.duration,ease:i.ease,index:e},i=null,++u.count)}ji.call=Y.call,ji.empty=Y.empty,ji.node=Y.node,ji.size=Y.size,t.transition=function(e,r){return 
e&&e.transition?Bi?e.transition(r):e:t.selection().transition(e)},t.transition.prototype=ji,ji.select=function(t){var e,r,n,i=this.id,a=this.namespace,o=[];t=W(t);for(var s=-1,l=this.length;++srect,.s>rect").attr("width",s[1]-s[0])}function m(t){t.select(".extent").attr("y",l[0]),t.selectAll(".extent,.e>rect,.w>rect").attr("height",l[1]-l[0])}function g(){var f,g,v=this,y=t.select(t.event.target),x=n.of(v,arguments),b=t.select(v),_=y.datum(),w=!/^(n|s)$/.test(_)&&i,T=!/^(e|w)$/.test(_)&&a,k=y.classed("extent"),A=bt(v),M=t.mouse(v),S=t.select(o(v)).on("keydown.brush",C).on("keyup.brush",P);if(t.event.changedTouches?S.on("touchmove.brush",I).on("touchend.brush",z):S.on("mousemove.brush",I).on("mouseup.brush",z),b.interrupt().selectAll("*").interrupt(),k)M[0]=s[0]-M[0],M[1]=l[0]-M[1];else if(_){var E=+/w$/.test(_),L=+/^n/.test(_);g=[s[1-E]-M[0],l[1-L]-M[1]],M[0]=s[E],M[1]=l[L]}else t.event.altKey&&(f=M.slice());function C(){32==t.event.keyCode&&(k||(f=null,M[0]-=s[1],M[1]-=l[1],k=2),F())}function P(){32==t.event.keyCode&&2==k&&(M[0]+=s[1],M[1]+=l[1],k=0,F())}function I(){var e=t.mouse(v),r=!1;g&&(e[0]+=g[0],e[1]+=g[1]),k||(t.event.altKey?(f||(f=[(s[0]+s[1])/2,(l[0]+l[1])/2]),M[0]=s[+(e[0]>>1;e.dtype||(e.dtype="array"),"string"==typeof e.dtype?d=new(f(e.dtype))(g):e.dtype&&(d=e.dtype,Array.isArray(d)&&(d.length=g));for(var v=0;vr||s>1073741824){for(var h=0;he+n||w>r+n||T=A||a===o)){var s=y[i];void 0===o&&(o=s.length);for(var l=a;l=d&&u<=g&&f>=m&&f<=v&&S.push(c)}var h=x[i],p=h[4*a+0],b=h[4*a+1],M=h[4*a+2],E=h[4*a+3],P=C(h,a+1),I=.5*n,O=i+1;L(e,r,I,O,p,b||M||E||P),L(e,r+I,I,O,b,M||E||P),L(e+I,r,I,O,M,E||P),L(e+I,r+I,I,O,E,P)}}function C(t,e){for(var r=null,n=0;null===r;)if(r=t[4*e+n],++n>t.length)return null;return r}return L(0,0,1,0,0,1),S},d;function E(t,e,r,i,a){for(var o=[],s=0;s0){e+=Math.abs(a(t[0]));for(var r=1;r2){for(s=0;st[0]&&(e[0]=t[0]),e[1]>t[1]&&(e[1]=t[1]),e[2]=0))throw new Error("precision must be a positive number");var r=Math.pow(10,e||0);return 
Math.round(t*r)/r},r.radiansToLength=f,r.lengthToRadians=h,r.lengthToDegrees=function(t,e){return p(h(t,e))},r.bearingToAzimuth=function(t){var e=t%360;return e<0&&(e+=360),e},r.radiansToDegrees=p,r.degreesToRadians=function(t){return t%360*Math.PI/180},r.convertLength=function(t,e,r){if(void 0===e&&(e="kilometers"),void 0===r&&(r="kilometers"),!(t>=0))throw new Error("length must be a positive number");return f(h(t,e),r)},r.convertArea=function(t,e,n){if(void 0===e&&(e="meters"),void 0===n&&(n="kilometers"),!(t>=0))throw new Error("area must be a positive number");var i=r.areaFactors[e];if(!i)throw new Error("invalid original units");var a=r.areaFactors[n];if(!a)throw new Error("invalid final units");return t/i*a},r.isNumber=d,r.isObject=function(t){return!!t&&t.constructor===Object},r.validateBBox=function(t){if(!t)throw new Error("bbox is required");if(!Array.isArray(t))throw new Error("bbox must be an Array");if(4!==t.length&&6!==t.length)throw new Error("bbox must be an Array of 4 or 6 numbers");t.forEach((function(t){if(!d(t))throw new Error("bbox must only contain numbers")}))},r.validateId=function(t){if(!t)throw new Error("id is required");if(-1===["string","number"].indexOf(typeof t))throw new Error("id must be a number or a string")}},{}],65:[function(t,e,r){"use strict";Object.defineProperty(r,"__esModule",{value:!0});var n=t("@turf/helpers");function i(t,e,r){if(null!==t)for(var n,a,o,s,l,c,u,f,h=0,p=0,d=t.type,m="FeatureCollection"===d,g="Feature"===d,v=m?t.features.length:1,y=0;yc||p>u||d>f)return l=i,c=r,u=p,f=d,void(o=0);var m=n.lineString([l,i],t.properties);if(!1===e(m,r,a,d,o))return!1;o++,l=i}))&&void 0}}}))}function u(t,e){if(!t)throw new Error("geojson is required");l(t,(function(t,r,i){if(null!==t.geometry){var a=t.geometry.type,o=t.geometry.coordinates;switch(a){case"LineString":if(!1===e(t,r,i,0,0))return!1;break;case"Polygon":for(var s=0;si&&(i=t[o]),t[o] - * @license MIT - */function i(t,e){if(t===e)return 0;for(var 
r=t.length,n=e.length,i=0,a=Math.min(r,n);i=0;c--)if(u[c]!==f[c])return!1;for(c=u.length-1;c>=0;c--)if(s=u[c],!x(t[s],e[s],r,n))return!1;return!0}(t,e,r,n))}return r?t===e:t==e}function b(t){return"[object Arguments]"==Object.prototype.toString.call(t)}function _(t,e){if(!t||!e)return!1;if("[object RegExp]"==Object.prototype.toString.call(e))return e.test(t);try{if(t instanceof e)return!0}catch(t){}return!Error.isPrototypeOf(e)&&!0===e.call({},t)}function w(t,e,r,n){var i;if("function"!=typeof e)throw new TypeError('"block" argument must be a function');"string"==typeof r&&(n=r,r=null),i=function(t){var e;try{t()}catch(t){e=t}return e}(e),n=(r&&r.name?" ("+r.name+").":".")+(n?" "+n:"."),t&&!i&&v(i,r,"Missing expected exception"+n);var a="string"==typeof n,s=!t&&i&&!r;if((!t&&o.isError(i)&&a&&_(i,r)||s)&&v(i,r,"Got unwanted exception"+n),t&&i&&r&&!_(i,r)||!t&&i)throw i}h.AssertionError=function(t){this.name="AssertionError",this.actual=t.actual,this.expected=t.expected,this.operator=t.operator,t.message?(this.message=t.message,this.generatedMessage=!1):(this.message=function(t){return m(g(t.actual),128)+" "+t.operator+" "+m(g(t.expected),128)}(this),this.generatedMessage=!0);var e=t.stackStartFunction||v;if(Error.captureStackTrace)Error.captureStackTrace(this,e);else{var r=new Error;if(r.stack){var n=r.stack,i=d(e),a=n.indexOf("\n"+i);if(a>=0){var o=n.indexOf("\n",a+1);n=n.substring(o+1)}this.stack=n}}},o.inherits(h.AssertionError,Error),h.fail=v,h.ok=y,h.equal=function(t,e,r){t!=e&&v(t,e,r,"==",h.equal)},h.notEqual=function(t,e,r){t==e&&v(t,e,r,"!=",h.notEqual)},h.deepEqual=function(t,e,r){x(t,e,!1)||v(t,e,r,"deepEqual",h.deepEqual)},h.deepStrictEqual=function(t,e,r){x(t,e,!0)||v(t,e,r,"deepStrictEqual",h.deepStrictEqual)},h.notDeepEqual=function(t,e,r){x(t,e,!1)&&v(t,e,r,"notDeepEqual",h.notDeepEqual)},h.notDeepStrictEqual=function 
t(e,r,n){x(e,r,!0)&&v(e,r,n,"notDeepStrictEqual",t)},h.strictEqual=function(t,e,r){t!==e&&v(t,e,r,"===",h.strictEqual)},h.notStrictEqual=function(t,e,r){t===e&&v(t,e,r,"!==",h.notStrictEqual)},h.throws=function(t,e,r){w(!0,t,e,r)},h.doesNotThrow=function(t,e,r){w(!1,t,e,r)},h.ifError=function(t){if(t)throw t},h.strict=n((function t(e,r){e||v(e,!0,r,"==",t)}),h,{equal:h.strictEqual,deepEqual:h.deepStrictEqual,notEqual:h.notStrictEqual,notDeepEqual:h.notDeepStrictEqual}),h.strict.strict=h.strict;var T=Object.keys||function(t){var e=[];for(var r in t)s.call(t,r)&&e.push(r);return e}}).call(this)}).call(this,"undefined"!=typeof global?global:"undefined"!=typeof self?self:"undefined"!=typeof window?window:{})},{"object-assign":242,"util/":74}],72:[function(t,e,r){"function"==typeof Object.create?e.exports=function(t,e){t.super_=e,t.prototype=Object.create(e.prototype,{constructor:{value:t,enumerable:!1,writable:!0,configurable:!0}})}:e.exports=function(t,e){t.super_=e;var r=function(){};r.prototype=e.prototype,t.prototype=new r,t.prototype.constructor=t}},{}],73:[function(t,e,r){e.exports=function(t){return t&&"object"==typeof t&&"function"==typeof t.copy&&"function"==typeof t.fill&&"function"==typeof t.readUInt8}},{}],74:[function(t,e,r){(function(e,n){(function(){var i=/%[sdj%]/g;r.format=function(t){if(!v(t)){for(var e=[],r=0;r=a)return t;switch(t){case"%s":return String(n[r++]);case"%d":return Number(n[r++]);case"%j":try{return JSON.stringify(n[r++])}catch(t){return"[Circular]"}default:return t}})),l=n[r];r=3&&(n.depth=arguments[2]),arguments.length>=4&&(n.colors=arguments[3]),d(e)?n.showHidden=e:e&&r._extend(n,e),y(n.showHidden)&&(n.showHidden=!1),y(n.depth)&&(n.depth=2),y(n.colors)&&(n.colors=!1),y(n.customInspect)&&(n.customInspect=!0),n.colors&&(n.stylize=l),u(n,t,n.depth)}function l(t,e){var r=s.styles[e];return r?"\x1b["+s.colors[r][0]+"m"+t+"\x1b["+s.colors[r][1]+"m":t}function c(t,e){return t}function 
u(t,e,n){if(t.customInspect&&e&&T(e.inspect)&&e.inspect!==r.inspect&&(!e.constructor||e.constructor.prototype!==e)){var i=e.inspect(n,t);return v(i)||(i=u(t,i,n)),i}var a=function(t,e){if(y(e))return t.stylize("undefined","undefined");if(v(e)){var r="'"+JSON.stringify(e).replace(/^"|"$/g,"").replace(/'/g,"\\'").replace(/\\"/g,'"')+"'";return t.stylize(r,"string")}if(g(e))return t.stylize(""+e,"number");if(d(e))return t.stylize(""+e,"boolean");if(m(e))return t.stylize("null","null")}(t,e);if(a)return a;var o=Object.keys(e),s=function(t){var e={};return t.forEach((function(t,r){e[t]=!0})),e}(o);if(t.showHidden&&(o=Object.getOwnPropertyNames(e)),w(e)&&(o.indexOf("message")>=0||o.indexOf("description")>=0))return f(e);if(0===o.length){if(T(e)){var l=e.name?": "+e.name:"";return t.stylize("[Function"+l+"]","special")}if(x(e))return t.stylize(RegExp.prototype.toString.call(e),"regexp");if(_(e))return t.stylize(Date.prototype.toString.call(e),"date");if(w(e))return f(e)}var c,b="",k=!1,A=["{","}"];(p(e)&&(k=!0,A=["[","]"]),T(e))&&(b=" [Function"+(e.name?": "+e.name:"")+"]");return x(e)&&(b=" "+RegExp.prototype.toString.call(e)),_(e)&&(b=" "+Date.prototype.toUTCString.call(e)),w(e)&&(b=" "+f(e)),0!==o.length||k&&0!=e.length?n<0?x(e)?t.stylize(RegExp.prototype.toString.call(e),"regexp"):t.stylize("[Object]","special"):(t.seen.push(e),c=k?function(t,e,r,n,i){for(var a=[],o=0,s=e.length;o=0&&0,t+e.replace(/\u001b\[\d\d?m/g,"").length+1}),0)>60)return r[0]+(""===e?"":e+"\n ")+" "+t.join(",\n ")+" "+r[1];return r[0]+e+" "+t.join(", ")+" "+r[1]}(c,b,A)):A[0]+b+A[1]}function f(t){return"["+Error.prototype.toString.call(t)+"]"}function h(t,e,r,n,i,a){var 
o,s,l;if((l=Object.getOwnPropertyDescriptor(e,i)||{value:e[i]}).get?s=l.set?t.stylize("[Getter/Setter]","special"):t.stylize("[Getter]","special"):l.set&&(s=t.stylize("[Setter]","special")),E(n,i)||(o="["+i+"]"),s||(t.seen.indexOf(l.value)<0?(s=m(r)?u(t,l.value,null):u(t,l.value,r-1)).indexOf("\n")>-1&&(s=a?s.split("\n").map((function(t){return" "+t})).join("\n").substr(2):"\n"+s.split("\n").map((function(t){return" "+t})).join("\n")):s=t.stylize("[Circular]","special")),y(o)){if(a&&i.match(/^\d+$/))return s;(o=JSON.stringify(""+i)).match(/^"([a-zA-Z_][a-zA-Z_0-9]*)"$/)?(o=o.substr(1,o.length-2),o=t.stylize(o,"name")):(o=o.replace(/'/g,"\\'").replace(/\\"/g,'"').replace(/(^"|"$)/g,"'"),o=t.stylize(o,"string"))}return o+": "+s}function p(t){return Array.isArray(t)}function d(t){return"boolean"==typeof t}function m(t){return null===t}function g(t){return"number"==typeof t}function v(t){return"string"==typeof t}function y(t){return void 0===t}function x(t){return b(t)&&"[object RegExp]"===k(t)}function b(t){return"object"==typeof t&&null!==t}function _(t){return b(t)&&"[object Date]"===k(t)}function w(t){return b(t)&&("[object Error]"===k(t)||t instanceof Error)}function T(t){return"function"==typeof t}function k(t){return Object.prototype.toString.call(t)}function A(t){return t<10?"0"+t.toString(10):t.toString(10)}r.debuglog=function(t){if(y(a)&&(a=e.env.NODE_DEBUG||""),t=t.toUpperCase(),!o[t])if(new RegExp("\\b"+t+"\\b","i").test(a)){var n=e.pid;o[t]=function(){var e=r.format.apply(r,arguments);console.error("%s %d: %s",t,n,e)}}else o[t]=function(){};return 
o[t]},r.inspect=s,s.colors={bold:[1,22],italic:[3,23],underline:[4,24],inverse:[7,27],white:[37,39],grey:[90,39],black:[30,39],blue:[34,39],cyan:[36,39],green:[32,39],magenta:[35,39],red:[31,39],yellow:[33,39]},s.styles={special:"cyan",number:"yellow",boolean:"yellow",undefined:"grey",null:"bold",string:"green",date:"magenta",regexp:"red"},r.isArray=p,r.isBoolean=d,r.isNull=m,r.isNullOrUndefined=function(t){return null==t},r.isNumber=g,r.isString=v,r.isSymbol=function(t){return"symbol"==typeof t},r.isUndefined=y,r.isRegExp=x,r.isObject=b,r.isDate=_,r.isError=w,r.isFunction=T,r.isPrimitive=function(t){return null===t||"boolean"==typeof t||"number"==typeof t||"string"==typeof t||"symbol"==typeof t||void 0===t},r.isBuffer=t("./support/isBuffer");var M=["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"];function S(){var t=new Date,e=[A(t.getHours()),A(t.getMinutes()),A(t.getSeconds())].join(":");return[t.getDate(),M[t.getMonth()],e].join(" ")}function E(t,e){return Object.prototype.hasOwnProperty.call(t,e)}r.log=function(){console.log("%s - %s",S(),r.format.apply(r,arguments))},r.inherits=t("inherits"),r._extend=function(t,e){if(!e||!b(e))return t;for(var r=Object.keys(e),n=r.length;n--;)t[r[n]]=e[r[n]];return t}}).call(this)}).call(this,t("_process"),"undefined"!=typeof global?global:"undefined"!=typeof self?self:"undefined"!=typeof window?window:{})},{"./support/isBuffer":73,_process:272,inherits:72}],75:[function(t,e,r){"use strict";r.byteLength=function(t){var e=c(t),r=e[0],n=e[1];return 3*(r+n)/4-n},r.toByteArray=function(t){var e,r,n=c(t),o=n[0],s=n[1],l=new a(function(t,e,r){return 3*(e+r)/4-r}(0,o,s)),u=0,f=s>0?o-4:o;for(r=0;r>16&255,l[u++]=e>>8&255,l[u++]=255&e;2===s&&(e=i[t.charCodeAt(r)]<<2|i[t.charCodeAt(r+1)]>>4,l[u++]=255&e);1===s&&(e=i[t.charCodeAt(r)]<<10|i[t.charCodeAt(r+1)]<<4|i[t.charCodeAt(r+2)]>>2,l[u++]=e>>8&255,l[u++]=255&e);return l},r.fromByteArray=function(t){for(var 
e,r=t.length,i=r%3,a=[],o=0,s=r-i;os?s:o+16383));1===i?(e=t[r-1],a.push(n[e>>2]+n[e<<4&63]+"==")):2===i&&(e=(t[r-2]<<8)+t[r-1],a.push(n[e>>10]+n[e>>4&63]+n[e<<2&63]+"="));return a.join("")};for(var n=[],i=[],a="undefined"!=typeof Uint8Array?Uint8Array:Array,o="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",s=0,l=o.length;s0)throw new Error("Invalid string. Length must be a multiple of 4");var r=t.indexOf("=");return-1===r&&(r=e),[r,r===e?0:4-r%4]}function u(t,e,r){for(var i,a,o=[],s=e;s>18&63]+n[a>>12&63]+n[a>>6&63]+n[63&a]);return o.join("")}i["-".charCodeAt(0)]=62,i["_".charCodeAt(0)]=63},{}],76:[function(t,e,r){"use strict";function n(t,e,r,n,i){for(var a=i+1;n<=i;){var o=n+i>>>1,s=t[o];(void 0!==r?r(s,e):s-e)>=0?(a=o,i=o-1):n=o+1}return a}function i(t,e,r,n,i){for(var a=i+1;n<=i;){var o=n+i>>>1,s=t[o];(void 0!==r?r(s,e):s-e)>0?(a=o,i=o-1):n=o+1}return a}function a(t,e,r,n,i){for(var a=n-1;n<=i;){var o=n+i>>>1,s=t[o];(void 0!==r?r(s,e):s-e)<0?(a=o,n=o+1):i=o-1}return a}function o(t,e,r,n,i){for(var a=n-1;n<=i;){var o=n+i>>>1,s=t[o];(void 0!==r?r(s,e):s-e)<=0?(a=o,n=o+1):i=o-1}return a}function s(t,e,r,n,i){for(;n<=i;){var a=n+i>>>1,o=t[a],s=void 0!==r?r(o,e):o-e;if(0===s)return a;s<=0?n=a+1:i=a-1}return-1}function l(t,e,r,n,i,a){return"function"==typeof r?a(t,e,r,void 0===n?0:0|n,void 0===i?t.length-1:0|i):a(t,e,void 0,void 0===r?0:0|r,void 0===n?t.length-1:0|n)}e.exports={ge:function(t,e,r,i,a){return l(t,e,r,i,a,n)},gt:function(t,e,r,n,a){return l(t,e,r,n,a,i)},lt:function(t,e,r,n,i){return l(t,e,r,n,i,a)},le:function(t,e,r,n,i){return l(t,e,r,n,i,o)},eq:function(t,e,r,n,i){return l(t,e,r,n,i,s)}}},{}],77:[function(t,e,r){"use strict";function n(t){var e=32;return(t&=-t)&&e--,65535&t&&(e-=16),16711935&t&&(e-=8),252645135&t&&(e-=4),858993459&t&&(e-=2),1431655765&t&&(e-=1),e}r.INT_BITS=32,r.INT_MAX=2147483647,r.INT_MIN=-1<<31,r.sign=function(t){return(t>0)-(t<0)},r.abs=function(t){var e=t>>31;return(t^e)-e},r.min=function(t,e){return 
e^(t^e)&-(t65535)<<4,e|=r=((t>>>=e)>255)<<3,e|=r=((t>>>=r)>15)<<2,(e|=r=((t>>>=r)>3)<<1)|(t>>>=r)>>1},r.log10=function(t){return t>=1e9?9:t>=1e8?8:t>=1e7?7:t>=1e6?6:t>=1e5?5:t>=1e4?4:t>=1e3?3:t>=100?2:t>=10?1:0},r.popCount=function(t){return 16843009*((t=(858993459&(t-=t>>>1&1431655765))+(t>>>2&858993459))+(t>>>4)&252645135)>>>24},r.countTrailingZeros=n,r.nextPow2=function(t){return t+=0===t,--t,t|=t>>>1,t|=t>>>2,t|=t>>>4,t|=t>>>8,(t|=t>>>16)+1},r.prevPow2=function(t){return t|=t>>>1,t|=t>>>2,t|=t>>>4,t|=t>>>8,(t|=t>>>16)-(t>>>1)},r.parity=function(t){return t^=t>>>16,t^=t>>>8,t^=t>>>4,27030>>>(t&=15)&1};var i=new Array(256);!function(t){for(var e=0;e<256;++e){var r=e,n=e,i=7;for(r>>>=1;r;r>>>=1)n<<=1,n|=1&r,--i;t[e]=n<>>8&255]<<16|i[t>>>16&255]<<8|i[t>>>24&255]},r.interleave2=function(t,e){return(t=1431655765&((t=858993459&((t=252645135&((t=16711935&((t&=65535)|t<<8))|t<<4))|t<<2))|t<<1))|(e=1431655765&((e=858993459&((e=252645135&((e=16711935&((e&=65535)|e<<8))|e<<4))|e<<2))|e<<1))<<1},r.deinterleave2=function(t,e){return(t=65535&((t=16711935&((t=252645135&((t=858993459&((t=t>>>e&1431655765)|t>>>1))|t>>>2))|t>>>4))|t>>>16))<<16>>16},r.interleave3=function(t,e,r){return t=1227133513&((t=3272356035&((t=251719695&((t=4278190335&((t&=1023)|t<<16))|t<<8))|t<<4))|t<<2),(t|=(e=1227133513&((e=3272356035&((e=251719695&((e=4278190335&((e&=1023)|e<<16))|e<<8))|e<<4))|e<<2))<<1)|(r=1227133513&((r=3272356035&((r=251719695&((r=4278190335&((r&=1023)|r<<16))|r<<8))|r<<4))|r<<2))<<2},r.deinterleave3=function(t,e){return(t=1023&((t=4278190335&((t=251719695&((t=3272356035&((t=t>>>e&1227133513)|t>>>2))|t>>>4))|t>>>8))|t>>>16))<<22>>22},r.nextCombination=function(t){var e=t|t-1;return e+1|(~e&-~e)-1>>>n(t)+1}},{}],78:[function(t,e,r){"use strict";var n=t("clamp");e.exports=function(t,e){e||(e={});var r,o,s,l,c,u,f,h,p,d,m,g=null==e.cutoff?.25:e.cutoff,v=null==e.radius?8:e.radius,y=e.channel||0;if(ArrayBuffer.isView(t)||Array.isArray(t)){if(!e.width||!e.height)throw Error("For raw data 
width and height should be provided by options");r=e.width,o=e.height,l=t,u=e.stride?e.stride:Math.floor(t.length/r/o)}else window.HTMLCanvasElement&&t instanceof window.HTMLCanvasElement?(f=(h=t).getContext("2d"),r=h.width,o=h.height,p=f.getImageData(0,0,r,o),l=p.data,u=4):window.CanvasRenderingContext2D&&t instanceof window.CanvasRenderingContext2D?(h=t.canvas,f=t,r=h.width,o=h.height,p=f.getImageData(0,0,r,o),l=p.data,u=4):window.ImageData&&t instanceof window.ImageData&&(p=t,r=t.width,o=t.height,l=p.data,u=4);if(s=Math.max(r,o),window.Uint8ClampedArray&&l instanceof window.Uint8ClampedArray||window.Uint8Array&&l instanceof window.Uint8Array)for(c=l,l=Array(r*o),d=0,m=c.length;d - * @license MIT - */ -"use strict";var e=t("base64-js"),n=t("ieee754");r.Buffer=a,r.SlowBuffer=function(t){+t!=t&&(t=0);return a.alloc(+t)},r.INSPECT_MAX_BYTES=50;function i(t){if(t>2147483647)throw new RangeError('The value "'+t+'" is invalid for option "size"');var e=new Uint8Array(t);return e.__proto__=a.prototype,e}function a(t,e,r){if("number"==typeof t){if("string"==typeof e)throw new TypeError('The "string" argument must be of type string. Received type number');return l(t)}return o(t,e,r)}function o(t,e,r){if("string"==typeof t)return function(t,e){"string"==typeof e&&""!==e||(e="utf8");if(!a.isEncoding(e))throw new TypeError("Unknown encoding: "+e);var r=0|f(t,e),n=i(r),o=n.write(t,e);o!==r&&(n=n.slice(0,o));return n}(t,e);if(ArrayBuffer.isView(t))return c(t);if(null==t)throw TypeError("The first argument must be one of type string, Buffer, ArrayBuffer, Array, or Array-like Object. 
Received type "+typeof t);if(B(t,ArrayBuffer)||t&&B(t.buffer,ArrayBuffer))return function(t,e,r){if(e<0||t.byteLength=2147483647)throw new RangeError("Attempt to allocate Buffer larger than maximum size: 0x"+2147483647..toString(16)+" bytes");return 0|t}function f(t,e){if(a.isBuffer(t))return t.length;if(ArrayBuffer.isView(t)||B(t,ArrayBuffer))return t.byteLength;if("string"!=typeof t)throw new TypeError('The "string" argument must be one of type string, Buffer, or ArrayBuffer. Received type '+typeof t);var r=t.length,n=arguments.length>2&&!0===arguments[2];if(!n&&0===r)return 0;for(var i=!1;;)switch(e){case"ascii":case"latin1":case"binary":return r;case"utf8":case"utf-8":return D(t).length;case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return 2*r;case"hex":return r>>>1;case"base64":return R(t).length;default:if(i)return n?-1:D(t).length;e=(""+e).toLowerCase(),i=!0}}function h(t,e,r){var n=!1;if((void 0===e||e<0)&&(e=0),e>this.length)return"";if((void 0===r||r>this.length)&&(r=this.length),r<=0)return"";if((r>>>=0)<=(e>>>=0))return"";for(t||(t="utf8");;)switch(t){case"hex":return M(this,e,r);case"utf8":case"utf-8":return T(this,e,r);case"ascii":return k(this,e,r);case"latin1":case"binary":return A(this,e,r);case"base64":return w(this,e,r);case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return S(this,e,r);default:if(n)throw new TypeError("Unknown encoding: "+t);t=(t+"").toLowerCase(),n=!0}}function p(t,e,r){var n=t[e];t[e]=t[r],t[r]=n}function d(t,e,r,n,i){if(0===t.length)return-1;if("string"==typeof r?(n=r,r=0):r>2147483647?r=2147483647:r<-2147483648&&(r=-2147483648),N(r=+r)&&(r=i?0:t.length-1),r<0&&(r=t.length+r),r>=t.length){if(i)return-1;r=t.length-1}else if(r<0){if(!i)return-1;r=0}if("string"==typeof e&&(e=a.from(e,n)),a.isBuffer(e))return 0===e.length?-1:m(t,e,r,n,i);if("number"==typeof e)return e&=255,"function"==typeof 
Uint8Array.prototype.indexOf?i?Uint8Array.prototype.indexOf.call(t,e,r):Uint8Array.prototype.lastIndexOf.call(t,e,r):m(t,[e],r,n,i);throw new TypeError("val must be string, number or Buffer")}function m(t,e,r,n,i){var a,o=1,s=t.length,l=e.length;if(void 0!==n&&("ucs2"===(n=String(n).toLowerCase())||"ucs-2"===n||"utf16le"===n||"utf-16le"===n)){if(t.length<2||e.length<2)return-1;o=2,s/=2,l/=2,r/=2}function c(t,e){return 1===o?t[e]:t.readUInt16BE(e*o)}if(i){var u=-1;for(a=r;as&&(r=s-l),a=r;a>=0;a--){for(var f=!0,h=0;hi&&(n=i):n=i;var a=e.length;n>a/2&&(n=a/2);for(var o=0;o>8,i=r%256,a.push(i),a.push(n);return a}(e,t.length-r),t,r,n)}function w(t,r,n){return 0===r&&n===t.length?e.fromByteArray(t):e.fromByteArray(t.slice(r,n))}function T(t,e,r){r=Math.min(t.length,r);for(var n=[],i=e;i239?4:c>223?3:c>191?2:1;if(i+f<=r)switch(f){case 1:c<128&&(u=c);break;case 2:128==(192&(a=t[i+1]))&&(l=(31&c)<<6|63&a)>127&&(u=l);break;case 3:a=t[i+1],o=t[i+2],128==(192&a)&&128==(192&o)&&(l=(15&c)<<12|(63&a)<<6|63&o)>2047&&(l<55296||l>57343)&&(u=l);break;case 4:a=t[i+1],o=t[i+2],s=t[i+3],128==(192&a)&&128==(192&o)&&128==(192&s)&&(l=(15&c)<<18|(63&a)<<12|(63&o)<<6|63&s)>65535&&l<1114112&&(u=l)}null===u?(u=65533,f=1):u>65535&&(u-=65536,n.push(u>>>10&1023|55296),u=56320|1023&u),n.push(u),i+=f}return function(t){var e=t.length;if(e<=4096)return String.fromCharCode.apply(String,t);var r="",n=0;for(;ne&&(t+=" ... "),""},a.prototype.compare=function(t,e,r,n,i){if(B(t,Uint8Array)&&(t=a.from(t,t.offset,t.byteLength)),!a.isBuffer(t))throw new TypeError('The "target" argument must be one of type Buffer or Uint8Array. 
Received type '+typeof t);if(void 0===e&&(e=0),void 0===r&&(r=t?t.length:0),void 0===n&&(n=0),void 0===i&&(i=this.length),e<0||r>t.length||n<0||i>this.length)throw new RangeError("out of range index");if(n>=i&&e>=r)return 0;if(n>=i)return-1;if(e>=r)return 1;if(this===t)return 0;for(var o=(i>>>=0)-(n>>>=0),s=(r>>>=0)-(e>>>=0),l=Math.min(o,s),c=this.slice(n,i),u=t.slice(e,r),f=0;f>>=0,isFinite(r)?(r>>>=0,void 0===n&&(n="utf8")):(n=r,r=void 0)}var i=this.length-e;if((void 0===r||r>i)&&(r=i),t.length>0&&(r<0||e<0)||e>this.length)throw new RangeError("Attempt to write outside buffer bounds");n||(n="utf8");for(var a=!1;;)switch(n){case"hex":return g(this,t,e,r);case"utf8":case"utf-8":return v(this,t,e,r);case"ascii":return y(this,t,e,r);case"latin1":case"binary":return x(this,t,e,r);case"base64":return b(this,t,e,r);case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return _(this,t,e,r);default:if(a)throw new TypeError("Unknown encoding: "+n);n=(""+n).toLowerCase(),a=!0}},a.prototype.toJSON=function(){return{type:"Buffer",data:Array.prototype.slice.call(this._arr||this,0)}};function k(t,e,r){var n="";r=Math.min(t.length,r);for(var i=e;in)&&(r=n);for(var i="",a=e;ar)throw new RangeError("Trying to access beyond buffer length")}function L(t,e,r,n,i,o){if(!a.isBuffer(t))throw new TypeError('"buffer" argument must be a Buffer instance');if(e>i||et.length)throw new RangeError("Index out of range")}function C(t,e,r,n,i,a){if(r+n>t.length)throw new RangeError("Index out of range");if(r<0)throw new RangeError("Index out of range")}function P(t,e,r,i,a){return e=+e,r>>>=0,a||C(t,0,r,4),n.write(t,e,r,i,23,4),r+4}function I(t,e,r,i,a){return e=+e,r>>>=0,a||C(t,0,r,8),n.write(t,e,r,i,52,8),r+8}a.prototype.slice=function(t,e){var r=this.length;(t=~~t)<0?(t+=r)<0&&(t=0):t>r&&(t=r),(e=void 0===e?r:~~e)<0?(e+=r)<0&&(e=0):e>r&&(e=r),e>>=0,e>>>=0,r||E(t,e,this.length);for(var n=this[t],i=1,a=0;++a>>=0,e>>>=0,r||E(t,e,this.length);for(var 
n=this[t+--e],i=1;e>0&&(i*=256);)n+=this[t+--e]*i;return n},a.prototype.readUInt8=function(t,e){return t>>>=0,e||E(t,1,this.length),this[t]},a.prototype.readUInt16LE=function(t,e){return t>>>=0,e||E(t,2,this.length),this[t]|this[t+1]<<8},a.prototype.readUInt16BE=function(t,e){return t>>>=0,e||E(t,2,this.length),this[t]<<8|this[t+1]},a.prototype.readUInt32LE=function(t,e){return t>>>=0,e||E(t,4,this.length),(this[t]|this[t+1]<<8|this[t+2]<<16)+16777216*this[t+3]},a.prototype.readUInt32BE=function(t,e){return t>>>=0,e||E(t,4,this.length),16777216*this[t]+(this[t+1]<<16|this[t+2]<<8|this[t+3])},a.prototype.readIntLE=function(t,e,r){t>>>=0,e>>>=0,r||E(t,e,this.length);for(var n=this[t],i=1,a=0;++a=(i*=128)&&(n-=Math.pow(2,8*e)),n},a.prototype.readIntBE=function(t,e,r){t>>>=0,e>>>=0,r||E(t,e,this.length);for(var n=e,i=1,a=this[t+--n];n>0&&(i*=256);)a+=this[t+--n]*i;return a>=(i*=128)&&(a-=Math.pow(2,8*e)),a},a.prototype.readInt8=function(t,e){return t>>>=0,e||E(t,1,this.length),128&this[t]?-1*(255-this[t]+1):this[t]},a.prototype.readInt16LE=function(t,e){t>>>=0,e||E(t,2,this.length);var r=this[t]|this[t+1]<<8;return 32768&r?4294901760|r:r},a.prototype.readInt16BE=function(t,e){t>>>=0,e||E(t,2,this.length);var r=this[t+1]|this[t]<<8;return 32768&r?4294901760|r:r},a.prototype.readInt32LE=function(t,e){return t>>>=0,e||E(t,4,this.length),this[t]|this[t+1]<<8|this[t+2]<<16|this[t+3]<<24},a.prototype.readInt32BE=function(t,e){return t>>>=0,e||E(t,4,this.length),this[t]<<24|this[t+1]<<16|this[t+2]<<8|this[t+3]},a.prototype.readFloatLE=function(t,e){return t>>>=0,e||E(t,4,this.length),n.read(this,t,!0,23,4)},a.prototype.readFloatBE=function(t,e){return t>>>=0,e||E(t,4,this.length),n.read(this,t,!1,23,4)},a.prototype.readDoubleLE=function(t,e){return t>>>=0,e||E(t,8,this.length),n.read(this,t,!0,52,8)},a.prototype.readDoubleBE=function(t,e){return 
t>>>=0,e||E(t,8,this.length),n.read(this,t,!1,52,8)},a.prototype.writeUIntLE=function(t,e,r,n){(t=+t,e>>>=0,r>>>=0,n)||L(this,t,e,r,Math.pow(2,8*r)-1,0);var i=1,a=0;for(this[e]=255&t;++a>>=0,r>>>=0,n)||L(this,t,e,r,Math.pow(2,8*r)-1,0);var i=r-1,a=1;for(this[e+i]=255&t;--i>=0&&(a*=256);)this[e+i]=t/a&255;return e+r},a.prototype.writeUInt8=function(t,e,r){return t=+t,e>>>=0,r||L(this,t,e,1,255,0),this[e]=255&t,e+1},a.prototype.writeUInt16LE=function(t,e,r){return t=+t,e>>>=0,r||L(this,t,e,2,65535,0),this[e]=255&t,this[e+1]=t>>>8,e+2},a.prototype.writeUInt16BE=function(t,e,r){return t=+t,e>>>=0,r||L(this,t,e,2,65535,0),this[e]=t>>>8,this[e+1]=255&t,e+2},a.prototype.writeUInt32LE=function(t,e,r){return t=+t,e>>>=0,r||L(this,t,e,4,4294967295,0),this[e+3]=t>>>24,this[e+2]=t>>>16,this[e+1]=t>>>8,this[e]=255&t,e+4},a.prototype.writeUInt32BE=function(t,e,r){return t=+t,e>>>=0,r||L(this,t,e,4,4294967295,0),this[e]=t>>>24,this[e+1]=t>>>16,this[e+2]=t>>>8,this[e+3]=255&t,e+4},a.prototype.writeIntLE=function(t,e,r,n){if(t=+t,e>>>=0,!n){var i=Math.pow(2,8*r-1);L(this,t,e,r,i-1,-i)}var a=0,o=1,s=0;for(this[e]=255&t;++a>0)-s&255;return e+r},a.prototype.writeIntBE=function(t,e,r,n){if(t=+t,e>>>=0,!n){var i=Math.pow(2,8*r-1);L(this,t,e,r,i-1,-i)}var a=r-1,o=1,s=0;for(this[e+a]=255&t;--a>=0&&(o*=256);)t<0&&0===s&&0!==this[e+a+1]&&(s=1),this[e+a]=(t/o>>0)-s&255;return e+r},a.prototype.writeInt8=function(t,e,r){return t=+t,e>>>=0,r||L(this,t,e,1,127,-128),t<0&&(t=255+t+1),this[e]=255&t,e+1},a.prototype.writeInt16LE=function(t,e,r){return t=+t,e>>>=0,r||L(this,t,e,2,32767,-32768),this[e]=255&t,this[e+1]=t>>>8,e+2},a.prototype.writeInt16BE=function(t,e,r){return t=+t,e>>>=0,r||L(this,t,e,2,32767,-32768),this[e]=t>>>8,this[e+1]=255&t,e+2},a.prototype.writeInt32LE=function(t,e,r){return t=+t,e>>>=0,r||L(this,t,e,4,2147483647,-2147483648),this[e]=255&t,this[e+1]=t>>>8,this[e+2]=t>>>16,this[e+3]=t>>>24,e+4},a.prototype.writeInt32BE=function(t,e,r){return 
t=+t,e>>>=0,r||L(this,t,e,4,2147483647,-2147483648),t<0&&(t=4294967295+t+1),this[e]=t>>>24,this[e+1]=t>>>16,this[e+2]=t>>>8,this[e+3]=255&t,e+4},a.prototype.writeFloatLE=function(t,e,r){return P(this,t,e,!0,r)},a.prototype.writeFloatBE=function(t,e,r){return P(this,t,e,!1,r)},a.prototype.writeDoubleLE=function(t,e,r){return I(this,t,e,!0,r)},a.prototype.writeDoubleBE=function(t,e,r){return I(this,t,e,!1,r)},a.prototype.copy=function(t,e,r,n){if(!a.isBuffer(t))throw new TypeError("argument should be a Buffer");if(r||(r=0),n||0===n||(n=this.length),e>=t.length&&(e=t.length),e||(e=0),n>0&&n=this.length)throw new RangeError("Index out of range");if(n<0)throw new RangeError("sourceEnd out of bounds");n>this.length&&(n=this.length),t.length-e=0;--o)t[o+e]=this[o+r];else Uint8Array.prototype.set.call(t,this.subarray(r,n),e);return i},a.prototype.fill=function(t,e,r,n){if("string"==typeof t){if("string"==typeof e?(n=e,e=0,r=this.length):"string"==typeof r&&(n=r,r=this.length),void 0!==n&&"string"!=typeof n)throw new TypeError("encoding must be a string");if("string"==typeof n&&!a.isEncoding(n))throw new TypeError("Unknown encoding: "+n);if(1===t.length){var i=t.charCodeAt(0);("utf8"===n&&i<128||"latin1"===n)&&(t=i)}}else"number"==typeof t&&(t&=255);if(e<0||this.length>>=0,r=void 0===r?this.length:r>>>0,t||(t=0),"number"==typeof t)for(o=e;o55295&&r<57344){if(!i){if(r>56319){(e-=3)>-1&&a.push(239,191,189);continue}if(o+1===n){(e-=3)>-1&&a.push(239,191,189);continue}i=r;continue}if(r<56320){(e-=3)>-1&&a.push(239,191,189),i=r;continue}r=65536+(i-55296<<10|r-56320)}else i&&(e-=3)>-1&&a.push(239,191,189);if(i=null,r<128){if((e-=1)<0)break;a.push(r)}else if(r<2048){if((e-=2)<0)break;a.push(r>>6|192,63&r|128)}else if(r<65536){if((e-=3)<0)break;a.push(r>>12|224,r>>6&63|128,63&r|128)}else{if(!(r<1114112))throw new Error("Invalid code point");if((e-=4)<0)break;a.push(r>>18|240,r>>12&63|128,r>>6&63|128,63&r|128)}}return a}function R(t){return 
e.toByteArray(function(t){if((t=(t=t.split("=")[0]).trim().replace(O,"")).length<2)return"";for(;t.length%4!=0;)t+="=";return t}(t))}function F(t,e,r,n){for(var i=0;i=e.length||i>=t.length);++i)e[i+r]=t[i];return i}function B(t,e){return t instanceof e||null!=t&&null!=t.constructor&&null!=t.constructor.name&&t.constructor.name===e.name}function N(t){return t!=t}}).call(this)}).call(this,t("buffer").Buffer)},{"base64-js":75,buffer:80,ieee754:225}],81:[function(t,e,r){e.exports=function(t,e,r){return er?r:t:te?e:t}},{}],82:[function(t,e,r){"use strict";var n=t("clamp");function i(t,e){null==e&&(e=!0);var r=t[0],i=t[1],a=t[2],o=t[3];return null==o&&(o=e?1:255),e&&(r*=255,i*=255,a*=255,o*=255),16777216*(r=255&n(r,0,255))+((i=255&n(i,0,255))<<16)+((a=255&n(a,0,255))<<8)+(o=255&n(o,0,255))}e.exports=i,e.exports.to=i,e.exports.from=function(t,e){var r=(t=+t)>>>24,n=(16711680&t)>>>16,i=(65280&t)>>>8,a=255&t;return!1===e?[r,n,i,a]:[r/255,n/255,i/255,a/255]}},{clamp:81}],83:[function(t,e,r){"use strict";e.exports={aliceblue:[240,248,255],antiquewhite:[250,235,215],aqua:[0,255,255],aquamarine:[127,255,212],azure:[240,255,255],beige:[245,245,220],bisque:[255,228,196],black:[0,0,0],blanchedalmond:[255,235,205],blue:[0,0,255],blueviolet:[138,43,226],brown:[165,42,42],burlywood:[222,184,135],cadetblue:[95,158,160],chartreuse:[127,255,0],chocolate:[210,105,30],coral:[255,127,80],cornflowerblue:[100,149,237],cornsilk:[255,248,220],crimson:[220,20,60],cyan:[0,255,255],darkblue:[0,0,139],darkcyan:[0,139,139],darkgoldenrod:[184,134,11],darkgray:[169,169,169],darkgreen:[0,100,0],darkgrey:[169,169,169],darkkhaki:[189,183,107],darkmagenta:[139,0,139],darkolivegreen:[85,107,47],darkorange:[255,140,0],darkorchid:[153,50,204],darkred:[139,0,0],darksalmon:[233,150,122],darkseagreen:[143,188,143],darkslateblue:[72,61,139],darkslategray:[47,79,79],darkslategrey:[47,79,79],darkturquoise:[0,206,209],darkviolet:[148,0,211],deeppink:[255,20,147],deepskyblue:[0,191,255],dimgray:[105,105,105],dimgrey
:[105,105,105],dodgerblue:[30,144,255],firebrick:[178,34,34],floralwhite:[255,250,240],forestgreen:[34,139,34],fuchsia:[255,0,255],gainsboro:[220,220,220],ghostwhite:[248,248,255],gold:[255,215,0],goldenrod:[218,165,32],gray:[128,128,128],green:[0,128,0],greenyellow:[173,255,47],grey:[128,128,128],honeydew:[240,255,240],hotpink:[255,105,180],indianred:[205,92,92],indigo:[75,0,130],ivory:[255,255,240],khaki:[240,230,140],lavender:[230,230,250],lavenderblush:[255,240,245],lawngreen:[124,252,0],lemonchiffon:[255,250,205],lightblue:[173,216,230],lightcoral:[240,128,128],lightcyan:[224,255,255],lightgoldenrodyellow:[250,250,210],lightgray:[211,211,211],lightgreen:[144,238,144],lightgrey:[211,211,211],lightpink:[255,182,193],lightsalmon:[255,160,122],lightseagreen:[32,178,170],lightskyblue:[135,206,250],lightslategray:[119,136,153],lightslategrey:[119,136,153],lightsteelblue:[176,196,222],lightyellow:[255,255,224],lime:[0,255,0],limegreen:[50,205,50],linen:[250,240,230],magenta:[255,0,255],maroon:[128,0,0],mediumaquamarine:[102,205,170],mediumblue:[0,0,205],mediumorchid:[186,85,211],mediumpurple:[147,112,219],mediumseagreen:[60,179,113],mediumslateblue:[123,104,238],mediumspringgreen:[0,250,154],mediumturquoise:[72,209,204],mediumvioletred:[199,21,133],midnightblue:[25,25,112],mintcream:[245,255,250],mistyrose:[255,228,225],moccasin:[255,228,181],navajowhite:[255,222,173],navy:[0,0,128],oldlace:[253,245,230],olive:[128,128,0],olivedrab:[107,142,35],orange:[255,165,0],orangered:[255,69,0],orchid:[218,112,214],palegoldenrod:[238,232,170],palegreen:[152,251,152],paleturquoise:[175,238,238],palevioletred:[219,112,147],papayawhip:[255,239,213],peachpuff:[255,218,185],peru:[205,133,63],pink:[255,192,203],plum:[221,160,221],powderblue:[176,224,230],purple:[128,0,128],rebeccapurple:[102,51,153],red:[255,0,0],rosybrown:[188,143,143],royalblue:[65,105,225],saddlebrown:[139,69,19],salmon:[250,128,114],sandybrown:[244,164,96],seagreen:[46,139,87],seashell:[255,245,238],sienna:[160,82
,45],silver:[192,192,192],skyblue:[135,206,235],slateblue:[106,90,205],slategray:[112,128,144],slategrey:[112,128,144],snow:[255,250,250],springgreen:[0,255,127],steelblue:[70,130,180],tan:[210,180,140],teal:[0,128,128],thistle:[216,191,216],tomato:[255,99,71],turquoise:[64,224,208],violet:[238,130,238],wheat:[245,222,179],white:[255,255,255],whitesmoke:[245,245,245],yellow:[255,255,0],yellowgreen:[154,205,50]}},{}],84:[function(t,e,r){"use strict";var n=t("color-rgba"),i=t("clamp"),a=t("dtype");e.exports=function(t,e){"float"!==e&&e||(e="array"),"uint"===e&&(e="uint8"),"uint_clamped"===e&&(e="uint8_clamped");var r=new(a(e))(4),o="uint8"!==e&&"uint8_clamped"!==e;return t.length&&"string"!=typeof t||((t=n(t))[0]/=255,t[1]/=255,t[2]/=255),function(t){return t instanceof Uint8Array||t instanceof Uint8ClampedArray||!!(Array.isArray(t)&&(t[0]>1||0===t[0])&&(t[1]>1||0===t[1])&&(t[2]>1||0===t[2])&&(!t[3]||t[3]>1))}(t)?(r[0]=t[0],r[1]=t[1],r[2]=t[2],r[3]=null!=t[3]?t[3]:255,o&&(r[0]/=255,r[1]/=255,r[2]/=255,r[3]/=255),r):(o?(r[0]=t[0],r[1]=t[1],r[2]=t[2],r[3]=null!=t[3]?t[3]:1):(r[0]=i(Math.floor(255*t[0]),0,255),r[1]=i(Math.floor(255*t[1]),0,255),r[2]=i(Math.floor(255*t[2]),0,255),r[3]=null==t[3]?255:i(Math.floor(255*t[3]),0,255)),r)}},{clamp:81,"color-rgba":86,dtype:121}],85:[function(t,e,r){(function(r){(function(){"use strict";var n=t("color-name"),i=t("is-plain-obj"),a=t("defined");e.exports=function(t){var e,s,l=[],c=1;if("string"==typeof t)if(n[t])l=n[t].slice(),s="rgb";else if("transparent"===t)c=0,s="rgb",l=[0,0,0];else if(/^#[A-Fa-f0-9]+$/.test(t)){var u=(p=t.slice(1)).length;c=1,u<=4?(l=[parseInt(p[0]+p[0],16),parseInt(p[1]+p[1],16),parseInt(p[2]+p[2],16)],4===u&&(c=parseInt(p[3]+p[3],16)/255)):(l=[parseInt(p[0]+p[1],16),parseInt(p[2]+p[3],16),parseInt(p[4]+p[5],16)],8===u&&(c=parseInt(p[6]+p[7],16)/255)),l[0]||(l[0]=0),l[1]||(l[1]=0),l[2]||(l[2]=0),s="rgb"}else if(e=/^((?:rgb|hs[lvb]|hwb|cmyk?|xy[zy]|gray|lab|lchu?v?|[ly]uv|lms)a?)\s*\(([^\)]*)\)/.exec(t)){var 
f=e[1],h="rgb"===f,p=f.replace(/a$/,"");s=p;u="cmyk"===p?4:"gray"===p?1:3;l=e[2].trim().split(/\s*,\s*/).map((function(t,e){if(/%$/.test(t))return e===u?parseFloat(t)/100:"rgb"===p?255*parseFloat(t)/100:parseFloat(t);if("h"===p[e]){if(/deg$/.test(t))return parseFloat(t);if(void 0!==o[t])return o[t]}return parseFloat(t)})),f===p&&l.push(1),c=h||void 0===l[u]?1:l[u],l=l.slice(0,u)}else t.length>10&&/[0-9](?:\s|\/)/.test(t)&&(l=t.match(/([0-9]+)/g).map((function(t){return parseFloat(t)})),s=t.match(/([a-z])/gi).join("").toLowerCase());else if(isNaN(t))if(i(t)){var d=a(t.r,t.red,t.R,null);null!==d?(s="rgb",l=[d,a(t.g,t.green,t.G),a(t.b,t.blue,t.B)]):(s="hsl",l=[a(t.h,t.hue,t.H),a(t.s,t.saturation,t.S),a(t.l,t.lightness,t.L,t.b,t.brightness)]),c=a(t.a,t.alpha,t.opacity,1),null!=t.opacity&&(c/=100)}else(Array.isArray(t)||r.ArrayBuffer&&ArrayBuffer.isView&&ArrayBuffer.isView(t))&&(l=[t[0],t[1],t[2]],s="rgb",c=4===t.length?t[3]:1);else s="rgb",l=[t>>>16,(65280&t)>>>8,255&t];return{space:s,values:l,alpha:c}};var o={red:0,orange:60,yellow:120,green:180,blue:240,purple:300}}).call(this)}).call(this,"undefined"!=typeof global?global:"undefined"!=typeof self?self:"undefined"!=typeof window?window:{})},{"color-name":83,defined:118,"is-plain-obj":231}],86:[function(t,e,r){"use strict";var n=t("color-parse"),i=t("color-space/hsl"),a=t("clamp");e.exports=function(t){var e,r=n(t);return r.space?((e=Array(3))[0]=a(r.values[0],0,255),e[1]=a(r.values[1],0,255),e[2]=a(r.values[2],0,255),"h"===r.space[0]&&(e=i.rgb(e)),e.push(a(r.alpha,0,1)),e):[]}},{clamp:81,"color-parse":85,"color-space/hsl":87}],87:[function(t,e,r){"use strict";var n=t("./rgb");e.exports={name:"hsl",min:[0,0,0],max:[360,100,100],channel:["hue","saturation","lightness"],alias:["HSL"],rgb:function(t){var e,r,n,i,a,o=t[0]/360,s=t[1]/100,l=t[2]/100;if(0===s)return[a=255*l,a,a];e=2*l-(r=l<.5?l*(1+s):l+s-l*s),i=[0,0,0];for(var 
c=0;c<3;c++)(n=o+1/3*-(c-1))<0?n++:n>1&&n--,a=6*n<1?e+6*(r-e)*n:2*n<1?r:3*n<2?e+(r-e)*(2/3-n)*6:e,i[c]=255*a;return i}},n.hsl=function(t){var e,r,n=t[0]/255,i=t[1]/255,a=t[2]/255,o=Math.min(n,i,a),s=Math.max(n,i,a),l=s-o;return s===o?e=0:n===s?e=(i-a)/l:i===s?e=2+(a-n)/l:a===s&&(e=4+(n-i)/l),(e=Math.min(60*e,360))<0&&(e+=360),r=(o+s)/2,[e,100*(s===o?0:r<=.5?l/(s+o):l/(2-s-o)),100*r]}},{"./rgb":88}],88:[function(t,e,r){"use strict";e.exports={name:"rgb",min:[0,0,0],max:[255,255,255],channel:["red","green","blue"],alias:["RGB"]}},{}],89:[function(t,e,r){e.exports={AFG:"afghan",ALA:"\\b\\wland",ALB:"albania",DZA:"algeria",ASM:"^(?=.*americ).*samoa",AND:"andorra",AGO:"angola",AIA:"anguill?a",ATA:"antarctica",ATG:"antigua",ARG:"argentin",ARM:"armenia",ABW:"^(?!.*bonaire).*\\baruba",AUS:"australia",AUT:"^(?!.*hungary).*austria|\\baustri.*\\bemp",AZE:"azerbaijan",BHS:"bahamas",BHR:"bahrain",BGD:"bangladesh|^(?=.*east).*paki?stan",BRB:"barbados",BLR:"belarus|byelo",BEL:"^(?!.*luxem).*belgium",BLZ:"belize|^(?=.*british).*honduras",BEN:"benin|dahome",BMU:"bermuda",BTN:"bhutan",BOL:"bolivia",BES:"^(?=.*bonaire).*eustatius|^(?=.*carib).*netherlands|\\bbes.?islands",BIH:"herzegovina|bosnia",BWA:"botswana|bechuana",BVT:"bouvet",BRA:"brazil",IOT:"british.?indian.?ocean",BRN:"brunei",BGR:"bulgaria",BFA:"burkina|\\bfaso|upper.?volta",BDI:"burundi",CPV:"verde",KHM:"cambodia|kampuchea|khmer",CMR:"cameroon",CAN:"canada",CYM:"cayman",CAF:"\\bcentral.african.republic",TCD:"\\bchad",CHL:"\\bchile",CHN:"^(?!.*\\bmac)(?!.*\\bhong)(?!.*\\btai)(?!.*\\brep).*china|^(?=.*peo)(?=.*rep).*china",CXR:"christmas",CCK:"\\bcocos|keeling",COL:"colombia",COM:"comoro",COG:"^(?!.*\\bdem)(?!.*\\bd[\\.]?r)(?!.*kinshasa)(?!.*zaire)(?!.*belg)(?!.*l.opoldville)(?!.*free).*\\bcongo",COK:"\\bcook",CRI:"costa.?rica",CIV:"ivoire|ivory",HRV:"croatia",CUB:"\\bcuba",CUW:"^(?!.*bonaire).*\\bcura(c|\xe7)ao",CYP:"cyprus",CSK:"czechoslovakia",CZE:"^(?=.*rep).*czech|czechia|bohemia",COD:"\\bdem.*congo|congo.*\\bdem|congo.
*\\bd[\\.]?r|\\bd[\\.]?r.*congo|belgian.?congo|congo.?free.?state|kinshasa|zaire|l.opoldville|drc|droc|rdc",DNK:"denmark",DJI:"djibouti",DMA:"dominica(?!n)",DOM:"dominican.rep",ECU:"ecuador",EGY:"egypt",SLV:"el.?salvador",GNQ:"guine.*eq|eq.*guine|^(?=.*span).*guinea",ERI:"eritrea",EST:"estonia",ETH:"ethiopia|abyssinia",FLK:"falkland|malvinas",FRO:"faroe|faeroe",FJI:"fiji",FIN:"finland",FRA:"^(?!.*\\bdep)(?!.*martinique).*france|french.?republic|\\bgaul",GUF:"^(?=.*french).*guiana",PYF:"french.?polynesia|tahiti",ATF:"french.?southern",GAB:"gabon",GMB:"gambia",GEO:"^(?!.*south).*georgia",DDR:"german.?democratic.?republic|democratic.?republic.*germany|east.germany",DEU:"^(?!.*east).*germany|^(?=.*\\bfed.*\\brep).*german",GHA:"ghana|gold.?coast",GIB:"gibraltar",GRC:"greece|hellenic|hellas",GRL:"greenland",GRD:"grenada",GLP:"guadeloupe",GUM:"\\bguam",GTM:"guatemala",GGY:"guernsey",GIN:"^(?!.*eq)(?!.*span)(?!.*bissau)(?!.*portu)(?!.*new).*guinea",GNB:"bissau|^(?=.*portu).*guinea",GUY:"guyana|british.?guiana",HTI:"haiti",HMD:"heard.*mcdonald",VAT:"holy.?see|vatican|papal.?st",HND:"^(?!.*brit).*honduras",HKG:"hong.?kong",HUN:"^(?!.*austr).*hungary",ISL:"iceland",IND:"india(?!.*ocea)",IDN:"indonesia",IRN:"\\biran|persia",IRQ:"\\biraq|mesopotamia",IRL:"(^ireland)|(^republic.*ireland)",IMN:"^(?=.*isle).*\\bman",ISR:"israel",ITA:"italy",JAM:"jamaica",JPN:"japan",JEY:"jersey",JOR:"jordan",KAZ:"kazak",KEN:"kenya|british.?east.?africa|east.?africa.?prot",KIR:"kiribati",PRK:"^(?=.*democrat|people|north|d.*p.*.r).*\\bkorea|dprk|korea.*(d.*p.*r)",KWT:"kuwait",KGZ:"kyrgyz|kirghiz",LAO:"\\blaos?\\b",LVA:"latvia",LBN:"lebanon",LSO:"lesotho|basuto",LBR:"liberia",LBY:"libya",LIE:"liechtenstein",LTU:"lithuania",LUX:"^(?!.*belg).*luxem",MAC:"maca(o|u)",MDG:"madagascar|malagasy",MWI:"malawi|nyasa",MYS:"malaysia",MDV:"maldive",MLI:"\\bmali\\b",MLT:"\\bmalta",MHL:"marshall",MTQ:"martinique",MRT:"mauritania",MUS:"mauritius",MYT:"\\bmayotte",MEX:"\\bmexic",FSM:"fed.*micronesia|micronesia.*fed",M
CO:"monaco",MNG:"mongolia",MNE:"^(?!.*serbia).*montenegro",MSR:"montserrat",MAR:"morocco|\\bmaroc",MOZ:"mozambique",MMR:"myanmar|burma",NAM:"namibia",NRU:"nauru",NPL:"nepal",NLD:"^(?!.*\\bant)(?!.*\\bcarib).*netherlands",ANT:"^(?=.*\\bant).*(nether|dutch)",NCL:"new.?caledonia",NZL:"new.?zealand",NIC:"nicaragua",NER:"\\bniger(?!ia)",NGA:"nigeria",NIU:"niue",NFK:"norfolk",MNP:"mariana",NOR:"norway",OMN:"\\boman|trucial",PAK:"^(?!.*east).*paki?stan",PLW:"palau",PSE:"palestin|\\bgaza|west.?bank",PAN:"panama",PNG:"papua|new.?guinea",PRY:"paraguay",PER:"peru",PHL:"philippines",PCN:"pitcairn",POL:"poland",PRT:"portugal",PRI:"puerto.?rico",QAT:"qatar",KOR:"^(?!.*d.*p.*r)(?!.*democrat)(?!.*people)(?!.*north).*\\bkorea(?!.*d.*p.*r)",MDA:"moldov|b(a|e)ssarabia",REU:"r(e|\xe9)union",ROU:"r(o|u|ou)mania",RUS:"\\brussia|soviet.?union|u\\.?s\\.?s\\.?r|socialist.?republics",RWA:"rwanda",BLM:"barth(e|\xe9)lemy",SHN:"helena",KNA:"kitts|\\bnevis",LCA:"\\blucia",MAF:"^(?=.*collectivity).*martin|^(?=.*france).*martin(?!ique)|^(?=.*french).*martin(?!ique)",SPM:"miquelon",VCT:"vincent",WSM:"^(?!.*amer).*samoa",SMR:"san.?marino",STP:"\\bs(a|\xe3)o.?tom(e|\xe9)",SAU:"\\bsa\\w*.?arabia",SEN:"senegal",SRB:"^(?!.*monte).*serbia",SYC:"seychell",SLE:"sierra",SGP:"singapore",SXM:"^(?!.*martin)(?!.*saba).*maarten",SVK:"^(?!.*cze).*slovak",SVN:"slovenia",SLB:"solomon",SOM:"somali",ZAF:"south.africa|s\\\\..?africa",SGS:"south.?georgia|sandwich",SSD:"\\bs\\w*.?sudan",ESP:"spain",LKA:"sri.?lanka|ceylon",SDN:"^(?!.*\\bs(?!u)).*sudan",SUR:"surinam|dutch.?guiana",SJM:"svalbard",SWZ:"swaziland",SWE:"sweden",CHE:"switz|swiss",SYR:"syria",TWN:"taiwan|taipei|formosa|^(?!.*peo)(?=.*rep).*china",TJK:"tajik",THA:"thailand|\\bsiam",MKD:"macedonia|fyrom",TLS:"^(?=.*leste).*timor|^(?=.*east).*timor",TGO:"togo",TKL:"tokelau",TON:"tonga",TTO:"trinidad|tobago",TUN:"tunisia",TUR:"turkey",TKM:"turkmen",TCA:"turks",TUV:"tuvalu",UGA:"uganda",UKR:"ukrain",ARE:"emirates|^u\\.?a\\.?e\\.?$|united.?arab.?em",GBR:"united.?king
dom|britain|^u\\.?k\\.?$",TZA:"tanzania",USA:"united.?states\\b(?!.*islands)|\\bu\\.?s\\.?a\\.?\\b|^\\s*u\\.?s\\.?\\b(?!.*islands)",UMI:"minor.?outlying.?is",URY:"uruguay",UZB:"uzbek",VUT:"vanuatu|new.?hebrides",VEN:"venezuela",VNM:"^(?!.*republic).*viet.?nam|^(?=.*socialist).*viet.?nam",VGB:"^(?=.*\\bu\\.?\\s?k).*virgin|^(?=.*brit).*virgin|^(?=.*kingdom).*virgin",VIR:"^(?=.*\\bu\\.?\\s?s).*virgin|^(?=.*states).*virgin",WLF:"futuna|wallis",ESH:"western.sahara",YEM:"^(?!.*arab)(?!.*north)(?!.*sana)(?!.*peo)(?!.*dem)(?!.*south)(?!.*aden)(?!.*\\bp\\.?d\\.?r).*yemen",YMD:"^(?=.*peo).*yemen|^(?!.*rep)(?=.*dem).*yemen|^(?=.*south).*yemen|^(?=.*aden).*yemen|^(?=.*\\bp\\.?d\\.?r).*yemen",YUG:"yugoslavia",ZMB:"zambia|northern.?rhodesia",EAZ:"zanzibar",ZWE:"zimbabwe|^(?!.*northern).*rhodesia"}},{}],90:[function(t,e,r){e.exports=["xx-small","x-small","small","medium","large","x-large","xx-large","larger","smaller"]},{}],91:[function(t,e,r){e.exports=["normal","condensed","semi-condensed","extra-condensed","ultra-condensed","expanded","semi-expanded","extra-expanded","ultra-expanded"]},{}],92:[function(t,e,r){e.exports=["normal","italic","oblique"]},{}],93:[function(t,e,r){e.exports=["normal","bold","bolder","lighter","100","200","300","400","500","600","700","800","900"]},{}],94:[function(t,e,r){"use strict";e.exports={parse:t("./parse"),stringify:t("./stringify")}},{"./parse":96,"./stringify":97}],95:[function(t,e,r){"use strict";var n=t("css-font-size-keywords");e.exports={isSize:function(t){return/^[\d\.]/.test(t)||-1!==t.indexOf("/")||-1!==n.indexOf(t)}}},{"css-font-size-keywords":90}],96:[function(t,e,r){"use strict";var n=t("unquote"),i=t("css-global-keywords"),a=t("css-system-font-keywords"),o=t("css-font-weight-keywords"),s=t("css-font-style-keywords"),l=t("css-font-stretch-keywords"),c=t("string-split-by"),u=t("./lib/util").isSize;e.exports=h;var f=h.cache={};function h(t){if("string"!=typeof t)throw new Error("Font argument must be a string.");if(f[t])return 
f[t];if(""===t)throw new Error("Cannot parse an empty string.");if(-1!==a.indexOf(t))return f[t]={system:t};for(var e,r={style:"normal",variant:"normal",weight:"normal",stretch:"normal",lineHeight:"normal",size:"1rem",family:["serif"]},h=c(t,/\s+/);e=h.shift();){if(-1!==i.indexOf(e))return["style","variant","weight","stretch"].forEach((function(t){r[t]=e})),f[t]=r;if(-1===s.indexOf(e))if("normal"!==e&&"small-caps"!==e)if(-1===l.indexOf(e)){if(-1===o.indexOf(e)){if(u(e)){var d=c(e,"/");if(r.size=d[0],null!=d[1]?r.lineHeight=p(d[1]):"/"===h[0]&&(h.shift(),r.lineHeight=p(h.shift())),!h.length)throw new Error("Missing required font-family.");return r.family=c(h.join(" "),/\s*,\s*/).map(n),f[t]=r}throw new Error("Unknown or unsupported font token: "+e)}r.weight=e}else r.stretch=e;else r.variant=e;else r.style=e}throw new Error("Missing required font-size.")}function p(t){var e=parseFloat(t);return e.toString()===t?e:t}},{"./lib/util":95,"css-font-stretch-keywords":91,"css-font-style-keywords":92,"css-font-weight-keywords":93,"css-global-keywords":98,"css-system-font-keywords":99,"string-split-by":299,unquote:323}],97:[function(t,e,r){"use strict";var n=t("pick-by-alias"),i=t("./lib/util").isSize,a=m(t("css-global-keywords")),o=m(t("css-system-font-keywords")),s=m(t("css-font-weight-keywords")),l=m(t("css-font-style-keywords")),c=m(t("css-font-stretch-keywords")),u={normal:1,"small-caps":1},f={serif:1,"sans-serif":1,monospace:1,cursive:1,fantasy:1,"system-ui":1},h="1rem",p="serif";function d(t,e){if(t&&!e[t]&&!a[t])throw Error("Unknown keyword `"+t+"`");return t}function m(t){for(var e={},r=0;re?1:t>=e?0:NaN}function r(t){var r;return 1===t.length&&(r=t,t=function(t,n){return e(r(t),n)}),{left:function(e,r,n,i){for(null==n&&(n=0),null==i&&(i=e.length);n>>1;t(e[a],r)<0?n=a+1:i=a}return n},right:function(e,r,n,i){for(null==n&&(n=0),null==i&&(i=e.length);n>>1;t(e[a],r)>0?i=a:n=a+1}return n}}}var n=r(e),i=n.right,a=n.left;function o(t,e){return[t,e]}function s(t){return 
null===t?NaN:+t}function l(t,e){var r,n,i=t.length,a=0,o=-1,l=0,c=0;if(null==e)for(;++o1)return c/(a-1)}function c(t,e){var r=l(t,e);return r?Math.sqrt(r):r}function u(t,e){var r,n,i,a=t.length,o=-1;if(null==e){for(;++o=r)for(n=i=r;++or&&(n=r),i=r)for(n=i=r;++or&&(n=r),i=0?(a>=v?10:a>=y?5:a>=x?2:1)*Math.pow(10,i):-Math.pow(10,-i)/(a>=v?10:a>=y?5:a>=x?2:1)}function _(t,e,r){var n=Math.abs(e-t)/Math.max(0,r),i=Math.pow(10,Math.floor(Math.log(n)/Math.LN10)),a=n/i;return a>=v?i*=10:a>=y?i*=5:a>=x&&(i*=2),e=1)return+r(t[n-1],n-1,t);var n,i=(n-1)*e,a=Math.floor(i),o=+r(t[a],a,t);return o+(+r(t[a+1],a+1,t)-o)*(i-a)}}function k(t,e){var r,n,i=t.length,a=-1;if(null==e){for(;++a=r)for(n=r;++ar&&(n=r)}else for(;++a=r)for(n=r;++ar&&(n=r);return n}function A(t){if(!(i=t.length))return[];for(var e=-1,r=k(t,M),n=new Array(r);++et?1:e>=t?0:NaN},t.deviation=c,t.extent=u,t.histogram=function(){var t=m,e=u,r=w;function n(n){var a,o,s=n.length,l=new Array(s);for(a=0;af;)h.pop(),--p;var d,m=new Array(p+1);for(a=0;a<=p;++a)(d=m[a]=[]).x0=a>0?h[a-1]:u,d.x1=a=r)for(n=r;++an&&(n=r)}else for(;++a=r)for(n=r;++an&&(n=r);return n},t.mean=function(t,e){var r,n=t.length,i=n,a=-1,o=0;if(null==e)for(;++a=0;)for(e=(n=t[i]).length;--e>=0;)r[--o]=n[e];return r},t.min=k,t.pairs=function(t,e){null==e&&(e=o);for(var r=0,n=t.length-1,i=t[0],a=new Array(n<0?0:n);r0)return[t];if((n=e0)for(t=Math.ceil(t/o),e=Math.floor(e/o),a=new Array(i=Math.ceil(e-t+1));++s=l.length)return null!=t&&n.sort(t),null!=e?e(n):n;for(var s,c,f,h=-1,p=n.length,d=l[i++],m=r(),g=a();++hl.length)return r;var i,a=c[n-1];return null!=e&&n>=l.length?i=r.entries():(i=[],r.each((function(e,r){i.push({key:r,values:t(e,n)})}))),null!=a?i.sort((function(t,e){return a(t.key,e.key)})):i}(u(t,0,a,o),0)},key:function(t){return l.push(t),s},sortKeys:function(t){return c[l.length-1]=t,s},sortValues:function(e){return t=e,s},rollup:function(t){return e=t,s}}},t.set=c,t.map=r,t.keys=function(t){var e=[];for(var r in t)e.push(r);return 
e},t.values=function(t){var e=[];for(var r in t)e.push(t[r]);return e},t.entries=function(t){var e=[];for(var r in t)e.push({key:r,value:t[r]});return e},Object.defineProperty(t,"__esModule",{value:!0})}))},{}],104:[function(t,e,r){!function(t,n){"object"==typeof r&&void 0!==e?n(r):n((t=t||self).d3=t.d3||{})}(this,(function(t){"use strict";function e(t,e,r){t.prototype=e.prototype=r,r.constructor=t}function r(t,e){var r=Object.create(t.prototype);for(var n in e)r[n]=e[n];return r}function n(){}var i="\\s*([+-]?\\d+)\\s*",a="\\s*([+-]?\\d*\\.?\\d+(?:[eE][+-]?\\d+)?)\\s*",o="\\s*([+-]?\\d*\\.?\\d+(?:[eE][+-]?\\d+)?)%\\s*",s=/^#([0-9a-f]{3,8})$/,l=new RegExp("^rgb\\("+[i,i,i]+"\\)$"),c=new RegExp("^rgb\\("+[o,o,o]+"\\)$"),u=new RegExp("^rgba\\("+[i,i,i,a]+"\\)$"),f=new RegExp("^rgba\\("+[o,o,o,a]+"\\)$"),h=new RegExp("^hsl\\("+[a,o,o]+"\\)$"),p=new RegExp("^hsla\\("+[a,o,o,a]+"\\)$"),d={aliceblue:15792383,antiquewhite:16444375,aqua:65535,aquamarine:8388564,azure:15794175,beige:16119260,bisque:16770244,black:0,blanchedalmond:16772045,blue:255,blueviolet:9055202,brown:10824234,burlywood:14596231,cadetblue:6266528,chartreuse:8388352,chocolate:13789470,coral:16744272,cornflowerblue:6591981,cornsilk:16775388,crimson:14423100,cyan:65535,darkblue:139,darkcyan:35723,darkgoldenrod:12092939,darkgray:11119017,darkgreen:25600,darkgrey:11119017,darkkhaki:12433259,darkmagenta:9109643,darkolivegreen:5597999,darkorange:16747520,darkorchid:10040012,darkred:9109504,darksalmon:15308410,darkseagreen:9419919,darkslateblue:4734347,darkslategray:3100495,darkslategrey:3100495,darkturquoise:52945,darkviolet:9699539,deeppink:16716947,deepskyblue:49151,dimgray:6908265,dimgrey:6908265,dodgerblue:2003199,firebrick:11674146,floralwhite:16775920,forestgreen:2263842,fuchsia:16711935,gainsboro:14474460,ghostwhite:16316671,gold:16766720,goldenrod:14329120,gray:8421504,green:32768,greenyellow:11403055,grey:8421504,honeydew:15794160,hotpink:16738740,indianred:13458524,indigo:4915330,ivory:16777200,khaki:
15787660,lavender:15132410,lavenderblush:16773365,lawngreen:8190976,lemonchiffon:16775885,lightblue:11393254,lightcoral:15761536,lightcyan:14745599,lightgoldenrodyellow:16448210,lightgray:13882323,lightgreen:9498256,lightgrey:13882323,lightpink:16758465,lightsalmon:16752762,lightseagreen:2142890,lightskyblue:8900346,lightslategray:7833753,lightslategrey:7833753,lightsteelblue:11584734,lightyellow:16777184,lime:65280,limegreen:3329330,linen:16445670,magenta:16711935,maroon:8388608,mediumaquamarine:6737322,mediumblue:205,mediumorchid:12211667,mediumpurple:9662683,mediumseagreen:3978097,mediumslateblue:8087790,mediumspringgreen:64154,mediumturquoise:4772300,mediumvioletred:13047173,midnightblue:1644912,mintcream:16121850,mistyrose:16770273,moccasin:16770229,navajowhite:16768685,navy:128,oldlace:16643558,olive:8421376,olivedrab:7048739,orange:16753920,orangered:16729344,orchid:14315734,palegoldenrod:15657130,palegreen:10025880,paleturquoise:11529966,palevioletred:14381203,papayawhip:16773077,peachpuff:16767673,peru:13468991,pink:16761035,plum:14524637,powderblue:11591910,purple:8388736,rebeccapurple:6697881,red:16711680,rosybrown:12357519,royalblue:4286945,saddlebrown:9127187,salmon:16416882,sandybrown:16032864,seagreen:3050327,seashell:16774638,sienna:10506797,silver:12632256,skyblue:8900331,slateblue:6970061,slategray:7372944,slategrey:7372944,snow:16775930,springgreen:65407,steelblue:4620980,tan:13808780,teal:32896,thistle:14204888,tomato:16737095,turquoise:4251856,violet:15631086,wheat:16113331,white:16777215,whitesmoke:16119285,yellow:16776960,yellowgreen:10145074};function m(){return this.rgb().formatHex()}function g(){return this.rgb().formatRgb()}function v(t){var e,r;return t=(t+"").trim().toLowerCase(),(e=s.exec(t))?(r=e[1].length,e=parseInt(e[1],16),6===r?y(e):3===r?new 
w(e>>8&15|e>>4&240,e>>4&15|240&e,(15&e)<<4|15&e,1):8===r?x(e>>24&255,e>>16&255,e>>8&255,(255&e)/255):4===r?x(e>>12&15|e>>8&240,e>>8&15|e>>4&240,e>>4&15|240&e,((15&e)<<4|15&e)/255):null):(e=l.exec(t))?new w(e[1],e[2],e[3],1):(e=c.exec(t))?new w(255*e[1]/100,255*e[2]/100,255*e[3]/100,1):(e=u.exec(t))?x(e[1],e[2],e[3],e[4]):(e=f.exec(t))?x(255*e[1]/100,255*e[2]/100,255*e[3]/100,e[4]):(e=h.exec(t))?M(e[1],e[2]/100,e[3]/100,1):(e=p.exec(t))?M(e[1],e[2]/100,e[3]/100,e[4]):d.hasOwnProperty(t)?y(d[t]):"transparent"===t?new w(NaN,NaN,NaN,0):null}function y(t){return new w(t>>16&255,t>>8&255,255&t,1)}function x(t,e,r,n){return n<=0&&(t=e=r=NaN),new w(t,e,r,n)}function b(t){return t instanceof n||(t=v(t)),t?new w((t=t.rgb()).r,t.g,t.b,t.opacity):new w}function _(t,e,r,n){return 1===arguments.length?b(t):new w(t,e,r,null==n?1:n)}function w(t,e,r,n){this.r=+t,this.g=+e,this.b=+r,this.opacity=+n}function T(){return"#"+A(this.r)+A(this.g)+A(this.b)}function k(){var t=this.opacity;return(1===(t=isNaN(t)?1:Math.max(0,Math.min(1,t)))?"rgb(":"rgba(")+Math.max(0,Math.min(255,Math.round(this.r)||0))+", "+Math.max(0,Math.min(255,Math.round(this.g)||0))+", "+Math.max(0,Math.min(255,Math.round(this.b)||0))+(1===t?")":", "+t+")")}function A(t){return((t=Math.max(0,Math.min(255,Math.round(t)||0)))<16?"0":"")+t.toString(16)}function M(t,e,r,n){return n<=0?t=e=r=NaN:r<=0||r>=1?t=e=NaN:e<=0&&(t=NaN),new L(t,e,r,n)}function S(t){if(t instanceof L)return new L(t.h,t.s,t.l,t.opacity);if(t instanceof n||(t=v(t)),!t)return new L;if(t instanceof L)return t;var e=(t=t.rgb()).r/255,r=t.g/255,i=t.b/255,a=Math.min(e,r,i),o=Math.max(e,r,i),s=NaN,l=o-a,c=(o+a)/2;return l?(s=e===o?(r-i)/l+6*(r0&&c<1?0:s,new L(s,l,c,t.opacity)}function E(t,e,r,n){return 1===arguments.length?S(t):new L(t,e,r,null==n?1:n)}function L(t,e,r,n){this.h=+t,this.s=+e,this.l=+r,this.opacity=+n}function C(t,e,r){return 255*(t<60?e+(r-e)*t/60:t<180?r:t<240?e+(r-e)*(240-t)/60:e)}e(n,v,{copy:function(t){return Object.assign(new 
this.constructor,this,t)},displayable:function(){return this.rgb().displayable()},hex:m,formatHex:m,formatHsl:function(){return S(this).formatHsl()},formatRgb:g,toString:g}),e(w,_,r(n,{brighter:function(t){return t=null==t?1/.7:Math.pow(1/.7,t),new w(this.r*t,this.g*t,this.b*t,this.opacity)},darker:function(t){return t=null==t?.7:Math.pow(.7,t),new w(this.r*t,this.g*t,this.b*t,this.opacity)},rgb:function(){return this},displayable:function(){return-.5<=this.r&&this.r<255.5&&-.5<=this.g&&this.g<255.5&&-.5<=this.b&&this.b<255.5&&0<=this.opacity&&this.opacity<=1},hex:T,formatHex:T,formatRgb:k,toString:k})),e(L,E,r(n,{brighter:function(t){return t=null==t?1/.7:Math.pow(1/.7,t),new L(this.h,this.s,this.l*t,this.opacity)},darker:function(t){return t=null==t?.7:Math.pow(.7,t),new L(this.h,this.s,this.l*t,this.opacity)},rgb:function(){var t=this.h%360+360*(this.h<0),e=isNaN(t)||isNaN(this.s)?0:this.s,r=this.l,n=r+(r<.5?r:1-r)*e,i=2*r-n;return new w(C(t>=240?t-240:t+120,i,n),C(t,i,n),C(t<120?t+240:t-120,i,n),this.opacity)},displayable:function(){return(0<=this.s&&this.s<=1||isNaN(this.s))&&0<=this.l&&this.l<=1&&0<=this.opacity&&this.opacity<=1},formatHsl:function(){var t=this.opacity;return(1===(t=isNaN(t)?1:Math.max(0,Math.min(1,t)))?"hsl(":"hsla(")+(this.h||0)+", "+100*(this.s||0)+"%, "+100*(this.l||0)+"%"+(1===t?")":", "+t+")")}}));var P=Math.PI/180,I=180/Math.PI,O=6/29,z=3*O*O;function D(t){if(t instanceof F)return new F(t.l,t.a,t.b,t.opacity);if(t instanceof q)return G(t);t instanceof w||(t=b(t));var e,r,n=U(t.r),i=U(t.g),a=U(t.b),o=B((.2225045*n+.7168786*i+.0606169*a)/1);return n===i&&i===a?e=r=o:(e=B((.4360747*n+.3850649*i+.1430804*a)/.96422),r=B((.0139322*n+.0971045*i+.7141733*a)/.82521)),new F(116*o-16,500*(e-o),200*(o-r),t.opacity)}function R(t,e,r,n){return 1===arguments.length?D(t):new F(t,e,r,null==n?1:n)}function F(t,e,r,n){this.l=+t,this.a=+e,this.b=+r,this.opacity=+n}function B(t){return t>.008856451679035631?Math.pow(t,1/3):t/z+4/29}function N(t){return 
t>O?t*t*t:z*(t-4/29)}function j(t){return 255*(t<=.0031308?12.92*t:1.055*Math.pow(t,1/2.4)-.055)}function U(t){return(t/=255)<=.04045?t/12.92:Math.pow((t+.055)/1.055,2.4)}function V(t){if(t instanceof q)return new q(t.h,t.c,t.l,t.opacity);if(t instanceof F||(t=D(t)),0===t.a&&0===t.b)return new q(NaN,0=0&&(r=t.slice(n+1),t=t.slice(0,n)),t&&!e.hasOwnProperty(t))throw new Error("unknown type: "+t);return{type:t,name:r}}))}function a(t,e){for(var r,n=0,i=t.length;n0)for(var r,n,i=new Array(r),a=0;ah+c||np+c||au.index){var f=h-s.x-s.vx,g=p-s.y-s.vy,v=f*f+g*g;vt.r&&(t.r=t[e].r)}function h(){if(r){var e,i,a=r.length;for(n=new Array(a),e=0;e=c)){(t.data!==r||t.next)&&(0===f&&(d+=(f=o())*f),0===h&&(d+=(h=o())*h),d1?(null==r?u.remove(t):u.set(t,v(r)),e):u.get(t)},find:function(e,r,n){var i,a,o,s,l,c=0,u=t.length;for(null==n?n=1/0:n*=n,c=0;c1?(h.on(t,r),e):h.on(t)}}},t.forceX=function(t){var e,r,n,i=a(.1);function o(t){for(var i,a=0,o=e.length;a1?n[0]+n.slice(2):n,+t.slice(r+1)]}function r(t){return(t=e(Math.abs(t)))?t[1]:NaN}var n,i=/^(?:(.)?([<>=^]))?([+\-( ])?([$#])?(0)?(\d+)?(,)?(\.\d+)?(~)?([a-z%])?$/i;function a(t){if(!(e=i.exec(t)))throw new Error("invalid format: "+t);var e;return new o({fill:e[1],align:e[2],sign:e[3],symbol:e[4],zero:e[5],width:e[6],comma:e[7],precision:e[8]&&e[8].slice(1),trim:e[9],type:e[10]})}function o(t){this.fill=void 0===t.fill?" 
":t.fill+"",this.align=void 0===t.align?">":t.align+"",this.sign=void 0===t.sign?"-":t.sign+"",this.symbol=void 0===t.symbol?"":t.symbol+"",this.zero=!!t.zero,this.width=void 0===t.width?void 0:+t.width,this.comma=!!t.comma,this.precision=void 0===t.precision?void 0:+t.precision,this.trim=!!t.trim,this.type=void 0===t.type?"":t.type+""}function s(t,r){var n=e(t,r);if(!n)return t+"";var i=n[0],a=n[1];return a<0?"0."+new Array(-a).join("0")+i:i.length>a+1?i.slice(0,a+1)+"."+i.slice(a+1):i+new Array(a-i.length+2).join("0")}a.prototype=o.prototype,o.prototype.toString=function(){return this.fill+this.align+this.sign+this.symbol+(this.zero?"0":"")+(void 0===this.width?"":Math.max(1,0|this.width))+(this.comma?",":"")+(void 0===this.precision?"":"."+Math.max(0,0|this.precision))+(this.trim?"~":"")+this.type};var l={"%":function(t,e){return(100*t).toFixed(e)},b:function(t){return Math.round(t).toString(2)},c:function(t){return t+""},d:function(t){return Math.abs(t=Math.round(t))>=1e21?t.toLocaleString("en").replace(/,/g,""):t.toString(10)},e:function(t,e){return t.toExponential(e)},f:function(t,e){return t.toFixed(e)},g:function(t,e){return t.toPrecision(e)},o:function(t){return Math.round(t).toString(8)},p:function(t,e){return s(100*t,e)},r:s,s:function(t,r){var i=e(t,r);if(!i)return t+"";var a=i[0],o=i[1],s=o-(n=3*Math.max(-8,Math.min(8,Math.floor(o/3))))+1,l=a.length;return s===l?a:s>l?a+new Array(s-l+1).join("0"):s>0?a.slice(0,s)+"."+a.slice(s):"0."+new Array(1-s).join("0")+e(t,Math.max(0,r+s-1))[0]},X:function(t){return Math.round(t).toString(16).toUpperCase()},x:function(t){return Math.round(t).toString(16)}};function c(t){return t}var u,f=Array.prototype.map,h=["y","z","a","f","p","n","\xb5","m","","k","M","G","T","P","E","Z","Y"];function p(t){var e,i,o=void 0===t.grouping||void 0===t.thousands?c:(e=f.call(t.grouping,Number),i=t.thousands+"",function(t,r){for(var 
n=t.length,a=[],o=0,s=e[0],l=0;n>0&&s>0&&(l+s+1>r&&(s=Math.max(1,r-l)),a.push(t.substring(n-=s,n+s)),!((l+=s+1)>r));)s=e[o=(o+1)%e.length];return a.reverse().join(i)}),s=void 0===t.currency?"":t.currency[0]+"",u=void 0===t.currency?"":t.currency[1]+"",p=void 0===t.decimal?".":t.decimal+"",d=void 0===t.numerals?c:function(t){return function(e){return e.replace(/[0-9]/g,(function(e){return t[+e]}))}}(f.call(t.numerals,String)),m=void 0===t.percent?"%":t.percent+"",g=void 0===t.minus?"-":t.minus+"",v=void 0===t.nan?"NaN":t.nan+"";function y(t){var e=(t=a(t)).fill,r=t.align,i=t.sign,c=t.symbol,f=t.zero,y=t.width,x=t.comma,b=t.precision,_=t.trim,w=t.type;"n"===w?(x=!0,w="g"):l[w]||(void 0===b&&(b=12),_=!0,w="g"),(f||"0"===e&&"="===r)&&(f=!0,e="0",r="=");var T="$"===c?s:"#"===c&&/[boxX]/.test(w)?"0"+w.toLowerCase():"",k="$"===c?u:/[%p]/.test(w)?m:"",A=l[w],M=/[defgprs%]/.test(w);function S(t){var a,s,l,c=T,u=k;if("c"===w)u=A(t)+u,t="";else{var m=(t=+t)<0||1/t<0;if(t=isNaN(t)?v:A(Math.abs(t),b),_&&(t=function(t){t:for(var e,r=t.length,n=1,i=-1;n0&&(i=0)}return i>0?t.slice(0,i)+t.slice(e+1):t}(t)),m&&0==+t&&"+"!==i&&(m=!1),c=(m?"("===i?i:g:"-"===i||"("===i?"":i)+c,u=("s"===w?h[8+n/3]:"")+u+(m&&"("===i?")":""),M)for(a=-1,s=t.length;++a(l=t.charCodeAt(a))||l>57){u=(46===l?p+t.slice(a+1):t.slice(a))+u,t=t.slice(0,a);break}}x&&!f&&(t=o(t,1/0));var S=c.length+t.length+u.length,E=S>1)+c+t+u+E.slice(S);break;default:t=E+c+t+u}return d(t)}return b=void 0===b?6:/[gprs]/.test(w)?Math.max(1,Math.min(21,b)):Math.max(0,Math.min(20,b)),S.toString=function(){return t+""},S}return{format:y,formatPrefix:function(t,e){var n=y(((t=a(t)).type="f",t)),i=3*Math.max(-8,Math.min(8,Math.floor(r(e)/3))),o=Math.pow(10,-i),s=h[8+i/3];return function(t){return n(o*t)+s}}}}function d(e){return 
u=p(e),t.format=u.format,t.formatPrefix=u.formatPrefix,u}d({decimal:".",thousands:",",grouping:[3],currency:["$",""],minus:"-"}),t.FormatSpecifier=o,t.formatDefaultLocale=d,t.formatLocale=p,t.formatSpecifier=a,t.precisionFixed=function(t){return Math.max(0,-r(Math.abs(t)))},t.precisionPrefix=function(t,e){return Math.max(0,3*Math.max(-8,Math.min(8,Math.floor(r(e)/3)))-r(Math.abs(t)))},t.precisionRound=function(t,e){return t=Math.abs(t),e=Math.abs(e)-t,Math.max(0,r(e)-r(t))+1},Object.defineProperty(t,"__esModule",{value:!0})}))},{}],108:[function(t,e,r){!function(n,i){"object"==typeof r&&void 0!==e?i(r,t("d3-geo"),t("d3-array")):i(n.d3=n.d3||{},n.d3,n.d3)}(this,(function(t,e,r){"use strict";var n=Math.abs,i=Math.atan,a=Math.atan2,o=Math.cos,s=Math.exp,l=Math.floor,c=Math.log,u=Math.max,f=Math.min,h=Math.pow,p=Math.round,d=Math.sign||function(t){return t>0?1:t<0?-1:0},m=Math.sin,g=Math.tan,v=1e-6,y=Math.PI,x=y/2,b=y/4,_=Math.SQRT1_2,w=L(2),T=L(y),k=2*y,A=180/y,M=y/180;function S(t){return t>1?x:t<-1?-x:Math.asin(t)}function E(t){return t>1?0:t<-1?y:Math.acos(t)}function L(t){return t>0?Math.sqrt(t):0}function C(t){return(s(t)-s(-t))/2}function P(t){return(s(t)+s(-t))/2}function I(t){var e=g(t/2),r=2*c(o(t/2))/(e*e);function i(t,e){var n=o(t),i=o(e),a=m(e),s=i*n,l=-((1-s?c((1+s)/2)/(1-s):-.5)+r/(1+s));return[l*i*m(t),l*a]}return i.invert=function(e,i){var s,l=L(e*e+i*i),u=-t/2,f=50;if(!l)return[0,0];do{var h=u/2,p=o(h),d=m(h),g=d/p,y=-c(n(p));u-=s=(2/g*y-r*g-l)/(-y/(d*d)+1-r/(2*p*p))*(p<0?.7:1)}while(n(s)>v&&--f>0);var x=m(u);return[a(e*x,l*o(u)),S(i*x/l)]},i}function O(t,e){var r=o(e),n=function(t){return t?t/Math.sin(t):1}(E(r*o(t/=2)));return[2*r*m(t)*n,m(e)*n]}function z(t){var e=m(t),r=o(t),i=t>=0?1:-1,s=g(i*t),l=(1+e-r)/2;function c(t,n){var c=o(n),u=o(t/=2);return[(1+c)*m(t),(i*n>-a(u,s)-.001?0:10*-i)+l+m(n)*r-(1+c)*e*u]}return c.invert=function(t,c){var u=0,f=0,h=50;do{var 
p=o(u),d=m(u),g=o(f),y=m(f),x=1+g,b=x*d-t,_=l+y*r-x*e*p-c,w=x*p/2,T=-d*y,k=e*x*d/2,A=r*g+e*p*y,M=T*k-A*w,S=(_*T-b*A)/M/2,E=(b*k-_*w)/M;n(E)>2&&(E/=2),u-=S,f-=E}while((n(S)>v||n(E)>v)&&--h>0);return i*f>-a(o(u),s)-.001?[2*u,f]:null},c}function D(t,e){var r=g(e/2),n=L(1-r*r),i=1+n*o(t/=2),a=m(t)*n/i,s=r/i,l=a*a,c=s*s;return[4/3*a*(3+l-3*c),4/3*s*(3+3*l-c)]}O.invert=function(t,e){if(!(t*t+4*e*e>y*y+v)){var r=t,i=e,a=25;do{var s,l=m(r),c=m(r/2),u=o(r/2),f=m(i),h=o(i),p=m(2*i),d=f*f,g=h*h,x=c*c,b=1-g*u*u,_=b?E(h*u)*L(s=1/b):s=0,w=2*_*h*c-t,T=_*f-e,k=s*(g*x+_*h*u*d),A=s*(.5*l*p-2*_*f*c),M=.25*s*(p*c-_*f*g*l),S=s*(d*u+_*x*h),C=A*M-S*k;if(!C)break;var P=(T*A-w*S)/C,I=(w*M-T*k)/C;r-=P,i-=I}while((n(P)>v||n(I)>v)&&--a>0);return[r,i]}},D.invert=function(t,e){if(e*=3/8,!(t*=3/8)&&n(e)>1)return null;var r=1+t*t+e*e,i=L((r-L(r*r-4*e*e))/2),s=S(i)/3,l=i?function(t){return c(t+L(t*t-1))}(n(e/i))/3:function(t){return c(t+L(t*t+1))}(n(t))/3,u=o(s),f=P(l),h=f*f-u*u;return[2*d(t)*a(C(l)*u,.25-h),2*d(e)*a(f*m(s),.25+h)]};var R=L(8),F=c(1+w);function B(t,e){var r=n(e);return rx){var l=a(s[1],s[0]),c=L(s[0]*s[0]+s[1]*s[1]),u=r*p((l-x)/r)+x,f=a(m(l-=u),2-o(l));l=u+S(y/c*m(f))-f,s[0]=c*o(l),s[1]=c*m(l)}return s}return s.invert=function(t,n){var s=L(t*t+n*n);if(s>x){var l=a(n,t),c=r*p((l-x)/r)+x,u=l>c?-1:1,f=s*o(c-l),h=1/g(u*E((f-y)/L(y*(y-2*f)+s*s)));l=c+2*i((h+u*L(h*h-3))/3),t=s*o(l),n=s*m(l)}return e.geoAzimuthalEquidistantRaw.invert(t,n)},s}function j(t,r){if(arguments.length<2&&(r=t),1===r)return e.geoAzimuthalEqualAreaRaw;if(r===1/0)return U;function n(n,i){var a=e.geoAzimuthalEqualAreaRaw(n/r,i);return a[0]*=t,a}return n.invert=function(n,i){var a=e.geoAzimuthalEqualAreaRaw.invert(n/t,i);return a[0]*=r,a},n}function U(t,e){return[t*o(e)/o(e/=2),2*m(e)]}function V(t,e,r){var i,a,o,s=100;r=void 0===r?0:+r,e=+e;do{(a=t(r))===(o=t(r+v))&&(o=a+v),r-=i=-1*v*(a-e)/(a-o)}while(s-- >0&&n(i)>v);return s<0?NaN:r}function H(t,e,r){return void 0===e&&(e=40),void 
0===r&&(r=1e-12),function(i,a,o,s){var l,c,u;o=void 0===o?0:+o,s=void 0===s?0:+s;for(var f=0;fl)o-=c/=2,s-=u/=2;else{l=m;var g=(o>0?-1:1)*r,v=(s>0?-1:1)*r,y=t(o+g,s),x=t(o,s+v),b=(y[0]-h[0])/g,_=(y[1]-h[1])/g,w=(x[0]-h[0])/v,T=(x[1]-h[1])/v,k=T*b-_*w,A=(n(k)<.5?.5:1)/k;if(o+=c=(d*w-p*T)*A,s+=u=(p*_-d*b)*A,n(c)0&&(i[1]*=1+a/1.5*i[0]*i[0]),i}return e.invert=H(e),e}function G(t,e){var r,i=t*m(e),a=30;do{e-=r=(e+m(e)-i)/(1+o(e))}while(n(r)>v&&--a>0);return e/2}function Y(t,e,r){function n(n,i){return[t*n*o(i=G(r,i)),e*m(i)]}return n.invert=function(n,i){return i=S(i/e),[n/(t*o(i)),S((2*i+m(2*i))/r)]},n}B.invert=function(t,e){if((a=n(e))1e-12&&--u>0);return[t/(o(l)*(R-1/m(l))),d(e)*l]},U.invert=function(t,e){var r=2*S(e/2);return[t*o(r/2)/o(r),r]};var W=Y(w/x,w,y);var X=2.00276,Z=1.11072;function J(t,e){var r=G(y,e);return[X*t/(1/o(e)+Z/o(r)),(e+w*m(r))/X]}function K(t){var r=0,n=e.geoProjectionMutator(t),i=n(r);return i.parallel=function(t){return arguments.length?n(r=t*M):r*A},i}function Q(t,e){return[t*o(e),e]}function $(t){if(!t)return Q;var e=1/g(t);function r(r,n){var i=e+t-n,a=i?r*o(n)/i:i;return[i*m(a),e-i*o(a)]}return r.invert=function(r,n){var i=L(r*r+(n=e-n)*n),s=e+t-i;return[i/o(s)*a(r,n),s]},r}function tt(t){function e(e,r){var n=x-r,i=n?e*t*m(n)/n:n;return[n*m(i)/t,x-n*o(i)]}return e.invert=function(e,r){var n=e*t,i=x-r,o=L(n*n+i*i),s=a(n,i);return[(o?o/m(o):1)*s/t,x-o]},e}J.invert=function(t,e){var r,i,a=X*e,s=e<0?-b:b,l=25;do{i=a-w*m(s),s-=r=(m(2*s)+2*s-y*m(i))/(2*o(2*s)+2+y*o(i)*w*o(s))}while(n(r)>v&&--l>0);return i=a-w*m(s),[t*(1/o(i)+Z/o(s))/X,i]},Q.invert=function(t,e){return[t/o(e),e]};var et=Y(1,4/y,y);function rt(t,e,r,i,s,l){var c,u=o(l);if(n(t)>1||n(l)>1)c=E(r*s+e*i*u);else{var f=m(t/2),h=m(l/2);c=2*S(L(f*f+e*i*h*h))}return n(c)>v?[c,a(i*m(l),e*s-r*i*u)]:[0,0]}function nt(t,e,r){return E((t*t+e*e-r*r)/(2*t*e))}function it(t){return t-2*y*l((t+y)/(2*y))}function at(t,e,r){for(var 
n,i=[[t[0],t[1],m(t[1]),o(t[1])],[e[0],e[1],m(e[1]),o(e[1])],[r[0],r[1],m(r[1]),o(r[1])]],a=i[2],s=0;s<3;++s,a=n)n=i[s],a.v=rt(n[1]-a[1],a[3],a[2],n[3],n[2],n[0]-a[0]),a.point=[0,0];var l=nt(i[0].v[0],i[2].v[0],i[1].v[0]),c=nt(i[0].v[0],i[1].v[0],i[2].v[0]),u=y-l;i[2].point[1]=0,i[0].point[0]=-(i[1].point[0]=i[0].v[0]/2);var f=[i[2].point[0]=i[0].point[0]+i[2].v[0]*o(l),2*(i[0].point[1]=i[1].point[1]=i[2].v[0]*m(l))];return function(t,e){var r,n=m(e),a=o(e),s=new Array(3);for(r=0;r<3;++r){var l=i[r];if(s[r]=rt(e-l[1],l[3],l[2],a,n,t-l[0]),!s[r][0])return l.point;s[r][1]=it(s[r][1]-l.v[1])}var h=f.slice();for(r=0;r<3;++r){var p=2==r?0:r+1,d=nt(i[r].v[0],s[r][0],s[p][0]);s[r][1]<0&&(d=-d),r?1==r?(d=c-d,h[0]-=s[r][0]*o(d),h[1]-=s[r][0]*m(d)):(d=u-d,h[0]+=s[r][0]*o(d),h[1]+=s[r][0]*m(d)):(h[0]+=s[r][0]*o(d),h[1]-=s[r][0]*m(d))}return h[0]/=3,h[1]/=3,h}}function ot(t){return t[0]*=M,t[1]*=M,t}function st(t,r,n){var i=e.geoCentroid({type:"MultiPoint",coordinates:[t,r,n]}),a=[-i[0],-i[1]],o=e.geoRotation(a),s=at(ot(o(t)),ot(o(r)),ot(o(n)));s.invert=H(s);var l=e.geoProjection(s).rotate(a),c=l.center;return delete l.rotate,l.center=function(t){return arguments.length?c(o(t)):o.invert(c())},l.clipAngle(90)}function lt(t,e){var r=L(1-m(e));return[2/T*t*r,T*(1-r)]}function ct(t){var e=g(t);function r(t,r){return[t,(t?t/m(t):1)*(m(r)*o(t)-e*o(r))]}return r.invert=e?function(t,r){t&&(r*=m(t)/t);var n=o(t);return[t,2*a(L(n*n+e*e-r*r)-n,e-r)]}:function(t,e){return[t,S(t?e*g(t)/t:e)]},r}lt.invert=function(t,e){var r=(r=e/T-1)*r;return[r>0?t*L(y/r)/2:0,S(1-r)]};var ut=L(3);function ft(t,e){return[ut*t*(2*o(2*e/3)-1)/T,ut*T*m(e/3)]}function ht(t){var e=o(t);function r(t,r){return[t*e,m(r)/e]}return r.invert=function(t,r){return[t/e,S(r*e)]},r}function pt(t){var e=o(t);function r(t,r){return[t*e,(1+e)*g(r/2)]}return r.invert=function(t,r){return[t/e,2*i(r/(1+e))]},r}function dt(t,e){var r=L(8/(3*y));return[r*t*(1-n(e)/y),r*e]}function mt(t,e){var 
r=L(4-3*m(n(e)));return[2/L(6*y)*t*r,d(e)*L(2*y/3)*(2-r)]}function gt(t,e){var r=L(y*(4+y));return[2/r*t*(1+L(1-4*e*e/(y*y))),4/r*e]}function vt(t,e){var r=(2+x)*m(e);e/=2;for(var i=0,a=1/0;i<10&&n(a)>v;i++){var s=o(e);e-=a=(e+m(e)*(s+2)-r)/(2*s*(1+s))}return[2/L(y*(4+y))*t*(1+o(e)),2*L(y/(4+y))*m(e)]}function yt(t,e){return[t*(1+o(e))/L(2+y),2*e/L(2+y)]}function xt(t,e){for(var r=(1+x)*m(e),i=0,a=1/0;i<10&&n(a)>v;i++)e-=a=(e+m(e)-r)/(1+o(e));return r=L(2+y),[t*(1+o(e))/r,2*e/r]}ft.invert=function(t,e){var r=3*S(e/(ut*T));return[T*t/(ut*(2*o(2*r/3)-1)),r]},dt.invert=function(t,e){var r=L(8/(3*y)),i=e/r;return[t/(r*(1-n(i)/y)),i]},mt.invert=function(t,e){var r=2-n(e)/L(2*y/3);return[t*L(6*y)/(2*r),d(e)*S((4-r*r)/3)]},gt.invert=function(t,e){var r=L(y*(4+y))/2;return[t*r/(1+L(1-e*e*(4+y)/(4*y))),e*r/2]},vt.invert=function(t,e){var r=e*L((4+y)/y)/2,n=S(r),i=o(n);return[t/(2/L(y*(4+y))*(1+i)),S((n+r*(i+2))/(2+x))]},yt.invert=function(t,e){var r=L(2+y),n=e*r/2;return[r*t/(1+o(n)),n]},xt.invert=function(t,e){var r=1+x,n=L(r/2);return[2*t*n/(1+o(e*=n)),S((e+m(e))/r)]};var bt=3+2*w;function _t(t,e){var r=m(t/=2),n=o(t),a=L(o(e)),s=o(e/=2),l=m(e)/(s+w*n*a),u=L(2/(1+l*l)),f=L((w*s+(n+r)*a)/(w*s+(n-r)*a));return[bt*(u*(f-1/f)-2*c(f)),bt*(u*l*(f+1/f)-2*i(l))]}_t.invert=function(t,e){if(!(r=D.invert(t/1.2,1.065*e)))return null;var r,a=r[0],s=r[1],l=20;t/=bt,e/=bt;do{var h=a/2,p=s/2,d=m(h),g=o(h),y=m(p),b=o(p),T=o(s),k=L(T),A=y/(b+w*g*k),M=A*A,S=L(2/(1+M)),E=(w*b+(g+d)*k)/(w*b+(g-d)*k),C=L(E),P=C-1/C,I=C+1/C,O=S*P-2*c(C)-t,z=S*A*I-2*i(A)-e,R=y&&_*k*d*M/y,F=(w*g*b+k)/(2*(b+w*g*k)*(b+w*g*k)*k),B=-.5*A*S*S*S,N=B*R,j=B*F,U=(U=2*b+w*k*(g-d))*U*C,V=(w*g*b*k+T)/U,H=-w*d*y/(k*U),q=P*N-2*V/C+S*(V+V/E),G=P*j-2*H/C+S*(H+H/E),Y=A*I*N-2*R/(1+M)+S*I*R+S*A*(V-V/E),W=A*I*j-2*F/(1+M)+S*I*F+S*A*(H-H/E),X=G*Y-W*q;if(!X)break;var Z=(z*G-O*W)/X,J=(O*Y-z*q)/X;a-=Z,s=u(-x,f(x,s-J))}while((n(Z)>v||n(J)>v)&&--l>0);return n(n(s)-x)s){var 
d=L(h),g=a(f,u),b=i*p(g/i),_=g-b,w=t*o(_),T=(t*m(_)-_*m(w))/(x-w),k=Lt(_,T),A=(y-t)/Ct(k,w,y);u=d;var M,S=50;do{u-=M=(t+Ct(k,w,u)*A-d)/(k(u)*A)}while(n(M)>v&&--S>0);f=_*m(u),us){var u=L(c),f=a(l,r),h=i*p(f/i),d=f-h;r=u*o(d),l=u*m(d);for(var g=r-x,v=m(r),b=l/v,_=rv||n(p)>v)&&--x>0);return[d,g]},u}Tt.invert=function(t,e){var r=e/(1+wt);return[t&&t/(wt*L(1-r*r)),2*i(r)]},kt.invert=function(t,e){var r=i(e/T),n=o(r),a=2*r;return[t*T/2/(o(a)*n*n),a]};var It=Pt(2.8284,-1.6988,.75432,-.18071,1.76003,-.38914,.042555);var Ot=Pt(2.583819,-.835827,.170354,-.038094,1.543313,-.411435,.082742);var zt=Pt(5/6*y,-.62636,-.0344,0,1.3493,-.05524,0,.045);function Dt(t,e){var r=t*t,n=e*e;return[t*(1-.162388*n)*(.87-952426e-9*r*r),e*(1+n/12)]}Dt.invert=function(t,e){var r,i=t,a=e,o=50;do{var s=a*a;a-=r=(a*(1+s/12)-e)/(1+s/4)}while(n(r)>v&&--o>0);o=50,t/=1-.162388*s;do{var l=(l=i*i)*l;i-=r=(i*(.87-952426e-9*l)-t)/(.87-.00476213*l)}while(n(r)>v&&--o>0);return[i,a]};var Rt=Pt(2.6516,-.76534,.19123,-.047094,1.36289,-.13965,.031762);function Ft(t){var e=t(x,0)[0]-t(-x,0)[0];function r(r,n){var i=r>0?-.5:.5,a=t(r+i*y,n);return a[0]-=i*e,a}return t.invert&&(r.invert=function(r,n){var i=r>0?-.5:.5,a=t.invert(r+i*e,n),o=a[0]-i*y;return o<-y?o+=2*y:o>y&&(o-=2*y),a[0]=o,a}),r}function Bt(t,e){var r=d(t),i=d(e),s=o(e),l=o(t)*s,c=m(t)*s,u=m(i*e);t=n(a(c,u)),e=S(l),n(t-x)>v&&(t%=x);var f=function(t,e){if(e===x)return[0,0];var r,i,a=m(e),s=a*a,l=s*s,c=1+l,u=1+3*l,f=1-l,h=S(1/L(c)),p=f+s*c*h,d=(1-a)/p,g=L(d),b=d*c,_=L(b),w=g*f;if(0===t)return[0,-(w+s*_)];var T,k=o(e),A=1/k,M=2*a*k,E=(-p*k-(-3*s+h*u)*M*(1-a))/(p*p),C=-A*M,P=-A*(s*c*E+d*u*M),I=-2*A*(f*(.5*E/g)-2*s*g*M),O=4*t/y;if(t>.222*y||e.175*y){if(r=(w+s*L(b*(1+l)-w*w))/(1+l),t>y/4)return[r,r];var z=r,D=.5*r;r=.5*(D+z),i=50;do{var R=L(b-r*r),F=r*(I+C*R)+P*S(r/_)-O;if(!F)break;F<0?D=r:z=r,r=.5*(D+z)}while(n(z-D)>v&&--i>0)}else{r=v,i=25;do{var 
B=r*r,N=L(b-B),j=I+C*N,U=r*j+P*S(r/_)-O,V=j+(P-C*B)/N;r-=T=N?U/V:0}while(n(T)>v&&--i>0)}return[r,-w-s*L(b-r*r)]}(t>y/4?x-t:t,e);return t>y/4&&(u=f[0],f[0]=-f[1],f[1]=-u),f[0]*=r,f[1]*=-i,f}function Nt(t,e){var r,a,l,c,u,f;if(e=1-v)return r=(1-e)/4,l=1/(a=P(t)),[(c=((f=s(2*(f=t)))-1)/(f+1))+r*((u=a*C(t))-t)/(a*a),l-r*c*l*(u-t),l+r*c*l*(u+t),2*i(s(t))-x+r*(u-t)/a];var h=[1,0,0,0,0,0,0,0,0],p=[L(e),0,0,0,0,0,0,0,0],d=0;for(a=L(1-e),u=1;n(p[d]/h[d])>v&&d<8;)r=h[d++],p[d]=(r-a)/2,h[d]=(r+a)/2,a=L(r*a),u*=2;l=u*h[d]*t;do{l=(S(c=p[d]*m(a=l)/h[d])+l)/2}while(--d);return[m(l),c=o(l),c/o(l-a),l]}function jt(t,e){if(!e)return t;if(1===e)return c(g(t/2+b));for(var r=1,a=L(1-e),o=L(e),s=0;n(o)>v;s++){if(t%y){var l=i(a*g(t)/r);l<0&&(l+=y),t+=l+~~(t/y)*y}else t+=t;o=(r+a)/2,a=L(r*a),o=((r=o)-a)/2}return t/(h(2,s)*r)}function Ut(t,e){var r=(w-1)/(w+1),l=L(1-r*r),u=jt(x,l*l),f=c(g(y/4+n(e)/2)),h=s(-1*f)/L(r),p=function(t,e){var r=t*t,n=e+1,i=1-r-e*e;return[.5*((t>=0?x:-x)-a(i,2*t)),-.25*c(i*i+4*r)+.5*c(n*n+r)]}(h*o(-1*t),h*m(-1*t)),v=function(t,e,r){var a=n(t),o=C(n(e));if(a){var s=1/m(a),l=1/(g(a)*g(a)),c=-(l+r*(o*o*s*s)-1+r),u=(-c+L(c*c-4*((r-1)*l)))/2;return[jt(i(1/L(u)),r)*d(t),jt(i(L((u/l-1)/r)),1-r)*d(e)]}return[0,jt(i(o),1-r)*d(e)]}(p[0],p[1],l*l);return[-v[1],(e>=0?1:-1)*(.5*u-v[0])]}function Vt(t){var e=m(t),r=o(t),i=Ht(t);function s(t,a){var s=i(t,a);t=s[0],a=s[1];var l=m(a),c=o(a),u=o(t),f=E(e*l+r*c*u),h=m(f),p=n(h)>v?f/h:1;return[p*r*m(t),(n(t)>x?p:-p)*(e*c-r*l*u)]}return i.invert=Ht(-t),s.invert=function(t,r){var n=L(t*t+r*r),s=-m(n),l=o(n),c=n*l,u=-r*s,f=n*e,h=L(c*c+u*u-f*f),p=a(c*f+u*h,u*f-c*h),d=(n>x?-1:1)*a(t*s,n*o(p)*l+r*m(p)*s);return i.invert(d,p)},s}function Ht(t){var e=m(t),r=o(t);return function(t,n){var i=o(n),s=o(t)*i,l=m(t)*i,c=m(n);return[a(l,s*r-c*e),S(c*r+s*e)]}}Bt.invert=function(t,e){n(t)>1&&(t=2*d(t)-t),n(e)>1&&(e=2*d(e)-e);var r=d(t),i=d(e),s=-r*t,l=-i*e,c=l/s<1,u=function(t,e){var r=0,i=1,a=.5,s=50;for(;;){var 
l=a*a,c=L(a),u=S(1/L(1+l)),f=1-l+a*(1+l)*u,h=(1-c)/f,p=L(h),d=h*(1+l),m=p*(1-l),g=L(d-t*t),v=e+m+a*g;if(n(i-r)<1e-12||0==--s||0===v)break;v>0?r=a:i=a,a=.5*(r+i)}if(!s)return null;var x=S(c),b=o(x),_=1/b,w=2*c*b,T=(-f*b-(-3*a+u*(1+3*l))*w*(1-c))/(f*f);return[y/4*(t*(-2*_*(.5*T/p*(1-l)-2*a*p*w)+-_*w*g)+-_*(a*(1+l)*T+h*(1+3*l)*w)*S(t/L(d))),x]}(c?l:s,c?s:l),f=u[0],h=u[1],p=o(h);return c&&(f=-x-f),[r*(a(m(f)*p,-m(h))+y),i*S(o(f)*p)]},Ut.invert=function(t,e){var r,n,o,l,u,f,h=(w-1)/(w+1),p=L(1-h*h),d=jt(x,p*p),m=(n=-t,o=p*p,(r=.5*d-e)?(l=Nt(r,o),n?(f=(u=Nt(n,1-o))[1]*u[1]+o*l[0]*l[0]*u[0]*u[0],[[l[0]*u[2]/f,l[1]*l[2]*u[0]*u[1]/f],[l[1]*u[1]/f,-l[0]*l[2]*u[0]*u[2]/f],[l[2]*u[1]*u[2]/f,-o*l[0]*l[1]*u[0]/f]]):[[l[0],0],[l[1],0],[l[2],0]]):[[0,(u=Nt(n,1-o))[0]/u[1]],[1/u[1],0],[u[2]/u[1],0]]),g=function(t,e){var r=e[0]*e[0]+e[1]*e[1];return[(t[0]*e[0]+t[1]*e[1])/r,(t[1]*e[0]-t[0]*e[1])/r]}(m[0],m[1]);return[a(g[1],g[0])/-1,2*i(s(-.5*c(h*g[0]*g[0]+h*g[1]*g[1])))-x]};var qt=S(1-1/3)*A,Gt=ht(0);function Yt(t){var e=qt*M,r=lt(y,e)[0]-lt(-y,e)[0],i=Gt(0,e)[1],a=lt(0,e)[1],o=T-a,s=k/t,c=4/k,h=i+o*o*4/k;function p(p,d){var m,g=n(d);if(g>e){var v=f(t-1,u(0,l((p+y)/s)));(m=lt(p+=y*(t-1)/t-v*s,g))[0]=m[0]*k/r-k*(t-1)/(2*t)+v*k/t,m[1]=i+4*(m[1]-a)*o/k,d<0&&(m[1]=-m[1])}else m=Gt(p,d);return m[0]*=c,m[1]/=h,m}return p.invert=function(e,p){e/=c;var d=n(p*=h);if(d>i){var m=f(t-1,u(0,l((e+y)/s)));e=(e+y*(t-1)/t-m*s)*r/k;var g=lt.invert(e,.25*(d-i)*k/o+a);return g[0]-=y*(t-1)/t-m*s,p<0&&(g[1]=-g[1]),g}return Gt.invert(e,p)},p}function Wt(t,e){return[t,1&e?90-v:qt]}function Xt(t,e){return[t,1&e?-90+v:-qt]}function Zt(t){return[t[0]*(1-v),t[1]]}function Jt(t){var e,r=1+t,i=S(m(1/r)),s=2*L(y/(e=y+4*i*r)),l=.5*s*(r+L(t*(2+t))),c=t*t,u=r*r;function f(f,h){var p,d,g=1-m(h);if(g&&g<2){var v,b=x-h,_=25;do{var w=m(b),T=o(b),k=i+a(w,r-T),A=1+u-2*r*T;b-=v=(b-c*i-r*w+A*k-.5*g*e)/(2*r*w*k)}while(n(v)>1e-12&&--_>0);p=s*L(A),d=f*k/y}else p=s*(t+g),d=f*i/y;return[p*m(d),l-p*o(d)]}return 
f.invert=function(t,n){var o=t*t+(n-=l)*n,f=(1+u-o/(s*s))/(2*r),h=E(f),p=m(h),d=i+a(p,r-f);return[S(t/L(o))*y/d,S(1-2*(h-c*i-r*p+(1+u-2*r*f)*d)/e)]},f}function Kt(t,e){return e>-.7109889596207567?((t=W(t,e))[1]+=.0528035274542,t):Q(t,e)}function Qt(t,e){return n(e)>.7109889596207567?((t=W(t,e))[1]-=e>0?.0528035274542:-.0528035274542,t):Q(t,e)}function $t(t,e,r,n){var i=L(4*y/(2*r+(1+t-e/2)*m(2*r)+(t+e)/2*m(4*r)+e/2*m(6*r))),a=L(n*m(r)*L((1+t*o(2*r)+e*o(4*r))/(1+t+e))),s=r*c(1);function l(r){return L(1+t*o(2*r)+e*o(4*r))}function c(n){var i=n*r;return(2*i+(1+t-e/2)*m(2*i)+(t+e)/2*m(4*i)+e/2*m(6*i))/r}function u(t){return l(t)*m(t)}var f=function(t,e){var n=r*V(c,s*m(e)/r,e/y);isNaN(n)&&(n=r*d(e));var u=i*l(n);return[u*a*t/y*o(n),u/a*m(n)]};return f.invert=function(t,e){var n=V(u,e*a/i);return[t*y/(o(n)*i*a*l(n)),S(r*c(n/r)/s)]},0===r&&(i=L(n/y),(f=function(t,e){return[t*i,m(e)/i]}).invert=function(t,e){return[t/i,S(e*i)]}),f}function te(t,e,r,n,i){void 0===n&&(n=1e-8),void 0===i&&(i=20);var a=t(e),o=t(.5*(e+r)),s=t(r);return function t(e,r,n,i,a,o,s,l,c,u,f){if(f.nanEncountered)return NaN;var h,p,d,m,g,v,y,x,b,_;if(p=e(r+.25*(h=n-r)),d=e(n-.25*h),isNaN(p))f.nanEncountered=!0;else{if(!isNaN(d))return _=((v=(m=h*(i+4*p+a)/12)+(g=h*(a+4*d+o)/12))-s)/15,u>c?(f.maxDepthCount++,v+_):Math.abs(_)t?r=n:e=n,n=e+r>>1}while(n>e);var i=c[n+1]-c[n];return i&&(i=(t-c[n+1])/i),(n+1+i)/s}var p=2*f(1)/y*o/r,g=function(t,e){var r=f(n(m(e))),a=i(r)*t;return r/=p,[a,e>=0?r:-r]};return g.invert=function(t,e){var r;return n(e*=p)<1&&(r=d(e)*S(a(n(e))*o)),[t/i(n(e)),r]},g}function re(t,e){return n(t[0]-e[0])=0;--l)n=(e=t[1][l])[0][0],i=e[0][1],a=e[1][1],o=e[2][0],s=e[2][1],c.push(ne([[o-v,s-v],[o-v,a+v],[n+v,a+v],[n+v,i-v]],30));return{type:"Polygon",coordinates:[r.merge(c)]}}function ae(t,r,n){var i,a;function o(e,n){for(var i=n<0?-1:1,a=r[+(n<0)],o=0,s=a.length-1;oa[o][2][0];++o);var l=t(e-a[o][1][0],n);return 
l[0]+=t(a[o][1][0],i*n>i*a[o][0][1]?a[o][0][1]:n)[0],l}n?o.invert=n(o):t.invert&&(o.invert=function(e,n){for(var i=a[+(n<0)],s=r[+(n<0)],l=0,c=i.length;lo&&(r=a,a=o,o=r),[[n,a],[i,o]]}))})),s):r.map((function(t){return t.map((function(t){return[[t[0][0]*A,t[0][1]*A],[t[1][0]*A,t[1][1]*A],[t[2][0]*A,t[2][1]*A]]}))}))},null!=r&&s.lobes(r),s}Kt.invert=function(t,e){return e>-.7109889596207567?W.invert(t,e-.0528035274542):Q.invert(t,e)},Qt.invert=function(t,e){return n(e)>.7109889596207567?W.invert(t,e+(e>0?.0528035274542:-.0528035274542)):Q.invert(t,e)};var oe=[[[[-180,0],[-100,90],[-40,0]],[[-40,0],[30,90],[180,0]]],[[[-180,0],[-160,-90],[-100,0]],[[-100,0],[-60,-90],[-20,0]],[[-20,0],[20,-90],[80,0]],[[80,0],[140,-90],[180,0]]]];var se=[[[[-180,0],[-100,90],[-40,0]],[[-40,0],[30,90],[180,0]]],[[[-180,0],[-160,-90],[-100,0]],[[-100,0],[-60,-90],[-20,0]],[[-20,0],[20,-90],[80,0]],[[80,0],[140,-90],[180,0]]]];var le=[[[[-180,0],[-100,90],[-40,0]],[[-40,0],[30,90],[180,0]]],[[[-180,0],[-160,-90],[-100,0]],[[-100,0],[-60,-90],[-20,0]],[[-20,0],[20,-90],[80,0]],[[80,0],[140,-90],[180,0]]]];var ce=[[[[-180,0],[-90,90],[0,0]],[[0,0],[90,90],[180,0]]],[[[-180,0],[-90,-90],[0,0]],[[0,0],[90,-90],[180,0]]]];var ue=[[[[-180,35],[-30,90],[0,35]],[[0,35],[30,90],[180,35]]],[[[-180,-10],[-102,-90],[-65,-10]],[[-65,-10],[5,-90],[77,-10]],[[77,-10],[103,-90],[180,-10]]]];var fe=[[[[-180,0],[-110,90],[-40,0]],[[-40,0],[0,90],[40,0]],[[40,0],[110,90],[180,0]]],[[[-180,0],[-110,-90],[-40,0]],[[-40,0],[0,-90],[40,0]],[[40,0],[110,-90],[180,0]]]];function he(t,e){return[3/k*t*L(y*y/3-e*e),e]}function pe(t){function e(e,r){if(n(n(r)-x)2)return null;var o=(e/=2)*e,s=(r/=2)*r,l=2*r/(1+o+s);return l=h((1+l)/(1-l),1/t),[a(2*e,1-o-s)/t,S((l-1)/(l+1))]},e}he.invert=function(t,e){return[k/3*t/L(y*y/3-e*e),e]};var de=y/w;function me(t,e){return[t*(1+L(o(e)))/2,e/(o(e/2)*o(t/6))]}function ge(t,e){var 
r=t*t,n=e*e;return[t*(.975534+n*(-.0143059*r-.119161+-.0547009*n)),e*(1.00384+r*(.0802894+-.02855*n+199025e-9*r)+n*(.0998909+-.0491032*n))]}function ve(t,e){return[m(t)/o(e),g(e)*o(t)]}function ye(t){var e=o(t),r=g(b+t/2);function i(i,a){var o=a-t,s=n(o)=0;)h=(f=t[u])[0]+l*(i=h)-c*p,p=f[1]+l*p+c*i;return[h=l*(i=h)-c*p,p=l*p+c*i]}return r.invert=function(r,s){var l=20,c=r,u=s;do{for(var f,h=e,p=t[h],d=p[0],g=p[1],v=0,y=0;--h>=0;)v=d+c*(f=v)-u*y,y=g+c*y+u*f,d=(p=t[h])[0]+c*(f=d)-u*g,g=p[1]+c*g+u*f;var x,b,_=(v=d+c*(f=v)-u*y)*v+(y=g+c*y+u*f)*y;c-=x=((d=c*(f=d)-u*g-r)*v+(g=c*g+u*f-s)*y)/_,u-=b=(g*v-d*y)/_}while(n(x)+n(b)>1e-12&&--l>0);if(l){var w=L(c*c+u*u),T=2*i(.5*w),k=m(T);return[a(c*k,w*o(T)),w?S(u*k/w):0]}},r}me.invert=function(t,e){var r=n(t),i=n(e),a=v,s=x;iv||n(b)>v)&&--a>0);return a&&[r,i]},ve.invert=function(t,e){var r=t*t,n=e*e+1,i=r+n,a=t?_*L((i-L(i*i-4*r))/r):1/L(n);return[S(t*a),d(e)*E(a)]},xe.invert=function(t,e){return[t,2.5*i(s(.8*e))-.625*y]};var _e=[[.9972523,0],[.0052513,-.0041175],[.0074606,.0048125],[-.0153783,-.1968253],[.0636871,-.1408027],[.3660976,-.2937382]],we=[[.98879,0],[0,0],[-.050909,0],[0,0],[.075528,0]],Te=[[.984299,0],[.0211642,.0037608],[-.1036018,-.0575102],[-.0329095,-.0320119],[.0499471,.1223335],[.026046,.0899805],[7388e-7,-.1435792],[.0075848,-.1334108],[-.0216473,.0776645],[-.0225161,.0853673]],ke=[[.9245,0],[0,0],[.01943,0]],Ae=[[.721316,0],[0,0],[-.00881625,-.00617325]];function Me(t,r){var n=e.geoProjection(be(t)).rotate(r).clipAngle(90),i=e.geoRotation(r),a=n.center;return delete n.rotate,n.center=function(t){return arguments.length?a(i(t)):i.invert(a())},n}var Se=L(6),Ee=L(7);function Le(t,e){var r=S(7*m(e)/(3*Se));return[Se*t*(2*o(2*r/3)-1)/Ee,9*m(r/3)/Ee]}function Ce(t,e){for(var r,i=(1+_)*m(e),a=e,s=0;s<25&&(a-=r=(m(a/2)+m(a)-i)/(.5*o(a/2)+o(a)),!(n(r)1e-12&&--l>0);return[t/(.84719-.13063*(i=s*s)+(o=i*(a=i*i))*o*(.05494*i-.04515-.02326*a+.00331*o)),s]},Oe.invert=function(t,e){for(var 
r=e/2,i=0,a=1/0;i<10&&n(a)>v;++i){var s=o(e/2);e-=a=(e-g(e/2)-r)/(1-.5/(s*s))}return[2*t/(1+o(e)),e]};var ze=[[[[-180,0],[-90,90],[0,0]],[[0,0],[90,90],[180,0]]],[[[-180,0],[-90,-90],[0,0]],[[0,0],[90,-90],[180,0]]]];function De(t,e){var r=m(e),i=o(e),a=d(t);if(0===t||n(e)===x)return[0,e];if(0===e)return[t,0];if(n(t)===x)return[t*i,x*r];var s=y/(2*t)-2*t/y,l=2*e/y,c=(1-l*l)/(r-l),u=s*s,f=c*c,h=1+u/f,p=1+f/u,g=(s*r/c-s/2)/h,v=(f*r/u+c/2)/p,b=v*v-(f*r*r/u+c*r-1)/p;return[x*(g+L(g*g+i*i/h)*a),x*(v+L(b<0?0:b)*d(-e*s)*a)]}De.invert=function(t,e){var r=(t/=x)*t,n=r+(e/=x)*e,i=y*y;return[t?(n-1+L((1-n)*(1-n)+4*r))/(2*t)*x:0,V((function(t){return n*(y*m(t)-2*t)*y+4*t*t*(e-m(t))+2*y*t-i*e}),0)]};function Re(t,e){var r=e*e;return[t,e*(1.0148+r*r*(.23185+r*(.02406*r-.14499)))]}function Fe(t,e){if(n(e)=0;)if(n=e[s],r[0]===n[0]&&r[1]===n[1]){if(a)return[a,r];a=r}}}(e.face,r.face),i=Be(n.map(r.project),n.map(e.project));e.transform=r.transform?Ne(r.transform,i):i;for(var a=r.edges,o=0,s=a.length;o1.790857183?e=1.790857183:e<-1.790857183&&(e=-1.790857183);var r,i=e;do{var a=i*i;i-=r=(i*(1.0148+a*a*(.23185+a*(.02406*a-.14499)))-e)/(1.0148+a*a*(5*.23185+a*(.21654*a-1.01493)))}while(n(r)>v);return[t,i]},Fe.invert=function(t,e){if(n(e)v&&--s>0);return l=g(a),[(n(e)n^p>n&&r<(h-c)*(n-u)/(p-u)+c&&(i=!i)}return i}(t[0],r))return t.push(e),!0}))||t.push([e])})),nr=[],t.length?t.length>1?{type:"MultiPolygon",coordinates:t}:{type:"Polygon",coordinates:t[0]}:null}};function sr(t){var r=t(x,0)[0]-t(-x,0)[0];function i(e,i){var a=n(e)0?e-y:e+y,i),s=(o[0]-o[1])*_,l=(o[0]+o[1])*_;if(a)return[s,l];var c=r*_,u=s>0^l>0?-1:1;return[u*s-d(l)*c,u*l-d(s)*c]}return t.invert&&(i.invert=function(e,i){var a=(e+i)*_,o=(i-e)*_,s=n(a)<.5*r&&n(o)<.5*r;if(!s){var l=r*_,c=a>0^o>0?-1:1,u=-c*e+(o>0?1:-1)*l,f=-c*i+(a>0?1:-1)*l;a=(-u-f)*_,o=(u-f)*_}var h=t.invert(a,o);return s||(h[0]+=a>0?y:-y),h}),e.geoProjection(i).rotate([-90,-90,45]).clipAngle(179.999)}function lr(){return sr(Ut).scale(111.48)}function cr(t){var 
e=m(t);function r(r,n){var a=e?g(r*e/2)/e:r/2;if(!n)return[2*a,-t];var s=2*i(a*m(n)),l=1/g(n);return[m(s)*l,n+(1-o(s))*l-t]}return r.invert=function(r,a){if(n(a+=t)v&&--u>0);var d=r*(f=g(c)),x=g(n(a)0?x:-x)*(h+o*(d-c)/2+o*o*(d-2*h+c)/2)]}function hr(t,e){var r=function(t){function e(e,r){var n=o(r),i=(t-1)/(t-n*o(e));return[i*n*m(e),i*m(r)]}return e.invert=function(e,r){var n=e*e+r*r,i=L(n),o=(t-L(1-n*(t+1)/(t-1)))/((t-1)/i+i/(t-1));return[a(e*o,i*L(1-o*o)),i?S(r*o/i):0]},e}(t);if(!e)return r;var n=o(e),i=m(e);function s(e,a){var o=r(e,a),s=o[1],l=s*i/(t-1)+n;return[o[0]*n/l,s/l]}return s.invert=function(e,a){var o=(t-1)/(t-1-a*i);return r.invert(o*e,o*a*n)},s}ur.forEach((function(t){t[1]*=1.0144})),fr.invert=function(t,e){var r=e/x,i=90*r,a=f(18,n(i/5)),o=u(0,l(a));do{var s=ur[o][1],c=ur[o+1][1],h=ur[f(19,o+2)][1],p=h-s,d=h-2*c+s,m=2*(n(r)-c)/p,g=d/p,v=m*(1-g*m*(1-2*g*m));if(v>=0||1===o){i=(e>=0?5:-5)*(v+a);var y,b=50;do{v=(a=f(18,n(i)/5))-(o=l(a)),s=ur[o][1],c=ur[o+1][1],h=ur[f(19,o+2)][1],i-=(y=(e>=0?x:-x)*(c+v*(h-s)/2+v*v*(h-2*c+s)/2)-e)*A}while(n(y)>1e-12&&--b>0);break}}while(--o>=0);var _=ur[o][0],w=ur[o+1][0],T=ur[f(19,o+2)][0];return[t/(w+v*(T-_)/2+v*v*(T-2*w+_)/2),i*M]};var pr=-179.9999,dr=179.9999,mr=-89.9999;function gr(t){return t.length>0}function vr(t){return-90===t||90===t?[0,t]:[-180,(e=t,Math.floor(1e4*e)/1e4)];var e}function yr(t){var e=t[0],r=t[1],n=!1;return e<=pr?(e=-180,n=!0):e>=dr&&(e=180,n=!0),r<=mr?(r=-90,n=!0):r>=89.9999&&(r=90,n=!0),n?[e,r]:t}function xr(t){return t.map(yr)}function br(t,e,r){for(var n=0,i=t.length;n=dr||u<=mr||u>=89.9999){a[o]=yr(l);for(var f=o+1;fpr&&pmr&&d<89.9999)break}if(f===o+1)continue;if(o){var m={index:-1,polygon:e,ring:a.slice(0,o+1)};m.ring[m.ring.length-1]=vr(u),r[r.length-1]=m}else r.pop();if(f>=s)break;r.push({index:-1,polygon:e,ring:a=a.slice(f-1)}),a[0]=vr(a[0][1]),o=-1,s=a.length}}}}function _r(t){var 
e,r,n,i,a,o,s=t.length,l={},c={};for(e=0;e0?y-l:l)*A],u=e.geoProjection(t(s)).rotate(c),f=e.geoRotation(c),h=u.center;return delete u.rotate,u.center=function(t){return arguments.length?h(f(t)):f.invert(h())},u.clipAngle(90)}function Mr(t){var r=o(t);function n(t,n){var i=e.geoGnomonicRaw(t,n);return i[0]*=r,i}return n.invert=function(t,n){return e.geoGnomonicRaw.invert(t/r,n)},n}function Sr(t,e){return Ar(Mr,t,e)}function Er(t){if(!(t*=2))return e.geoAzimuthalEquidistantRaw;var r=-t/2,n=-r,i=t*t,s=g(n),l=.5/m(n);function c(e,a){var s=E(o(a)*o(e-r)),l=E(o(a)*o(e-n));return[((s*=s)-(l*=l))/(2*t),(a<0?-1:1)*L(4*i*l-(i-s+l)*(i-s+l))/(2*t)]}return c.invert=function(t,e){var i,c,u=e*e,f=o(L(u+(i=t+r)*i)),h=o(L(u+(i=t+n)*i));return[a(c=f-h,i=(f+h)*s),(e<0?-1:1)*E(L(i*i+c*c)*l)]},c}function Lr(t,e){return Ar(Er,t,e)}function Cr(t,e){if(n(e)v&&--l>0);return[d(t)*(L(a*a+4)+a)*y/4,x*s]};var Rr=4*y+3*L(3),Fr=2*L(2*y*L(3)/Rr),Br=Y(Fr*L(3)/y,Fr,Rr/6);function Nr(t,e){return[t*L(1-3*e*e/(y*y)),e]}function jr(t,e){var r=o(e),n=o(t)*r,i=1-n,s=o(t=a(m(t)*r,-m(e))),l=m(t);return[l*(r=L(1-n*n))-s*i,-s*r-l*i]}function Ur(t,e){var r=O(t,e);return[(r[0]+t/x)/2,(r[1]+e)/2]}Nr.invert=function(t,e){return[t/L(1-3*e*e/(y*y)),e]},jr.invert=function(t,e){var r=(t*t+e*e)/-2,n=L(-r*(2+r)),i=e*r+t*n,o=t*r-e*n,s=L(o*o+i*i);return[a(n*i,s*(1+r)),s?-S(n*o/s):0]},Ur.invert=function(t,e){var r=t,i=e,a=25;do{var s,l=o(i),c=m(i),u=m(2*i),f=c*c,h=l*l,p=m(r),d=o(r/2),g=m(r/2),y=g*g,b=1-h*d*d,_=b?E(l*d)*L(s=1/b):s=0,w=.5*(2*_*l*g+r/x)-t,T=.5*(_*c+i)-e,k=.5*s*(h*y+_*l*d*f)+.5/x,A=s*(p*u/4-_*c*g),M=.125*s*(u*g-_*c*h*p),S=.5*s*(f*d+_*y*l)+.5,C=A*M-S*k,P=(T*A-w*S)/C,I=(w*M-T*k)/C;r-=P,i-=I}while((n(P)>v||n(I)>v)&&--a>0);return[r,i]},t.geoNaturalEarth=e.geoNaturalEarth1,t.geoNaturalEarthRaw=e.geoNaturalEarth1Raw,t.geoAiry=function(){var t=x,r=e.geoProjectionMutator(I),n=r(t);return n.radius=function(e){return 
arguments.length?r(t=e*M):t*A},n.scale(179.976).clipAngle(147)},t.geoAiryRaw=I,t.geoAitoff=function(){return e.geoProjection(O).scale(152.63)},t.geoAitoffRaw=O,t.geoArmadillo=function(){var t=20*M,r=t>=0?1:-1,n=g(r*t),i=e.geoProjectionMutator(z),s=i(t),l=s.stream;return s.parallel=function(e){return arguments.length?(n=g((r=(t=e*M)>=0?1:-1)*t),i(t)):t*A},s.stream=function(e){var i=s.rotate(),c=l(e),u=(s.rotate([0,0]),l(e)),f=s.precision();return s.rotate(i),c.sphere=function(){u.polygonStart(),u.lineStart();for(var e=-180*r;r*e<180;e+=90*r)u.point(e,90*r);if(t)for(;r*(e-=3*r*f)>=-180;)u.point(e,r*-a(o(e*M/2),n)*A);u.lineEnd(),u.polygonEnd()},c},s.scale(218.695).center([0,28.0974])},t.geoArmadilloRaw=z,t.geoAugust=function(){return e.geoProjection(D).scale(66.1603)},t.geoAugustRaw=D,t.geoBaker=function(){return e.geoProjection(B).scale(112.314)},t.geoBakerRaw=B,t.geoBerghaus=function(){var t=5,r=e.geoProjectionMutator(N),n=r(t),i=n.stream,s=-o(.01*M),l=m(.01*M);return n.lobes=function(e){return arguments.length?r(t=+e):t},n.stream=function(e){var r=n.rotate(),c=i(e),u=(n.rotate([0,0]),i(e));return n.rotate(r),c.sphere=function(){u.polygonStart(),u.lineStart();for(var e=0,r=360/t,n=2*y/t,i=90-180/t,c=x;e=0;)t.point((e=r[i])[0],e[1]);t.lineEnd(),t.polygonEnd()},t},n.scale(79.4187).parallel(45).clipAngle(179.999)},t.geoHammerRetroazimuthalRaw=Vt,t.geoHealpix=function(){var t=4,n=e.geoProjectionMutator(Yt),i=n(t),a=i.stream;return i.lobes=function(e){return arguments.length?n(t=+e):t},i.stream=function(n){var o=i.rotate(),s=a(n),l=(i.rotate([0,0]),a(n));return i.rotate(o),s.sphere=function(){var n,i;e.geoStream((n=180/t,i=[].concat(r.range(-180,180+n/2,n).map(Wt),r.range(180,-180-n/2,-n).map(Xt)),{type:"Polygon",coordinates:[180===n?i.map(Zt):i]}),l)},s},i.scale(239.75)},t.geoHealpixRaw=Yt,t.geoHill=function(){var t=1,r=e.geoProjectionMutator(Jt),n=r(t);return n.ratio=function(e){return 
arguments.length?r(t=+e):t},n.scale(167.774).center([0,18.67])},t.geoHillRaw=Jt,t.geoHomolosine=function(){return e.geoProjection(Qt).scale(152.63)},t.geoHomolosineRaw=Qt,t.geoHufnagel=function(){var t=1,r=0,n=45*M,i=2,a=e.geoProjectionMutator($t),o=a(t,r,n,i);return o.a=function(e){return arguments.length?a(t=+e,r,n,i):t},o.b=function(e){return arguments.length?a(t,r=+e,n,i):r},o.psiMax=function(e){return arguments.length?a(t,r,n=+e*M,i):n*A},o.ratio=function(e){return arguments.length?a(t,r,n,i=+e):i},o.scale(180.739)},t.geoHufnagelRaw=$t,t.geoHyperelliptical=function(){var t=0,r=2.5,n=1.183136,i=e.geoProjectionMutator(ee),a=i(t,r,n);return a.alpha=function(e){return arguments.length?i(t=+e,r,n):t},a.k=function(e){return arguments.length?i(t,r=+e,n):r},a.gamma=function(e){return arguments.length?i(t,r,n=+e):n},a.scale(152.63)},t.geoHyperellipticalRaw=ee,t.geoInterrupt=ae,t.geoInterruptedBoggs=function(){return ae(J,oe).scale(160.857)},t.geoInterruptedHomolosine=function(){return ae(Qt,se).scale(152.63)},t.geoInterruptedMollweide=function(){return ae(W,le).scale(169.529)},t.geoInterruptedMollweideHemispheres=function(){return ae(W,ce).scale(169.529).rotate([20,0])},t.geoInterruptedSinuMollweide=function(){return ae(Kt,ue,H).rotate([-20,-55]).scale(164.263).center([0,-5.4036])},t.geoInterruptedSinusoidal=function(){return ae(Q,fe).scale(152.63).rotate([-20,0])},t.geoKavrayskiy7=function(){return e.geoProjection(he).scale(158.837)},t.geoKavrayskiy7Raw=he,t.geoLagrange=function(){var t=.5,r=e.geoProjectionMutator(pe),n=r(t);return n.spacing=function(e){return arguments.length?r(t=+e):t},n.scale(124.75)},t.geoLagrangeRaw=pe,t.geoLarrivee=function(){return e.geoProjection(me).scale(97.2672)},t.geoLarriveeRaw=me,t.geoLaskowski=function(){return e.geoProjection(ge).scale(139.98)},t.geoLaskowskiRaw=ge,t.geoLittrow=function(){return e.geoProjection(ve).scale(144.049).clipAngle(89.999)},t.geoLittrowRaw=ve,t.geoLoximuthal=function(){return 
K(ye).parallel(40).scale(158.837)},t.geoLoximuthalRaw=ye,t.geoMiller=function(){return e.geoProjection(xe).scale(108.318)},t.geoMillerRaw=xe,t.geoModifiedStereographic=Me,t.geoModifiedStereographicRaw=be,t.geoModifiedStereographicAlaska=function(){return Me(_e,[152,-64]).scale(1400).center([-160.908,62.4864]).clipAngle(30).angle(7.8)},t.geoModifiedStereographicGs48=function(){return Me(we,[95,-38]).scale(1e3).clipAngle(55).center([-96.5563,38.8675])},t.geoModifiedStereographicGs50=function(){return Me(Te,[120,-45]).scale(359.513).clipAngle(55).center([-117.474,53.0628])},t.geoModifiedStereographicMiller=function(){return Me(ke,[-20,-18]).scale(209.091).center([20,16.7214]).clipAngle(82)},t.geoModifiedStereographicLee=function(){return Me(Ae,[165,10]).scale(250).clipAngle(130).center([-165,-10])},t.geoMollweide=function(){return e.geoProjection(W).scale(169.529)},t.geoMollweideRaw=W,t.geoMtFlatPolarParabolic=function(){return e.geoProjection(Le).scale(164.859)},t.geoMtFlatPolarParabolicRaw=Le,t.geoMtFlatPolarQuartic=function(){return e.geoProjection(Ce).scale(188.209)},t.geoMtFlatPolarQuarticRaw=Ce,t.geoMtFlatPolarSinusoidal=function(){return e.geoProjection(Pe).scale(166.518)},t.geoMtFlatPolarSinusoidalRaw=Pe,t.geoNaturalEarth2=function(){return e.geoProjection(Ie).scale(175.295)},t.geoNaturalEarth2Raw=Ie,t.geoNellHammer=function(){return e.geoProjection(Oe).scale(152.63)},t.geoNellHammerRaw=Oe,t.geoInterruptedQuarticAuthalic=function(){return ae(j(1/0),ze).rotate([20,0]).scale(152.63)},t.geoNicolosi=function(){return e.geoProjection(De).scale(127.267)},t.geoNicolosiRaw=De,t.geoPatterson=function(){return e.geoProjection(Re).scale(139.319)},t.geoPattersonRaw=Re,t.geoPolyconic=function(){return e.geoProjection(Fe).scale(103.74)},t.geoPolyconicRaw=Fe,t.geoPolyhedral=Ve,t.geoPolyhedralButterfly=function(t){t=t||function(t){var r=e.geoCentroid({type:"MultiPoint",coordinates:t});return e.geoGnomonic().scale(1).translate([0,0]).rotate([-r[0],-r[1]])};var 
r=Ye.map((function(e){return{face:e,project:t(e)}}));return[-1,0,0,1,0,1,4,5].forEach((function(t,e){var n=r[t];n&&(n.children||(n.children=[])).push(r[e])})),Ve(r[0],(function(t,e){return r[t<-y/2?e<0?6:4:t<0?e<0?2:0:t0?[-r[0],0]:[180-r[0],180])};var r=Ye.map((function(e){return{face:e,project:t(e)}}));return[-1,0,0,1,0,1,4,5].forEach((function(t,e){var n=r[t];n&&(n.children||(n.children=[])).push(r[e])})),Ve(r[0],(function(t,e){return r[t<-y/2?e<0?6:4:t<0?e<0?2:0:t2||a[0]!=e[0]||a[1]!=e[1])&&(n.push(a),e=a)}return 1===n.length&&t.length>1&&n.push(r(t[t.length-1])),n}function a(t){return t.map(i)}function o(t){if(null==t)return t;var e;switch(t.type){case"GeometryCollection":e={type:"GeometryCollection",geometries:t.geometries.map(o)};break;case"Point":e={type:"Point",coordinates:r(t.coordinates)};break;case"MultiPoint":e={type:t.type,coordinates:n(t.coordinates)};break;case"LineString":e={type:t.type,coordinates:i(t.coordinates)};break;case"MultiLineString":case"Polygon":e={type:t.type,coordinates:a(t.coordinates)};break;case"MultiPolygon":e={type:"MultiPolygon",coordinates:t.coordinates.map(a)};break;default:return t}return null!=t.bbox&&(e.bbox=t.bbox),e}function s(t){var e={type:"Feature",properties:t.properties,geometry:o(t.geometry)};return null!=t.id&&(e.id=t.id),null!=t.bbox&&(e.bbox=t.bbox),e}if(null!=t)switch(t.type){case"Feature":return s(t);case"FeatureCollection":var l={type:"FeatureCollection",features:t.features.map(s)};return null!=t.bbox&&(l.bbox=t.bbox),l;default:return o(t)}return t},t.geoQuincuncial=sr,t.geoRectangularPolyconic=function(){return K(cr).scale(131.215)},t.geoRectangularPolyconicRaw=cr,t.geoRobinson=function(){return e.geoProjection(fr).scale(152.63)},t.geoRobinsonRaw=fr,t.geoSatellite=function(){var t=2,r=0,n=e.geoProjectionMutator(hr),i=n(t,r);return i.distance=function(e){return arguments.length?n(t=+e,r):t},i.tilt=function(e){return 
arguments.length?n(t,r=e*M):r*A},i.scale(432.147).clipAngle(E(1/t)*A-1e-6)},t.geoSatelliteRaw=hr,t.geoSinuMollweide=function(){return e.geoProjection(Kt).rotate([-20,-55]).scale(164.263).center([0,-5.4036])},t.geoSinuMollweideRaw=Kt,t.geoSinusoidal=function(){return e.geoProjection(Q).scale(152.63)},t.geoSinusoidalRaw=Q,t.geoStitch=function(t){if(null==t)return t;switch(t.type){case"Feature":return wr(t);case"FeatureCollection":var e={type:"FeatureCollection",features:t.features.map(wr)};return null!=t.bbox&&(e.bbox=t.bbox),e;default:return Tr(t)}},t.geoTimes=function(){return e.geoProjection(kr).scale(146.153)},t.geoTimesRaw=kr,t.geoTwoPointAzimuthal=Sr,t.geoTwoPointAzimuthalRaw=Mr,t.geoTwoPointAzimuthalUsa=function(){return Sr([-158,21.5],[-77,39]).clipAngle(60).scale(400)},t.geoTwoPointEquidistant=Lr,t.geoTwoPointEquidistantRaw=Er,t.geoTwoPointEquidistantUsa=function(){return Lr([-158,21.5],[-77,39]).clipAngle(130).scale(122.571)},t.geoVanDerGrinten=function(){return e.geoProjection(Cr).scale(79.4183)},t.geoVanDerGrintenRaw=Cr,t.geoVanDerGrinten2=function(){return e.geoProjection(Pr).scale(79.4183)},t.geoVanDerGrinten2Raw=Pr,t.geoVanDerGrinten3=function(){return e.geoProjection(Ir).scale(79.4183)},t.geoVanDerGrinten3Raw=Ir,t.geoVanDerGrinten4=function(){return e.geoProjection(Or).scale(127.16)},t.geoVanDerGrinten4Raw=Or,t.geoWagner=Dr,t.geoWagner7=function(){return Dr().poleline(65).parallels(60).inflation(0).ratio(200).scale(172.633)},t.geoWagnerRaw=zr,t.geoWagner4=function(){return e.geoProjection(Br).scale(176.84)},t.geoWagner4Raw=Br,t.geoWagner6=function(){return e.geoProjection(Nr).scale(152.63)},t.geoWagner6Raw=Nr,t.geoWiechel=function(){return e.geoProjection(jr).rotate([0,-90,45]).scale(124.75).clipAngle(179.999)},t.geoWiechelRaw=jr,t.geoWinkel3=function(){return e.geoProjection(Ur).scale(158.837)},t.geoWinkel3Raw=Ur,Object.defineProperty(t,"__esModule",{value:!0})}))},{"d3-array":102,"d3-geo":109}],109:[function(t,e,r){!function(n,i){"object"==typeof 
r&&void 0!==e?i(r,t("d3-array")):i((n=n||self).d3=n.d3||{},n.d3)}(this,(function(t,e){"use strict";function r(){return new n}function n(){this.reset()}n.prototype={constructor:n,reset:function(){this.s=this.t=0},add:function(t){a(i,t,this.t),a(this,i.s,this.s),this.s?this.t+=i.t:this.s=i.t},valueOf:function(){return this.s}};var i=new n;function a(t,e,r){var n=t.s=e+r,i=n-e,a=n-i;t.t=e-a+(r-i)}var o=1e-6,s=Math.PI,l=s/2,c=s/4,u=2*s,f=180/s,h=s/180,p=Math.abs,d=Math.atan,m=Math.atan2,g=Math.cos,v=Math.ceil,y=Math.exp,x=Math.log,b=Math.pow,_=Math.sin,w=Math.sign||function(t){return t>0?1:t<0?-1:0},T=Math.sqrt,k=Math.tan;function A(t){return t>1?0:t<-1?s:Math.acos(t)}function M(t){return t>1?l:t<-1?-l:Math.asin(t)}function S(t){return(t=_(t/2))*t}function E(){}function L(t,e){t&&P.hasOwnProperty(t.type)&&P[t.type](t,e)}var C={Feature:function(t,e){L(t.geometry,e)},FeatureCollection:function(t,e){for(var r=t.features,n=-1,i=r.length;++n=0?1:-1,i=n*r,a=g(e=(e*=h)/2+c),o=_(e),s=N*o,l=B*a+s*g(i),u=s*n*_(i);j.add(m(u,l)),F=t,B=a,N=o}function W(t){return[m(t[1],t[0]),M(t[2])]}function X(t){var e=t[0],r=t[1],n=g(r);return[n*g(e),n*_(e),_(r)]}function Z(t,e){return t[0]*e[0]+t[1]*e[1]+t[2]*e[2]}function J(t,e){return[t[1]*e[2]-t[2]*e[1],t[2]*e[0]-t[0]*e[2],t[0]*e[1]-t[1]*e[0]]}function K(t,e){t[0]+=e[0],t[1]+=e[1],t[2]+=e[2]}function Q(t,e){return[t[0]*e,t[1]*e,t[2]*e]}function $(t){var e=T(t[0]*t[0]+t[1]*t[1]+t[2]*t[2]);t[0]/=e,t[1]/=e,t[2]/=e}var tt,et,rt,nt,it,at,ot,st,lt,ct,ut,ft,ht,pt,dt,mt,gt,vt,yt,xt,bt,_t,wt,Tt,kt,At,Mt=r(),St={point:Et,lineStart:Ct,lineEnd:Pt,polygonStart:function(){St.point=It,St.lineStart=Ot,St.lineEnd=zt,Mt.reset(),V.polygonStart()},polygonEnd:function(){V.polygonEnd(),St.point=Et,St.lineStart=Ct,St.lineEnd=Pt,j<0?(tt=-(rt=180),et=-(nt=90)):Mt>o?nt=90:Mt<-o&&(et=-90),ct[0]=tt,ct[1]=rt},sphere:function(){tt=-(rt=180),et=-(nt=90)}};function Et(t,e){lt.push(ct=[tt=t,rt=t]),ent&&(nt=e)}function Lt(t,e){var r=X([t*h,e*h]);if(st){var 
n=J(st,r),i=J([n[1],-n[0],0],n);$(i),i=W(i);var a,o=t-it,s=o>0?1:-1,l=i[0]*f*s,c=p(o)>180;c^(s*itnt&&(nt=a):c^(s*it<(l=(l+360)%360-180)&&lnt&&(nt=e)),c?tDt(tt,rt)&&(rt=t):Dt(t,rt)>Dt(tt,rt)&&(tt=t):rt>=tt?(trt&&(rt=t)):t>it?Dt(tt,t)>Dt(tt,rt)&&(rt=t):Dt(t,rt)>Dt(tt,rt)&&(tt=t)}else lt.push(ct=[tt=t,rt=t]);ent&&(nt=e),st=r,it=t}function Ct(){St.point=Lt}function Pt(){ct[0]=tt,ct[1]=rt,St.point=Et,st=null}function It(t,e){if(st){var r=t-it;Mt.add(p(r)>180?r+(r>0?360:-360):r)}else at=t,ot=e;V.point(t,e),Lt(t,e)}function Ot(){V.lineStart()}function zt(){It(at,ot),V.lineEnd(),p(Mt)>o&&(tt=-(rt=180)),ct[0]=tt,ct[1]=rt,st=null}function Dt(t,e){return(e-=t)<0?e+360:e}function Rt(t,e){return t[0]-e[0]}function Ft(t,e){return t[0]<=t[1]?t[0]<=e&&e<=t[1]:es?t+Math.round(-t/u)*u:t,e]}function Qt(t,e,r){return(t%=u)?e||r?Jt(te(t),ee(e,r)):te(t):e||r?ee(e,r):Kt}function $t(t){return function(e,r){return[(e+=t)>s?e-u:e<-s?e+u:e,r]}}function te(t){var e=$t(t);return e.invert=$t(-t),e}function ee(t,e){var r=g(t),n=_(t),i=g(e),a=_(e);function o(t,e){var o=g(e),s=g(t)*o,l=_(t)*o,c=_(e),u=c*r+s*n;return[m(l*i-u*a,s*r-c*n),M(u*i+l*a)]}return o.invert=function(t,e){var o=g(e),s=g(t)*o,l=_(t)*o,c=_(e),u=c*i-l*a;return[m(l*i+c*a,s*r+u*n),M(u*r-s*n)]},o}function re(t){function e(e){return(e=t(e[0]*h,e[1]*h))[0]*=f,e[1]*=f,e}return t=Qt(t[0]*h,t[1]*h,t.length>2?t[2]*h:0),e.invert=function(e){return(e=t.invert(e[0]*h,e[1]*h))[0]*=f,e[1]*=f,e},e}function ne(t,e,r,n,i,a){if(r){var o=g(e),s=_(e),l=n*r;null==i?(i=e+n*u,a=e-l/2):(i=ie(o,i),a=ie(o,a),(n>0?ia)&&(i+=n*u));for(var c,f=i;n>0?f>a:f1&&e.push(e.pop().concat(e.shift()))},result:function(){var r=e;return e=[],t=null,r}}}function oe(t,e){return p(t[0]-e[0])=0;--a)i.point((f=u[a])[0],f[1]);else n(p.x,p.p.x,-1,i);p=p.p}u=(p=p.o).z,d=!d}while(!p.v);i.lineEnd()}}}function ce(t){if(e=t.length){for(var e,r,n=0,i=t[0];++n=0?1:-1,z=O*I,D=z>s,R=T*C;if(ue.add(m(R*O*_(z),k*P+R*g(z))),f+=D?I+O*u:I,D^b>=r^E>=r){var F=J(X(x),X(S));$(F);var 
B=J(a,F);$(B);var N=(D^I>=0?-1:1)*M(B[2]);(n>N||n===N&&(F[0]||F[1]))&&(h+=D^I>=0?1:-1)}}return(f<-o||f0){for(h||(a.polygonStart(),h=!0),a.lineStart(),t=0;t1&&2&i&&c.push(c.pop().concat(c.shift())),s.push(c.filter(de))}return p}}function de(t){return t.length>1}function me(t,e){return((t=t.x)[0]<0?t[1]-l-o:l-t[1])-((e=e.x)[0]<0?e[1]-l-o:l-e[1])}var ge=pe((function(){return!0}),(function(t){var e,r=NaN,n=NaN,i=NaN;return{lineStart:function(){t.lineStart(),e=1},point:function(a,c){var u=a>0?s:-s,f=p(a-r);p(f-s)0?l:-l),t.point(i,n),t.lineEnd(),t.lineStart(),t.point(u,n),t.point(a,n),e=0):i!==u&&f>=s&&(p(r-i)o?d((_(e)*(a=g(n))*_(r)-_(n)*(i=g(e))*_(t))/(i*a*s)):(e+n)/2}(r,n,a,c),t.point(i,n),t.lineEnd(),t.lineStart(),t.point(u,n),e=0),t.point(r=a,n=c),i=u},lineEnd:function(){t.lineEnd(),r=n=NaN},clean:function(){return 2-e}}}),(function(t,e,r,n){var i;if(null==t)i=r*l,n.point(-s,i),n.point(0,i),n.point(s,i),n.point(s,0),n.point(s,-i),n.point(0,-i),n.point(-s,-i),n.point(-s,0),n.point(-s,i);else if(p(t[0]-e[0])>o){var a=t[0]0,i=p(e)>o;function a(t,r){return g(t)*g(r)>e}function l(t,r,n){var i=[1,0,0],a=J(X(t),X(r)),l=Z(a,a),c=a[0],u=l-c*c;if(!u)return!n&&t;var f=e*l/u,h=-e*c/u,d=J(i,a),m=Q(i,f);K(m,Q(a,h));var g=d,v=Z(m,g),y=Z(g,g),x=v*v-y*(Z(m,m)-1);if(!(x<0)){var b=T(x),_=Q(g,(-v-b)/y);if(K(_,m),_=W(_),!n)return _;var w,k=t[0],A=r[0],M=t[1],S=r[1];A0^_[1]<(p(_[0]-k)s^(k<=_[0]&&_[0]<=A)){var C=Q(g,(-v+b)/y);return K(C,m),[_,W(C)]}}}function c(e,r){var i=n?t:s-t,a=0;return e<-i?a|=1:e>i&&(a|=2),r<-i?a|=4:r>i&&(a|=8),a}return pe(a,(function(t){var e,r,o,u,f;return{lineStart:function(){u=o=!1,f=1},point:function(h,p){var d,m=[h,p],g=a(h,p),v=n?g?0:c(h,p):g?c(h+(h<0?s:-s),p):0;if(!e&&(u=o=g)&&t.lineStart(),g!==o&&(!(d=l(e,m))||oe(e,d)||oe(m,d))&&(m[2]=1),g!==o)f=0,g?(t.lineStart(),d=l(m,e),t.point(d[0],d[1])):(d=l(e,m),t.point(d[0],d[1],2),t.lineEnd()),e=d;else if(i&&e&&n^g){var 
y;v&r||!(y=l(m,e,!0))||(f=0,n?(t.lineStart(),t.point(y[0][0],y[0][1]),t.point(y[1][0],y[1][1]),t.lineEnd()):(t.point(y[1][0],y[1][1]),t.lineEnd(),t.lineStart(),t.point(y[0][0],y[0][1],3)))}!g||e&&oe(e,m)||t.point(m[0],m[1]),e=m,o=g,r=v},lineEnd:function(){o&&t.lineEnd(),e=null},clean:function(){return f|(u&&o)<<1}}}),(function(e,n,i,a){ne(a,t,r,i,e,n)}),n?[0,-t]:[-s,t-s])}function ye(t,r,n,i){function a(e,a){return t<=e&&e<=n&&r<=a&&a<=i}function s(e,a,o,s){var c=0,f=0;if(null==e||(c=l(e,o))!==(f=l(a,o))||u(e,a)<0^o>0)do{s.point(0===c||3===c?t:n,c>1?i:r)}while((c=(c+o+4)%4)!==f);else s.point(a[0],a[1])}function l(e,i){return p(e[0]-t)0?0:3:p(e[0]-n)0?2:1:p(e[1]-r)0?1:0:i>0?3:2}function c(t,e){return u(t.x,e.x)}function u(t,e){var r=l(t,1),n=l(e,1);return r!==n?r-n:0===r?e[1]-t[1]:1===r?t[0]-e[0]:2===r?t[1]-e[1]:e[0]-t[0]}return function(o){var l,u,f,h,p,d,m,g,v,y,x,b=o,_=ae(),w={point:T,lineStart:function(){w.point=k,u&&u.push(f=[]);y=!0,v=!1,m=g=NaN},lineEnd:function(){l&&(k(h,p),d&&v&&_.rejoin(),l.push(_.result()));w.point=T,v&&b.lineEnd()},polygonStart:function(){b=_,l=[],u=[],x=!0},polygonEnd:function(){var r=function(){for(var e=0,r=0,n=u.length;ri&&(h-a)*(i-o)>(p-o)*(t-a)&&++e:p<=i&&(h-a)*(i-o)<(p-o)*(t-a)&&--e;return e}(),n=x&&r,a=(l=e.merge(l)).length;(n||a)&&(o.polygonStart(),n&&(o.lineStart(),s(null,null,1,o),o.lineEnd()),a&&le(l,c,r,s,o),o.polygonEnd());b=o,l=u=f=null}};function T(t,e){a(t,e)&&b.point(t,e)}function k(e,o){var s=a(e,o);if(u&&f.push([e,o]),y)h=e,p=o,d=s,y=!1,s&&(b.lineStart(),b.point(e,o));else if(s&&v)b.point(e,o);else{var l=[m=Math.max(-1e9,Math.min(1e9,m)),g=Math.max(-1e9,Math.min(1e9,g))],c=[e=Math.max(-1e9,Math.min(1e9,e)),o=Math.max(-1e9,Math.min(1e9,o))];!function(t,e,r,n,i,a){var o,s=t[0],l=t[1],c=0,u=1,f=e[0]-s,h=e[1]-l;if(o=r-s,f||!(o>0)){if(o/=f,f<0){if(o0){if(o>u)return;o>c&&(c=o)}if(o=i-s,f||!(o<0)){if(o/=f,f<0){if(o>u)return;o>c&&(c=o)}else 
if(f>0){if(o0)){if(o/=h,h<0){if(o0){if(o>u)return;o>c&&(c=o)}if(o=a-l,h||!(o<0)){if(o/=h,h<0){if(o>u)return;o>c&&(c=o)}else if(h>0){if(o0&&(t[0]=s+c*f,t[1]=l+c*h),u<1&&(e[0]=s+u*f,e[1]=l+u*h),!0}}}}}(l,c,t,r,n,i)?s&&(b.lineStart(),b.point(e,o),x=!1):(v||(b.lineStart(),b.point(l[0],l[1])),b.point(c[0],c[1]),s||b.lineEnd(),x=!1)}m=e,g=o,v=s}return w}}var xe,be,_e,we=r(),Te={sphere:E,point:E,lineStart:function(){Te.point=Ae,Te.lineEnd=ke},lineEnd:E,polygonStart:E,polygonEnd:E};function ke(){Te.point=Te.lineEnd=E}function Ae(t,e){xe=t*=h,be=_(e*=h),_e=g(e),Te.point=Me}function Me(t,e){t*=h;var r=_(e*=h),n=g(e),i=p(t-xe),a=g(i),o=n*_(i),s=_e*r-be*n*a,l=be*r+_e*n*a;we.add(m(T(o*o+s*s),l)),xe=t,be=r,_e=n}function Se(t){return we.reset(),z(t,Te),+we}var Ee=[null,null],Le={type:"LineString",coordinates:Ee};function Ce(t,e){return Ee[0]=t,Ee[1]=e,Se(Le)}var Pe={Feature:function(t,e){return Oe(t.geometry,e)},FeatureCollection:function(t,e){for(var r=t.features,n=-1,i=r.length;++n0&&(i=Ce(t[a],t[a-1]))>0&&r<=i&&n<=i&&(r+n-i)*(1-Math.pow((r-n)/i,2))<1e-12*i)return!0;r=n}return!1}function Re(t,e){return!!he(t.map(Fe),Be(e))}function Fe(t){return(t=t.map(Be)).pop(),t}function Be(t){return[t[0]*h,t[1]*h]}function Ne(t,r,n){var i=e.range(t,r-o,n).concat(r);return function(t){return i.map((function(e){return[t,e]}))}}function je(t,r,n){var i=e.range(t,r-o,n).concat(r);return function(t){return i.map((function(e){return[e,t]}))}}function Ue(){var t,r,n,i,a,s,l,c,u,f,h,d,m=10,g=m,y=90,x=360,b=2.5;function _(){return{type:"MultiLineString",coordinates:w()}}function w(){return e.range(v(i/y)*y,n,y).map(h).concat(e.range(v(c/x)*x,l,x).map(d)).concat(e.range(v(r/m)*m,t,m).filter((function(t){return p(t%y)>o})).map(u)).concat(e.range(v(s/g)*g,a,g).filter((function(t){return p(t%x)>o})).map(f))}return _.lines=function(){return 
w().map((function(t){return{type:"LineString",coordinates:t}}))},_.outline=function(){return{type:"Polygon",coordinates:[h(i).concat(d(l).slice(1),h(n).reverse().slice(1),d(c).reverse().slice(1))]}},_.extent=function(t){return arguments.length?_.extentMajor(t).extentMinor(t):_.extentMinor()},_.extentMajor=function(t){return arguments.length?(i=+t[0][0],n=+t[1][0],c=+t[0][1],l=+t[1][1],i>n&&(t=i,i=n,n=t),c>l&&(t=c,c=l,l=t),_.precision(b)):[[i,c],[n,l]]},_.extentMinor=function(e){return arguments.length?(r=+e[0][0],t=+e[1][0],s=+e[0][1],a=+e[1][1],r>t&&(e=r,r=t,t=e),s>a&&(e=s,s=a,a=e),_.precision(b)):[[r,s],[t,a]]},_.step=function(t){return arguments.length?_.stepMajor(t).stepMinor(t):_.stepMinor()},_.stepMajor=function(t){return arguments.length?(y=+t[0],x=+t[1],_):[y,x]},_.stepMinor=function(t){return arguments.length?(m=+t[0],g=+t[1],_):[m,g]},_.precision=function(e){return arguments.length?(b=+e,u=Ne(s,a,90),f=je(r,t,b),h=Ne(c,l,90),d=je(i,n,b),_):b},_.extentMajor([[-180,-90+o],[180,90-o]]).extentMinor([[-180,-80-o],[180,80+o]])}function Ve(t){return t}var He,qe,Ge,Ye,We=r(),Xe=r(),Ze={point:E,lineStart:E,lineEnd:E,polygonStart:function(){Ze.lineStart=Je,Ze.lineEnd=$e},polygonEnd:function(){Ze.lineStart=Ze.lineEnd=Ze.point=E,We.add(p(Xe)),Xe.reset()},result:function(){var t=We/2;return We.reset(),t}};function Je(){Ze.point=Ke}function Ke(t,e){Ze.point=Qe,He=Ge=t,qe=Ye=e}function Qe(t,e){Xe.add(Ye*t-Ge*e),Ge=t,Ye=e}function $e(){Qe(He,qe)}var tr=1/0,er=tr,rr=-tr,nr=rr,ir={point:function(t,e){trr&&(rr=t);enr&&(nr=e)},lineStart:E,lineEnd:E,polygonStart:E,polygonEnd:E,result:function(){var t=[[tr,er],[rr,nr]];return rr=nr=-(er=tr=1/0),t}};var ar,or,sr,lr,cr=0,ur=0,fr=0,hr=0,pr=0,dr=0,mr=0,gr=0,vr=0,yr={point:xr,lineStart:br,lineEnd:Tr,polygonStart:function(){yr.lineStart=kr,yr.lineEnd=Ar},polygonEnd:function(){yr.point=xr,yr.lineStart=br,yr.lineEnd=Tr},result:function(){var t=vr?[mr/vr,gr/vr]:dr?[hr/dr,pr/dr]:fr?[cr/fr,ur/fr]:[NaN,NaN];return 
cr=ur=fr=hr=pr=dr=mr=gr=vr=0,t}};function xr(t,e){cr+=t,ur+=e,++fr}function br(){yr.point=_r}function _r(t,e){yr.point=wr,xr(sr=t,lr=e)}function wr(t,e){var r=t-sr,n=e-lr,i=T(r*r+n*n);hr+=i*(sr+t)/2,pr+=i*(lr+e)/2,dr+=i,xr(sr=t,lr=e)}function Tr(){yr.point=xr}function kr(){yr.point=Mr}function Ar(){Sr(ar,or)}function Mr(t,e){yr.point=Sr,xr(ar=sr=t,or=lr=e)}function Sr(t,e){var r=t-sr,n=e-lr,i=T(r*r+n*n);hr+=i*(sr+t)/2,pr+=i*(lr+e)/2,dr+=i,mr+=(i=lr*t-sr*e)*(sr+t),gr+=i*(lr+e),vr+=3*i,xr(sr=t,lr=e)}function Er(t){this._context=t}Er.prototype={_radius:4.5,pointRadius:function(t){return this._radius=t,this},polygonStart:function(){this._line=0},polygonEnd:function(){this._line=NaN},lineStart:function(){this._point=0},lineEnd:function(){0===this._line&&this._context.closePath(),this._point=NaN},point:function(t,e){switch(this._point){case 0:this._context.moveTo(t,e),this._point=1;break;case 1:this._context.lineTo(t,e);break;default:this._context.moveTo(t+this._radius,e),this._context.arc(t,e,this._radius,0,u)}},result:E};var Lr,Cr,Pr,Ir,Or,zr=r(),Dr={point:E,lineStart:function(){Dr.point=Rr},lineEnd:function(){Lr&&Fr(Cr,Pr),Dr.point=E},polygonStart:function(){Lr=!0},polygonEnd:function(){Lr=null},result:function(){var t=+zr;return zr.reset(),t}};function Rr(t,e){Dr.point=Fr,Cr=Ir=t,Pr=Or=e}function Fr(t,e){Ir-=t,Or-=e,zr.add(T(Ir*Ir+Or*Or)),Ir=t,Or=e}function Br(){this._string=[]}function Nr(t){return"m0,"+t+"a"+t+","+t+" 0 1,1 0,"+-2*t+"a"+t+","+t+" 0 1,1 0,"+2*t+"z"}function jr(t){return function(e){var r=new Ur;for(var n in t)r[n]=t[n];return r.stream=e,r}}function Ur(){}function Vr(t,e,r){var n=t.clipExtent&&t.clipExtent();return t.scale(150).translate([0,0]),null!=n&&t.clipExtent(null),z(r,t.stream(ir)),e(ir.result()),null!=n&&t.clipExtent(n),t}function Hr(t,e,r){return Vr(t,(function(r){var 
n=e[1][0]-e[0][0],i=e[1][1]-e[0][1],a=Math.min(n/(r[1][0]-r[0][0]),i/(r[1][1]-r[0][1])),o=+e[0][0]+(n-a*(r[1][0]+r[0][0]))/2,s=+e[0][1]+(i-a*(r[1][1]+r[0][1]))/2;t.scale(150*a).translate([o,s])}),r)}function qr(t,e,r){return Hr(t,[[0,0],e],r)}function Gr(t,e,r){return Vr(t,(function(r){var n=+e,i=n/(r[1][0]-r[0][0]),a=(n-i*(r[1][0]+r[0][0]))/2,o=-i*r[0][1];t.scale(150*i).translate([a,o])}),r)}function Yr(t,e,r){return Vr(t,(function(r){var n=+e,i=n/(r[1][1]-r[0][1]),a=-i*r[0][0],o=(n-i*(r[1][1]+r[0][1]))/2;t.scale(150*i).translate([a,o])}),r)}Br.prototype={_radius:4.5,_circle:Nr(4.5),pointRadius:function(t){return(t=+t)!==this._radius&&(this._radius=t,this._circle=null),this},polygonStart:function(){this._line=0},polygonEnd:function(){this._line=NaN},lineStart:function(){this._point=0},lineEnd:function(){0===this._line&&this._string.push("Z"),this._point=NaN},point:function(t,e){switch(this._point){case 0:this._string.push("M",t,",",e),this._point=1;break;case 1:this._string.push("L",t,",",e);break;default:null==this._circle&&(this._circle=Nr(this._radius)),this._string.push("M",t,",",e,this._circle)}},result:function(){if(this._string.length){var t=this._string.join("");return this._string=[],t}return null}},Ur.prototype={constructor:Ur,point:function(t,e){this.stream.point(t,e)},sphere:function(){this.stream.sphere()},lineStart:function(){this.stream.lineStart()},lineEnd:function(){this.stream.lineEnd()},polygonStart:function(){this.stream.polygonStart()},polygonEnd:function(){this.stream.polygonEnd()}};var Wr=g(30*h);function Xr(t,e){return+e?function(t,e){function r(n,i,a,s,l,c,u,f,h,d,g,v,y,x){var b=u-n,_=f-i,w=b*b+_*_;if(w>4*e&&y--){var k=s+d,A=l+g,S=c+v,E=T(k*k+A*A+S*S),L=M(S/=E),C=p(p(S)-1)e||p((b*z+_*D)/w-.5)>.3||s*d+l*g+c*v2?t[2]%360*h:0,I()):[y*f,x*f,b*f]},C.angle=function(t){return arguments.length?(_=t%360*h,I()):_*f},C.reflectX=function(t){return arguments.length?(w=t?-1:1,I()):w<0},C.reflectY=function(t){return 
arguments.length?(k=t?-1:1,I()):k<0},C.precision=function(t){return arguments.length?(o=Xr(s,L=t*t),O()):T(L)},C.fitExtent=function(t,e){return Hr(C,t,e)},C.fitSize=function(t,e){return qr(C,t,e)},C.fitWidth=function(t,e){return Gr(C,t,e)},C.fitHeight=function(t,e){return Yr(C,t,e)},function(){return e=t.apply(this,arguments),C.invert=e.invert&&P,I()}}function tn(t){var e=0,r=s/3,n=$r(t),i=n(e,r);return i.parallels=function(t){return arguments.length?n(e=t[0]*h,r=t[1]*h):[e*f,r*f]},i}function en(t,e){var r=_(t),n=(r+_(e))/2;if(p(n)0?e<-l+o&&(e=-l+o):e>l-o&&(e=l-o);var r=i/b(fn(e),n);return[r*_(n*t),i-r*g(n*t)]}return a.invert=function(t,e){var r=i-e,a=w(n)*T(t*t+r*r),o=m(t,p(r))*w(r);return r*n<0&&(o-=s*w(t)*w(r)),[o/n,2*d(b(i/a,1/n))-l]},a}function pn(t,e){return[t,e]}function dn(t,e){var r=g(t),n=t===e?_(t):(r-g(e))/(e-t),i=r/n+t;if(p(n)o&&--i>0);return[t/(.8707+(a=n*n)*(a*(a*a*a*(.003971-.001529*a)-.013791)-.131979)),n]},Tn.invert=on(M),kn.invert=on((function(t){return 2*d(t)})),An.invert=function(t,e){return[-e,2*d(y(t))-l]},t.geoAlbers=nn,t.geoAlbersUsa=function(){var t,e,r,n,i,a,s=nn(),l=rn().rotate([154,0]).center([-2,58.5]).parallels([55,65]),c=rn().rotate([157,0]).center([-3,19.9]).parallels([8,18]),u={point:function(t,e){a=[t,e]}};function f(t){var e=t[0],o=t[1];return a=null,r.point(e,o),a||(n.point(e,o),a)||(i.point(e,o),a)}function h(){return t=e=null,f}return f.invert=function(t){var e=s.scale(),r=s.translate(),n=(t[0]-r[0])/e,i=(t[1]-r[1])/e;return(i>=.12&&i<.234&&n>=-.425&&n<-.214?l:i>=.166&&i<.234&&n>=-.214&&n<-.115?c:s).invert(t)},f.stream=function(r){return t&&e===r?t:(n=[s.stream(e=r),l.stream(r),c.stream(r)],i=n.length,t={point:function(t,e){for(var r=-1;++rDt(n[0],n[1])&&(n[1]=i[1]),Dt(i[0],n[1])>Dt(n[0],n[1])&&(n[0]=i[0])):a.push(n=i);for(o=-1/0,e=0,n=a[r=a.length-1];e<=r;n=i,++e)i=a[e],(s=Dt(n[1],i[0]))>o&&(o=s,tt=i[0],rt=n[1])}return 
lt=ct=null,tt===1/0||et===1/0?[[NaN,NaN],[NaN,NaN]]:[[tt,et],[rt,nt]]},t.geoCentroid=function(t){ut=ft=ht=pt=dt=mt=gt=vt=yt=xt=bt=0,z(t,Bt);var e=yt,r=xt,n=bt,i=e*e+r*r+n*n;return i<1e-12&&(e=mt,r=gt,n=vt,ft2?t[2]+90:90]):[(t=r())[0],t[1],t[2]-90]},r([0,0,90]).scale(159.155)},t.geoTransverseMercatorRaw=An,Object.defineProperty(t,"__esModule",{value:!0})}))},{"d3-array":102}],110:[function(t,e,r){!function(t,n){"object"==typeof r&&void 0!==e?n(r):n((t=t||self).d3=t.d3||{})}(this,(function(t){"use strict";function e(t,e){return t.parent===e.parent?1:2}function r(t,e){return t+e.x}function n(t,e){return Math.max(t,e.y)}function i(t){var e=0,r=t.children,n=r&&r.length;if(n)for(;--n>=0;)e+=r[n].value;else e=1;t.value=e}function a(t,e){var r,n,i,a,s,u=new c(t),f=+t.value&&(u.value=t.value),h=[u];for(null==e&&(e=o);r=h.pop();)if(f&&(r.value=+r.data.value),(i=e(r.data))&&(s=i.length))for(r.children=new Array(s),a=s-1;a>=0;--a)h.push(n=r.children[a]=new c(i[a])),n.parent=r,n.depth=r.depth+1;return u.eachBefore(l)}function o(t){return t.children}function s(t){t.data=t.data.data}function l(t){var e=0;do{t.height=e}while((t=t.parent)&&t.height<++e)}function c(t){this.data=t,this.depth=this.height=0,this.parent=null}c.prototype=a.prototype={constructor:c,count:function(){return this.eachAfter(i)},each:function(t){var e,r,n,i,a=this,o=[a];do{for(e=o.reverse(),o=[];a=e.pop();)if(t(a),r=a.children)for(n=0,i=r.length;n=0;--r)i.push(e[r]);return this},sum:function(t){return this.eachAfter((function(e){for(var r=+t(e.data)||0,n=e.children,i=n&&n.length;--i>=0;)r+=n[i].value;e.value=r}))},sort:function(t){return this.eachBefore((function(e){e.children&&e.children.sort(t)}))},path:function(t){for(var e=this,r=function(t,e){if(t===e)return t;var r=t.ancestors(),n=e.ancestors(),i=null;t=r.pop(),e=n.pop();for(;t===e;)i=t,t=r.pop(),e=n.pop();return i}(e,t),n=[e];e!==r;)e=e.parent,n.push(e);for(var i=n.length;t!==r;)n.splice(i,0,t),t=t.parent;return n},ancestors:function(){for(var 
t=this,e=[t];t=t.parent;)e.push(t);return e},descendants:function(){var t=[];return this.each((function(e){t.push(e)})),t},leaves:function(){var t=[];return this.eachBefore((function(e){e.children||t.push(e)})),t},links:function(){var t=this,e=[];return t.each((function(r){r!==t&&e.push({source:r.parent,target:r})})),e},copy:function(){return a(this).eachBefore(s)}};var u=Array.prototype.slice;function f(t){for(var e,r,n=0,i=(t=function(t){for(var e,r,n=t.length;n;)r=Math.random()*n--|0,e=t[n],t[n]=t[r],t[r]=e;return t}(u.call(t))).length,a=[];n0&&r*r>n*n+i*i}function m(t,e){for(var r=0;r(o*=o)?(n=(c+o-i)/(2*c),a=Math.sqrt(Math.max(0,o/c-n*n)),r.x=t.x-n*s-a*l,r.y=t.y-n*l+a*s):(n=(c+i-o)/(2*c),a=Math.sqrt(Math.max(0,i/c-n*n)),r.x=e.x+n*s-a*l,r.y=e.y+n*l+a*s)):(r.x=e.x+r.r,r.y=e.y)}function b(t,e){var r=t.r+e.r-1e-6,n=e.x-t.x,i=e.y-t.y;return r>0&&r*r>n*n+i*i}function _(t){var e=t._,r=t.next._,n=e.r+r.r,i=(e.x*r.r+r.x*e.r)/n,a=(e.y*r.r+r.y*e.r)/n;return i*i+a*a}function w(t){this._=t,this.next=null,this.previous=null}function T(t){if(!(i=t.length))return 0;var e,r,n,i,a,o,s,l,c,u,h;if((e=t[0]).x=0,e.y=0,!(i>1))return e.r;if(r=t[1],e.x=-r.r,r.x=e.r,r.y=0,!(i>2))return e.r+r.r;x(r,e,n=t[2]),e=new w(e),r=new w(r),n=new w(n),e.next=n.previous=r,r.next=e.previous=n,n.next=r.previous=e;t:for(s=3;sh&&(h=s),g=u*u*m,(p=Math.max(h/g,g/f))>d){u-=s;break}d=p}v.push(o={value:u,dice:l1?e:1)},r}(G);var X=function t(e){function r(t,r,n,i,a){if((o=t._squarify)&&o.ratio===e)for(var o,s,l,c,u,f=-1,h=o.length,p=t.value;++f1?e:1)},r}(G);t.cluster=function(){var t=e,i=1,a=1,o=!1;function s(e){var s,l=0;e.eachAfter((function(e){var i=e.children;i?(e.x=function(t){return t.reduce(r,0)/t.length}(i),e.y=function(t){return 1+t.reduce(n,0)}(i)):(e.x=s?l+=t(e,s):0,e.y=0,s=e)}));var c=function(t){for(var e;e=t.children;)t=e[0];return t}(e),u=function(t){for(var e;e=t.children;)t=e[e.length-1];return t}(e),f=c.x-t(c,u)/2,h=u.x+t(u,c)/2;return 
e.eachAfter(o?function(t){t.x=(t.x-e.x)*i,t.y=(e.y-t.y)*a}:function(t){t.x=(t.x-f)/(h-f)*i,t.y=(1-(e.y?t.y/e.y:1))*a})}return s.separation=function(e){return arguments.length?(t=e,s):t},s.size=function(t){return arguments.length?(o=!1,i=+t[0],a=+t[1],s):o?null:[i,a]},s.nodeSize=function(t){return arguments.length?(o=!0,i=+t[0],a=+t[1],s):o?[i,a]:null},s},t.hierarchy=a,t.pack=function(){var t=null,e=1,r=1,n=M;function i(i){return i.x=e/2,i.y=r/2,t?i.eachBefore(L(t)).eachAfter(C(n,.5)).eachBefore(P(1)):i.eachBefore(L(E)).eachAfter(C(M,1)).eachAfter(C(n,i.r/Math.min(e,r))).eachBefore(P(Math.min(e,r)/(2*i.r))),i}return i.radius=function(e){return arguments.length?(t=k(e),i):t},i.size=function(t){return arguments.length?(e=+t[0],r=+t[1],i):[e,r]},i.padding=function(t){return arguments.length?(n="function"==typeof t?t:S(+t),i):n},i},t.packEnclose=f,t.packSiblings=function(t){return T(t),t},t.partition=function(){var t=1,e=1,r=0,n=!1;function i(i){var a=i.height+1;return i.x0=i.y0=r,i.x1=t,i.y1=e/a,i.eachBefore(function(t,e){return function(n){n.children&&O(n,n.x0,t*(n.depth+1)/e,n.x1,t*(n.depth+2)/e);var i=n.x0,a=n.y0,o=n.x1-r,s=n.y1-r;o0)throw new Error("cycle");return a}return r.id=function(e){return arguments.length?(t=A(e),r):t},r.parentId=function(t){return arguments.length?(e=A(t),r):e},r},t.tree=function(){var t=B,e=1,r=1,n=null;function i(i){var l=function(t){for(var e,r,n,i,a,o=new H(t,0),s=[o];e=s.pop();)if(n=e._.children)for(e.children=new Array(a=n.length),i=a-1;i>=0;--i)s.push(r=e.children[i]=new H(n[i],i)),r.parent=e;return(o.parent=new H(null,0)).children=[o],o}(i);if(l.eachAfter(a),l.parent.m=-l.z,l.eachBefore(o),n)i.eachBefore(s);else{var c=i,u=i,f=i;i.eachBefore((function(t){t.xu.x&&(u=t),t.depth>f.depth&&(f=t)}));var h=c===u?1:t(c,u)/2,p=h-c.x,d=e/(u.x+h+p),m=r/(f.depth||1);i.eachBefore((function(t){t.x=(t.x+p)*d,t.y=t.depth*m}))}return i}function a(e){var r=e.children,n=e.parent.children,i=e.i?n[e.i-1]:null;if(r){!function(t){for(var 
e,r=0,n=0,i=t.children,a=i.length;--a>=0;)(e=i[a]).z+=r,e.m+=r,r+=e.s+(n+=e.c)}(e);var a=(r[0].z+r[r.length-1].z)/2;i?(e.z=i.z+t(e._,i._),e.m=e.z-a):e.z=a}else i&&(e.z=i.z+t(e._,i._));e.parent.A=function(e,r,n){if(r){for(var i,a=e,o=e,s=r,l=a.parent.children[0],c=a.m,u=o.m,f=s.m,h=l.m;s=j(s),a=N(a),s&&a;)l=N(l),(o=j(o)).a=e,(i=s.z+f-a.z-c+t(s._,a._))>0&&(U(V(s,e,n),e,i),c+=i,u+=i),f+=s.m,c+=a.m,h+=l.m,u+=o.m;s&&!j(o)&&(o.t=s,o.m+=f-u),a&&!N(l)&&(l.t=a,l.m+=c-h,n=e)}return n}(e,i,e.parent.A||n[0])}function o(t){t._.x=t.z+t.parent.m,t.m+=t.parent.m}function s(t){t.x*=e,t.y=t.depth*r}return i.separation=function(e){return arguments.length?(t=e,i):t},i.size=function(t){return arguments.length?(n=!1,e=+t[0],r=+t[1],i):n?null:[e,r]},i.nodeSize=function(t){return arguments.length?(n=!0,e=+t[0],r=+t[1],i):n?[e,r]:null},i},t.treemap=function(){var t=W,e=!1,r=1,n=1,i=[0],a=M,o=M,s=M,l=M,c=M;function u(t){return t.x0=t.y0=0,t.x1=r,t.y1=n,t.eachBefore(f),i=[0],e&&t.eachBefore(I),t}function f(e){var r=i[e.depth],n=e.x0+r,u=e.y0+r,f=e.x1-r,h=e.y1-r;f=r-1){var u=s[e];return u.x0=i,u.y0=a,u.x1=o,void(u.y1=l)}var f=c[e],h=n/2+f,p=e+1,d=r-1;for(;p>>1;c[m]l-a){var y=(i*v+o*g)/n;t(e,p,g,i,a,y,l),t(p,r,v,y,a,o,l)}else{var x=(a*v+l*g)/n;t(e,p,g,i,a,o,x),t(p,r,v,i,x,o,l)}}(0,l,t.value,e,r,n,i)},t.treemapDice=O,t.treemapResquarify=X,t.treemapSlice=q,t.treemapSliceDice=function(t,e,r,n,i){(1&t.depth?q:O)(t,e,r,n,i)},t.treemapSquarify=W,Object.defineProperty(t,"__esModule",{value:!0})}))},{}],111:[function(t,e,r){!function(n,i){"object"==typeof r&&void 0!==e?i(r,t("d3-color")):i((n=n||self).d3=n.d3||{},n.d3)}(this,(function(t,e){"use strict";function r(t,e,r,n,i){var a=t*t,o=a*t;return((1-3*t+3*a-o)*e+(4-6*a+3*o)*r+(1+3*t+3*a-3*o)*n+o*i)/6}function n(t){var e=t.length-1;return function(n){var i=n<=0?n=0:n>=1?(n=1,e-1):Math.floor(n*e),a=t[i],o=t[i+1],s=i>0?t[i-1]:2*a-o,l=i180||r<-180?r-360*Math.round(r/360):r):a(isNaN(t)?e:t)}function l(t){return 1==(t=+t)?c:function(e,r){return 
r-e?function(t,e,r){return t=Math.pow(t,r),e=Math.pow(e,r)-t,r=1/r,function(n){return Math.pow(t+n*e,r)}}(e,r,t):a(isNaN(e)?r:e)}}function c(t,e){var r=e-t;return r?o(t,r):a(isNaN(t)?e:t)}var u=function t(r){var n=l(r);function i(t,r){var i=n((t=e.rgb(t)).r,(r=e.rgb(r)).r),a=n(t.g,r.g),o=n(t.b,r.b),s=c(t.opacity,r.opacity);return function(e){return t.r=i(e),t.g=a(e),t.b=o(e),t.opacity=s(e),t+""}}return i.gamma=t,i}(1);function f(t){return function(r){var n,i,a=r.length,o=new Array(a),s=new Array(a),l=new Array(a);for(n=0;na&&(i=e.slice(a,i),s[o]?s[o]+=i:s[++o]=i),(r=r[0])===(n=n[0])?s[o]?s[o]+=n:s[++o]=n:(s[++o]=null,l.push({i:o,x:y(r,n)})),a=_.lastIndex;return a180?e+=360:e-t>180&&(t+=360),a.push({i:r.push(i(r)+"rotate(",null,n)-2,x:y(t,e)})):e&&r.push(i(r)+"rotate("+e+n)}(a.rotate,o.rotate,s,l),function(t,e,r,a){t!==e?a.push({i:r.push(i(r)+"skewX(",null,n)-2,x:y(t,e)}):e&&r.push(i(r)+"skewX("+e+n)}(a.skewX,o.skewX,s,l),function(t,e,r,n,a,o){if(t!==r||e!==n){var s=a.push(i(a)+"scale(",null,",",null,")");o.push({i:s-4,x:y(t,r)},{i:s-2,x:y(e,n)})}else 1===r&&1===n||a.push(i(a)+"scale("+r+","+n+")")}(a.scaleX,a.scaleY,o.scaleX,o.scaleY,s,l),a=o=null,function(t){for(var e,r=-1,n=l.length;++r1e-6)if(Math.abs(f*l-c*u)>1e-6&&a){var p=n-o,d=i-s,m=l*l+c*c,g=p*p+d*d,v=Math.sqrt(m),y=Math.sqrt(h),x=a*Math.tan((e-Math.acos((m+h-g)/(2*v*y)))/2),b=x/y,_=x/v;Math.abs(b-1)>1e-6&&(this._+="L"+(t+b*u)+","+(r+b*f)),this._+="A"+a+","+a+",0,0,"+ +(f*p>u*d)+","+(this._x1=t+_*l)+","+(this._y1=r+_*c)}else this._+="L"+(this._x1=t)+","+(this._y1=r);else;},arc:function(t,i,a,o,s,l){t=+t,i=+i,l=!!l;var c=(a=+a)*Math.cos(o),u=a*Math.sin(o),f=t+c,h=i+u,p=1^l,d=l?o-s:s-o;if(a<0)throw new Error("negative radius: "+a);null===this._x1?this._+="M"+f+","+h:(Math.abs(this._x1-f)>1e-6||Math.abs(this._y1-h)>1e-6)&&(this._+="L"+f+","+h),a&&(d<0&&(d=d%r+r),d>n?this._+="A"+a+","+a+",0,1,"+p+","+(t-c)+","+(i-u)+"A"+a+","+a+",0,1,"+p+","+(this._x1=f)+","+(this._y1=h):d>1e-6&&(this._+="A"+a+","+a+",0,"+ 
+(d>=e)+","+p+","+(this._x1=t+a*Math.cos(s))+","+(this._y1=i+a*Math.sin(s))))},rect:function(t,e,r,n){this._+="M"+(this._x0=this._x1=+t)+","+(this._y0=this._y1=+e)+"h"+ +r+"v"+ +n+"h"+-r+"Z"},toString:function(){return this._}},t.path=a,Object.defineProperty(t,"__esModule",{value:!0})}))},{}],113:[function(t,e,r){!function(t,n){"object"==typeof r&&void 0!==e?n(r):n((t=t||self).d3=t.d3||{})}(this,(function(t){"use strict";function e(t,e,r,n){if(isNaN(e)||isNaN(r))return t;var i,a,o,s,l,c,u,f,h,p=t._root,d={data:n},m=t._x0,g=t._y0,v=t._x1,y=t._y1;if(!p)return t._root=d,t;for(;p.length;)if((c=e>=(a=(m+v)/2))?m=a:v=a,(u=r>=(o=(g+y)/2))?g=o:y=o,i=p,!(p=p[f=u<<1|c]))return i[f]=d,t;if(s=+t._x.call(null,p.data),l=+t._y.call(null,p.data),e===s&&r===l)return d.next=p,i?i[f]=d:t._root=d,t;do{i=i?i[f]=new Array(4):t._root=new Array(4),(c=e>=(a=(m+v)/2))?m=a:v=a,(u=r>=(o=(g+y)/2))?g=o:y=o}while((f=u<<1|c)==(h=(l>=o)<<1|s>=a));return i[h]=p,i[f]=d,t}function r(t,e,r,n,i){this.node=t,this.x0=e,this.y0=r,this.x1=n,this.y1=i}function n(t){return t[0]}function i(t){return t[1]}function a(t,e,r){var a=new o(null==e?n:e,null==r?i:r,NaN,NaN,NaN,NaN);return null==t?a:a.addAll(t)}function o(t,e,r,n,i,a){this._x=t,this._y=e,this._x0=r,this._y0=n,this._x1=i,this._y1=a,this._root=void 0}function s(t){for(var e={data:t.data},r=e;t=t.next;)r=r.next={data:t.data};return e}var l=a.prototype=o.prototype;l.copy=function(){var t,e,r=new o(this._x,this._y,this._x0,this._y0,this._x1,this._y1),n=this._root;if(!n)return r;if(!n.length)return r._root=s(n),r;for(t=[{source:n,target:r._root=new Array(4)}];n=t.pop();)for(var i=0;i<4;++i)(e=n.source[i])&&(e.length?t.push({source:e,target:n.target[i]=new Array(4)}):n.target[i]=s(e));return r},l.add=function(t){var r=+this._x.call(null,t),n=+this._y.call(null,t);return e(this.cover(r,n),r,n,t)},l.addAll=function(t){var r,n,i,a,o=t.length,s=new Array(o),l=new Array(o),c=1/0,u=1/0,f=-1/0,h=-1/0;for(n=0;nf&&(f=i),ah&&(h=a));if(c>f||u>h)return 
this;for(this.cover(c,u).cover(f,h),n=0;nt||t>=i||n>e||e>=a;)switch(s=(ep||(o=c.y0)>d||(s=c.x1)=y)<<1|t>=v)&&(c=m[m.length-1],m[m.length-1]=m[m.length-1-u],m[m.length-1-u]=c)}else{var x=t-+this._x.call(null,g.data),b=e-+this._y.call(null,g.data),_=x*x+b*b;if(_=(s=(d+g)/2))?d=s:g=s,(u=o>=(l=(m+v)/2))?m=l:v=l,e=p,!(p=p[f=u<<1|c]))return this;if(!p.length)break;(e[f+1&3]||e[f+2&3]||e[f+3&3])&&(r=e,h=f)}for(;p.data!==t;)if(n=p,!(p=p.next))return this;return(i=p.next)&&delete p.next,n?(i?n.next=i:delete n.next,this):e?(i?e[f]=i:delete e[f],(p=e[0]||e[1]||e[2]||e[3])&&p===(e[3]||e[2]||e[1]||e[0])&&!p.length&&(r?r[h]=p:this._root=p),this):(this._root=i,this)},l.removeAll=function(t){for(var e=0,r=t.length;e1?0:t<-1?u:Math.acos(t)}function d(t){return t>=1?f:t<=-1?-f:Math.asin(t)}function m(t){return t.innerRadius}function g(t){return t.outerRadius}function v(t){return t.startAngle}function y(t){return t.endAngle}function x(t){return t&&t.padAngle}function b(t,e,r,n,i,a,o,s){var l=r-t,c=n-e,u=o-i,f=s-a,h=f*l-u*c;if(!(h*h<1e-12))return[t+(h=(u*(e-a)-f*(t-i))/h)*l,e+h*c]}function _(t,e,r,n,i,a,s){var l=t-r,u=e-n,f=(s?a:-a)/c(l*l+u*u),h=f*u,p=-f*l,d=t+h,m=e+p,g=r+h,v=n+p,y=(d+g)/2,x=(m+v)/2,b=g-d,_=v-m,w=b*b+_*_,T=i-a,k=d*v-g*m,A=(_<0?-1:1)*c(o(0,T*T*w-k*k)),M=(k*_-b*A)/w,S=(-k*b-_*A)/w,E=(k*_+b*A)/w,L=(-k*b+_*A)/w,C=M-y,P=S-x,I=E-y,O=L-x;return C*C+P*P>I*I+O*O&&(M=E,S=L),{cx:M,cy:S,x01:-h,y01:-p,x11:M*(i/T-1),y11:S*(i/T-1)}}function w(t){this._context=t}function T(t){return new w(t)}function k(t){return t[0]}function A(t){return t[1]}function M(){var t=k,n=A,i=r(!0),a=null,o=T,s=null;function l(r){var l,c,u,f=r.length,h=!1;for(null==a&&(s=o(u=e.path())),l=0;l<=f;++l)!(l=f;--h)c.point(v[h],y[h]);c.lineEnd(),c.areaEnd()}g&&(v[u]=+t(p,u,r),y[u]=+i(p,u,r),c.point(n?+n(p,u,r):v[u],a?+a(p,u,r):y[u]))}if(d)return c=null,d+""||null}function f(){return M().defined(o).curve(l).context(s)}return u.x=function(e){return arguments.length?(t="function"==typeof 
e?e:r(+e),n=null,u):t},u.x0=function(e){return arguments.length?(t="function"==typeof e?e:r(+e),u):t},u.x1=function(t){return arguments.length?(n=null==t?null:"function"==typeof t?t:r(+t),u):n},u.y=function(t){return arguments.length?(i="function"==typeof t?t:r(+t),a=null,u):i},u.y0=function(t){return arguments.length?(i="function"==typeof t?t:r(+t),u):i},u.y1=function(t){return arguments.length?(a=null==t?null:"function"==typeof t?t:r(+t),u):a},u.lineX0=u.lineY0=function(){return f().x(t).y(i)},u.lineY1=function(){return f().x(t).y(a)},u.lineX1=function(){return f().x(n).y(i)},u.defined=function(t){return arguments.length?(o="function"==typeof t?t:r(!!t),u):o},u.curve=function(t){return arguments.length?(l=t,null!=s&&(c=l(s)),u):l},u.context=function(t){return arguments.length?(null==t?s=c=null:c=l(s=t),u):s},u}function E(t,e){return et?1:e>=t?0:NaN}function L(t){return t}w.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._point=0},lineEnd:function(){(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,e):this._context.moveTo(t,e);break;case 1:this._point=2;default:this._context.lineTo(t,e)}}};var C=I(T);function P(t){this._curve=t}function I(t){function e(e){return new P(t(e))}return e._curve=t,e}function O(t){var e=t.curve;return t.angle=t.x,delete t.x,t.radius=t.y,delete t.y,t.curve=function(t){return arguments.length?e(I(t)):e()._curve},t}function z(){return O(M().curve(C))}function D(){var t=S().curve(C),e=t.curve,r=t.lineX0,n=t.lineX1,i=t.lineY0,a=t.lineY1;return t.angle=t.x,delete t.x,t.startAngle=t.x0,delete t.x0,t.endAngle=t.x1,delete t.x1,t.radius=t.y,delete t.y,t.innerRadius=t.y0,delete t.y0,t.outerRadius=t.y1,delete t.y1,t.lineStartAngle=function(){return O(r())},delete t.lineX0,t.lineEndAngle=function(){return O(n())},delete 
t.lineX1,t.lineInnerRadius=function(){return O(i())},delete t.lineY0,t.lineOuterRadius=function(){return O(a())},delete t.lineY1,t.curve=function(t){return arguments.length?e(I(t)):e()._curve},t}function R(t,e){return[(e=+e)*Math.cos(t-=Math.PI/2),e*Math.sin(t)]}P.prototype={areaStart:function(){this._curve.areaStart()},areaEnd:function(){this._curve.areaEnd()},lineStart:function(){this._curve.lineStart()},lineEnd:function(){this._curve.lineEnd()},point:function(t,e){this._curve.point(e*Math.sin(t),e*-Math.cos(t))}};var F=Array.prototype.slice;function B(t){return t.source}function N(t){return t.target}function j(t){var n=B,i=N,a=k,o=A,s=null;function l(){var r,l=F.call(arguments),c=n.apply(this,l),u=i.apply(this,l);if(s||(s=r=e.path()),t(s,+a.apply(this,(l[0]=c,l)),+o.apply(this,l),+a.apply(this,(l[0]=u,l)),+o.apply(this,l)),r)return s=null,r+""||null}return l.source=function(t){return arguments.length?(n=t,l):n},l.target=function(t){return arguments.length?(i=t,l):i},l.x=function(t){return arguments.length?(a="function"==typeof t?t:r(+t),l):a},l.y=function(t){return arguments.length?(o="function"==typeof t?t:r(+t),l):o},l.context=function(t){return arguments.length?(s=null==t?null:t,l):s},l}function U(t,e,r,n,i){t.moveTo(e,r),t.bezierCurveTo(e=(e+n)/2,r,e,i,n,i)}function V(t,e,r,n,i){t.moveTo(e,r),t.bezierCurveTo(e,r=(r+i)/2,n,r,n,i)}function H(t,e,r,n,i){var a=R(e,r),o=R(e,r=(r+i)/2),s=R(n,r),l=R(n,i);t.moveTo(a[0],a[1]),t.bezierCurveTo(o[0],o[1],s[0],s[1],l[0],l[1])}var q={draw:function(t,e){var r=Math.sqrt(e/u);t.moveTo(r,0),t.arc(0,0,r,0,h)}},G={draw:function(t,e){var r=Math.sqrt(e/5)/2;t.moveTo(-3*r,-r),t.lineTo(-r,-r),t.lineTo(-r,-3*r),t.lineTo(r,-3*r),t.lineTo(r,-r),t.lineTo(3*r,-r),t.lineTo(3*r,r),t.lineTo(r,r),t.lineTo(r,3*r),t.lineTo(-r,3*r),t.lineTo(-r,r),t.lineTo(-3*r,r),t.closePath()}},Y=Math.sqrt(1/3),W=2*Y,X={draw:function(t,e){var 
r=Math.sqrt(e/W),n=r*Y;t.moveTo(0,-r),t.lineTo(n,0),t.lineTo(0,r),t.lineTo(-n,0),t.closePath()}},Z=Math.sin(u/10)/Math.sin(7*u/10),J=Math.sin(h/10)*Z,K=-Math.cos(h/10)*Z,Q={draw:function(t,e){var r=Math.sqrt(.8908130915292852*e),n=J*r,i=K*r;t.moveTo(0,-r),t.lineTo(n,i);for(var a=1;a<5;++a){var o=h*a/5,s=Math.cos(o),l=Math.sin(o);t.lineTo(l*r,-s*r),t.lineTo(s*n-l*i,l*n+s*i)}t.closePath()}},$={draw:function(t,e){var r=Math.sqrt(e),n=-r/2;t.rect(n,n,r,r)}},tt=Math.sqrt(3),et={draw:function(t,e){var r=-Math.sqrt(e/(3*tt));t.moveTo(0,2*r),t.lineTo(-tt*r,-r),t.lineTo(tt*r,-r),t.closePath()}},rt=-.5,nt=Math.sqrt(3)/2,it=1/Math.sqrt(12),at=3*(it/2+1),ot={draw:function(t,e){var r=Math.sqrt(e/at),n=r/2,i=r*it,a=n,o=r*it+r,s=-a,l=o;t.moveTo(n,i),t.lineTo(a,o),t.lineTo(s,l),t.lineTo(rt*n-nt*i,nt*n+rt*i),t.lineTo(rt*a-nt*o,nt*a+rt*o),t.lineTo(rt*s-nt*l,nt*s+rt*l),t.lineTo(rt*n+nt*i,rt*i-nt*n),t.lineTo(rt*a+nt*o,rt*o-nt*a),t.lineTo(rt*s+nt*l,rt*l-nt*s),t.closePath()}},st=[q,G,X,$,Q,et,ot];function lt(){}function ct(t,e,r){t._context.bezierCurveTo((2*t._x0+t._x1)/3,(2*t._y0+t._y1)/3,(t._x0+2*t._x1)/3,(t._y0+2*t._y1)/3,(t._x0+4*t._x1+e)/6,(t._y0+4*t._y1+r)/6)}function ut(t){this._context=t}function ft(t){this._context=t}function ht(t){this._context=t}function pt(t,e){this._basis=new ut(t),this._beta=e}ut.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._y0=this._y1=NaN,this._point=0},lineEnd:function(){switch(this._point){case 3:ct(this,this._x1,this._y1);case 2:this._context.lineTo(this._x1,this._y1)}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,e):this._context.moveTo(t,e);break;case 1:this._point=2;break;case 
2:this._point=3,this._context.lineTo((5*this._x0+this._x1)/6,(5*this._y0+this._y1)/6);default:ct(this,t,e)}this._x0=this._x1,this._x1=t,this._y0=this._y1,this._y1=e}},ft.prototype={areaStart:lt,areaEnd:lt,lineStart:function(){this._x0=this._x1=this._x2=this._x3=this._x4=this._y0=this._y1=this._y2=this._y3=this._y4=NaN,this._point=0},lineEnd:function(){switch(this._point){case 1:this._context.moveTo(this._x2,this._y2),this._context.closePath();break;case 2:this._context.moveTo((this._x2+2*this._x3)/3,(this._y2+2*this._y3)/3),this._context.lineTo((this._x3+2*this._x2)/3,(this._y3+2*this._y2)/3),this._context.closePath();break;case 3:this.point(this._x2,this._y2),this.point(this._x3,this._y3),this.point(this._x4,this._y4)}},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1,this._x2=t,this._y2=e;break;case 1:this._point=2,this._x3=t,this._y3=e;break;case 2:this._point=3,this._x4=t,this._y4=e,this._context.moveTo((this._x0+4*this._x1+t)/6,(this._y0+4*this._y1+e)/6);break;default:ct(this,t,e)}this._x0=this._x1,this._x1=t,this._y0=this._y1,this._y1=e}},ht.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._y0=this._y1=NaN,this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1;break;case 1:this._point=2;break;case 2:this._point=3;var r=(this._x0+4*this._x1+t)/6,n=(this._y0+4*this._y1+e)/6;this._line?this._context.lineTo(r,n):this._context.moveTo(r,n);break;case 3:this._point=4;default:ct(this,t,e)}this._x0=this._x1,this._x1=t,this._y0=this._y1,this._y1=e}},pt.prototype={lineStart:function(){this._x=[],this._y=[],this._basis.lineStart()},lineEnd:function(){var t=this._x,e=this._y,r=t.length-1;if(r>0)for(var 
n,i=t[0],a=e[0],o=t[r]-i,s=e[r]-a,l=-1;++l<=r;)n=l/r,this._basis.point(this._beta*t[l]+(1-this._beta)*(i+n*o),this._beta*e[l]+(1-this._beta)*(a+n*s));this._x=this._y=null,this._basis.lineEnd()},point:function(t,e){this._x.push(+t),this._y.push(+e)}};var dt=function t(e){function r(t){return 1===e?new ut(t):new pt(t,e)}return r.beta=function(e){return t(+e)},r}(.85);function mt(t,e,r){t._context.bezierCurveTo(t._x1+t._k*(t._x2-t._x0),t._y1+t._k*(t._y2-t._y0),t._x2+t._k*(t._x1-e),t._y2+t._k*(t._y1-r),t._x2,t._y2)}function gt(t,e){this._context=t,this._k=(1-e)/6}gt.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._point=0},lineEnd:function(){switch(this._point){case 2:this._context.lineTo(this._x2,this._y2);break;case 3:mt(this,this._x1,this._y1)}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,e):this._context.moveTo(t,e);break;case 1:this._point=2,this._x1=t,this._y1=e;break;case 2:this._point=3;default:mt(this,t,e)}this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=e}};var vt=function t(e){function r(t){return new gt(t,e)}return r.tension=function(e){return t(+e)},r}(0);function yt(t,e){this._context=t,this._k=(1-e)/6}yt.prototype={areaStart:lt,areaEnd:lt,lineStart:function(){this._x0=this._x1=this._x2=this._x3=this._x4=this._x5=this._y0=this._y1=this._y2=this._y3=this._y4=this._y5=NaN,this._point=0},lineEnd:function(){switch(this._point){case 1:this._context.moveTo(this._x3,this._y3),this._context.closePath();break;case 2:this._context.lineTo(this._x3,this._y3),this._context.closePath();break;case 3:this.point(this._x3,this._y3),this.point(this._x4,this._y4),this.point(this._x5,this._y5)}},point:function(t,e){switch(t=+t,e=+e,this._point){case 
0:this._point=1,this._x3=t,this._y3=e;break;case 1:this._point=2,this._context.moveTo(this._x4=t,this._y4=e);break;case 2:this._point=3,this._x5=t,this._y5=e;break;default:mt(this,t,e)}this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=e}};var xt=function t(e){function r(t){return new yt(t,e)}return r.tension=function(e){return t(+e)},r}(0);function bt(t,e){this._context=t,this._k=(1-e)/6}bt.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1;break;case 1:this._point=2;break;case 2:this._point=3,this._line?this._context.lineTo(this._x2,this._y2):this._context.moveTo(this._x2,this._y2);break;case 3:this._point=4;default:mt(this,t,e)}this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=e}};var _t=function t(e){function r(t){return new bt(t,e)}return r.tension=function(e){return t(+e)},r}(0);function wt(t,e,r){var n=t._x1,i=t._y1,a=t._x2,o=t._y2;if(t._l01_a>1e-12){var s=2*t._l01_2a+3*t._l01_a*t._l12_a+t._l12_2a,l=3*t._l01_a*(t._l01_a+t._l12_a);n=(n*s-t._x0*t._l12_2a+t._x2*t._l01_2a)/l,i=(i*s-t._y0*t._l12_2a+t._y2*t._l01_2a)/l}if(t._l23_a>1e-12){var c=2*t._l23_2a+3*t._l23_a*t._l12_a+t._l12_2a,u=3*t._l23_a*(t._l23_a+t._l12_a);a=(a*c+t._x1*t._l23_2a-e*t._l12_2a)/u,o=(o*c+t._y1*t._l23_2a-r*t._l12_2a)/u}t._context.bezierCurveTo(n,i,a,o,t._x2,t._y2)}function Tt(t,e){this._context=t,this._alpha=e}Tt.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){switch(this._point){case 
2:this._context.lineTo(this._x2,this._y2);break;case 3:this.point(this._x2,this._y2)}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,e){if(t=+t,e=+e,this._point){var r=this._x2-t,n=this._y2-e;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(r*r+n*n,this._alpha))}switch(this._point){case 0:this._point=1,this._line?this._context.lineTo(t,e):this._context.moveTo(t,e);break;case 1:this._point=2;break;case 2:this._point=3;default:wt(this,t,e)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=e}};var kt=function t(e){function r(t){return e?new Tt(t,e):new gt(t,0)}return r.alpha=function(e){return t(+e)},r}(.5);function At(t,e){this._context=t,this._alpha=e}At.prototype={areaStart:lt,areaEnd:lt,lineStart:function(){this._x0=this._x1=this._x2=this._x3=this._x4=this._x5=this._y0=this._y1=this._y2=this._y3=this._y4=this._y5=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){switch(this._point){case 1:this._context.moveTo(this._x3,this._y3),this._context.closePath();break;case 2:this._context.lineTo(this._x3,this._y3),this._context.closePath();break;case 3:this.point(this._x3,this._y3),this.point(this._x4,this._y4),this.point(this._x5,this._y5)}},point:function(t,e){if(t=+t,e=+e,this._point){var r=this._x2-t,n=this._y2-e;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(r*r+n*n,this._alpha))}switch(this._point){case 0:this._point=1,this._x3=t,this._y3=e;break;case 1:this._point=2,this._context.moveTo(this._x4=t,this._y4=e);break;case 2:this._point=3,this._x5=t,this._y5=e;break;default:wt(this,t,e)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=e}};var Mt=function t(e){function 
r(t){return e?new At(t,e):new yt(t,0)}return r.alpha=function(e){return t(+e)},r}(.5);function St(t,e){this._context=t,this._alpha=e}St.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,e){if(t=+t,e=+e,this._point){var r=this._x2-t,n=this._y2-e;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(r*r+n*n,this._alpha))}switch(this._point){case 0:this._point=1;break;case 1:this._point=2;break;case 2:this._point=3,this._line?this._context.lineTo(this._x2,this._y2):this._context.moveTo(this._x2,this._y2);break;case 3:this._point=4;default:wt(this,t,e)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=e}};var Et=function t(e){function r(t){return e?new St(t,e):new bt(t,0)}return r.alpha=function(e){return t(+e)},r}(.5);function Lt(t){this._context=t}function Ct(t){return t<0?-1:1}function Pt(t,e,r){var n=t._x1-t._x0,i=e-t._x1,a=(t._y1-t._y0)/(n||i<0&&-0),o=(r-t._y1)/(i||n<0&&-0),s=(a*i+o*n)/(n+i);return(Ct(a)+Ct(o))*Math.min(Math.abs(a),Math.abs(o),.5*Math.abs(s))||0}function It(t,e){var r=t._x1-t._x0;return r?(3*(t._y1-t._y0)/r-e)/2:e}function Ot(t,e,r){var n=t._x0,i=t._y0,a=t._x1,o=t._y1,s=(a-n)/3;t._context.bezierCurveTo(n+s,i+s*e,a-s,o-s*r,a,o)}function zt(t){this._context=t}function Dt(t){this._context=new Rt(t)}function Rt(t){this._context=t}function Ft(t){this._context=t}function Bt(t){var e,r,n=t.length-1,i=new Array(n),a=new Array(n),o=new Array(n);for(i[0]=0,a[0]=2,o[0]=t[0]+2*t[1],e=1;e=0;--e)i[e]=(o[e]-i[e+1])/a[e];for(a[n-1]=(t[n]+i[n-1])/2,e=0;e1)for(var 
r,n,i,a=1,o=t[e[0]],s=o.length;a=0;)r[e]=e;return r}function Vt(t,e){return t[e]}function Ht(t){var e=t.map(qt);return Ut(t).sort((function(t,r){return e[t]-e[r]}))}function qt(t){for(var e,r=-1,n=0,i=t.length,a=-1/0;++ra&&(a=e,n=r);return n}function Gt(t){var e=t.map(Yt);return Ut(t).sort((function(t,r){return e[t]-e[r]}))}function Yt(t){for(var e,r=0,n=-1,i=t.length;++n=0&&(this._t=1-this._t,this._line=1-this._line)},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,e):this._context.moveTo(t,e);break;case 1:this._point=2;default:if(this._t<=0)this._context.lineTo(this._x,e),this._context.lineTo(t,e);else{var r=this._x*(1-this._t)+t*this._t;this._context.lineTo(r,this._y),this._context.lineTo(r,e)}}this._x=t,this._y=e}},t.arc=function(){var t=m,o=g,w=r(0),T=null,k=v,A=y,M=x,S=null;function E(){var r,m,g=+t.apply(this,arguments),v=+o.apply(this,arguments),y=k.apply(this,arguments)-f,x=A.apply(this,arguments)-f,E=n(x-y),L=x>y;if(S||(S=r=e.path()),v1e-12)if(E>h-1e-12)S.moveTo(v*a(y),v*l(y)),S.arc(0,0,v,y,x,!L),g>1e-12&&(S.moveTo(g*a(x),g*l(x)),S.arc(0,0,g,x,y,L));else{var C,P,I=y,O=x,z=y,D=x,R=E,F=E,B=M.apply(this,arguments)/2,N=B>1e-12&&(T?+T.apply(this,arguments):c(g*g+v*v)),j=s(n(v-g)/2,+w.apply(this,arguments)),U=j,V=j;if(N>1e-12){var H=d(N/g*l(B)),q=d(N/v*l(B));(R-=2*H)>1e-12?(z+=H*=L?1:-1,D-=H):(R=0,z=D=(y+x)/2),(F-=2*q)>1e-12?(I+=q*=L?1:-1,O-=q):(F=0,I=O=(y+x)/2)}var G=v*a(I),Y=v*l(I),W=g*a(D),X=g*l(D);if(j>1e-12){var Z,J=v*a(O),K=v*l(O),Q=g*a(z),$=g*l(z);if(E1e-12?V>1e-12?(C=_(Q,$,G,Y,v,V,L),P=_(J,K,W,X,v,V,L),S.moveTo(C.cx+C.x01,C.cy+C.y01),V1e-12&&R>1e-12?U>1e-12?(C=_(W,X,J,K,g,-U,L),P=_(G,Y,Q,$,g,-U,L),S.lineTo(C.cx+C.x01,C.cy+C.y01),U0&&(d+=f);for(null!=e?m.sort((function(t,r){return e(g[t],g[r])})):null!=n&&m.sort((function(t,e){return n(r[t],r[e])})),s=0,c=d?(y-p*b)/d:0;s0?f*c:0)+b,g[l]={data:r[l],index:s,value:f,startAngle:v,endAngle:u,padAngle:x};return g}return s.value=function(e){return 
arguments.length?(t="function"==typeof e?e:r(+e),s):t},s.sortValues=function(t){return arguments.length?(e=t,n=null,s):e},s.sort=function(t){return arguments.length?(n=t,e=null,s):n},s.startAngle=function(t){return arguments.length?(i="function"==typeof t?t:r(+t),s):i},s.endAngle=function(t){return arguments.length?(a="function"==typeof t?t:r(+t),s):a},s.padAngle=function(t){return arguments.length?(o="function"==typeof t?t:r(+t),s):o},s},t.pointRadial=R,t.radialArea=D,t.radialLine=z,t.stack=function(){var t=r([]),e=Ut,n=jt,i=Vt;function a(r){var a,o,s=t.apply(this,arguments),l=r.length,c=s.length,u=new Array(c);for(a=0;a0)for(var r,n,i,a,o,s,l=0,c=t[e[0]].length;l0?(n[0]=a,n[1]=a+=i):i<0?(n[1]=o,n[0]=o+=i):(n[0]=0,n[1]=i)},t.stackOffsetExpand=function(t,e){if((n=t.length)>0){for(var r,n,i,a=0,o=t[0].length;a0){for(var r,n=0,i=t[e[0]],a=i.length;n0&&(n=(r=t[e[0]]).length)>0){for(var r,n,i,a=0,o=1;o=12)]},q:function(t){return 1+~~(t.getMonth()/3)},Q:wt,s:Tt,S:H,u:q,U:G,V:W,w:X,W:Z,x:null,X:null,y:J,Y:Q,Z:tt,"%":_t},zt={a:function(t){return f[t.getUTCDay()]},A:function(t){return u[t.getUTCDay()]},b:function(t){return Y[t.getUTCMonth()]},B:function(t){return h[t.getUTCMonth()]},c:null,d:et,e:et,f:ot,g:vt,G:xt,H:rt,I:nt,j:it,L:at,m:st,M:lt,p:function(t){return c[+(t.getUTCHours()>=12)]},q:function(t){return 1+~~(t.getUTCMonth()/3)},Q:wt,s:Tt,S:ct,u:ut,U:ft,V:pt,w:dt,W:mt,x:null,X:null,y:gt,Y:yt,Z:bt,"%":_t},Dt={a:function(t,e,r){var n=St.exec(e.slice(r));return n?(t.w=Et[n[0].toLowerCase()],r+n[0].length):-1},A:function(t,e,r){var n=At.exec(e.slice(r));return n?(t.w=Mt[n[0].toLowerCase()],r+n[0].length):-1},b:function(t,e,r){var n=Pt.exec(e.slice(r));return n?(t.m=It[n[0].toLowerCase()],r+n[0].length):-1},B:function(t,e,r){var n=Lt.exec(e.slice(r));return n?(t.m=Ct[n[0].toLowerCase()],r+n[0].length):-1},c:function(t,e,r){return Bt(t,a,e,r)},d:A,e:A,f:P,g:_,G:b,H:S,I:S,j:M,L:C,m:k,M:E,p:function(t,e,r){var n=ht.exec(e.slice(r));return 
n?(t.p=kt[n[0].toLowerCase()],r+n[0].length):-1},q:T,Q:O,s:z,S:L,u:g,U:v,V:y,w:m,W:x,x:function(t,e,r){return Bt(t,o,e,r)},X:function(t,e,r){return Bt(t,l,e,r)},y:_,Y:b,Z:w,"%":I};function Rt(t,e){return function(r){var n,i,a,o=[],l=-1,c=0,u=t.length;for(r instanceof Date||(r=new Date(+r));++l53)return null;"w"in c||(c.w=1),"Z"in c?(l=(s=n(i(c.y,0,1))).getUTCDay(),s=l>4||0===l?e.utcMonday.ceil(s):e.utcMonday(s),s=e.utcDay.offset(s,7*(c.V-1)),c.y=s.getUTCFullYear(),c.m=s.getUTCMonth(),c.d=s.getUTCDate()+(c.w+6)%7):(l=(s=r(i(c.y,0,1))).getDay(),s=l>4||0===l?e.timeMonday.ceil(s):e.timeMonday(s),s=e.timeDay.offset(s,7*(c.V-1)),c.y=s.getFullYear(),c.m=s.getMonth(),c.d=s.getDate()+(c.w+6)%7)}else("W"in c||"U"in c)&&("w"in c||(c.w="u"in c?c.u%7:"W"in c?1:0),l="Z"in c?n(i(c.y,0,1)).getUTCDay():r(i(c.y,0,1)).getDay(),c.m=0,c.d="W"in c?(c.w+6)%7+7*c.W-(l+5)%7:c.w+7*c.U-(l+6)%7);return"Z"in c?(c.H+=c.Z/100|0,c.M+=c.Z%100,n(c)):r(c)}}function Bt(t,e,r,n){for(var i,a,o=0,l=e.length,c=r.length;o=c)return-1;if(37===(i=e.charCodeAt(o++))){if(i=e.charAt(o++),!(a=Dt[i in s?e.charAt(o++):i])||(n=a(t,r,n))<0)return-1}else if(i!=r.charCodeAt(n++))return-1}return n}return Ot.x=Rt(o,Ot),Ot.X=Rt(l,Ot),Ot.c=Rt(a,Ot),zt.x=Rt(o,zt),zt.X=Rt(l,zt),zt.c=Rt(a,zt),{format:function(t){var e=Rt(t+="",Ot);return e.toString=function(){return t},e},parse:function(t){var e=Ft(t+="",!1);return e.toString=function(){return t},e},utcFormat:function(t){var e=Rt(t+="",zt);return e.toString=function(){return t},e},utcParse:function(t){var e=Ft(t+="",!0);return e.toString=function(){return t},e}}}var o,s={"-":"",_:" ",0:"0"},l=/^\s*\d+/,c=/^%/,u=/[\\^$*+?|[\]().{}]/g;function f(t,e,r){var n=t<0?"-":"",i=(n?-t:t)+"",a=i.length;return n+(a68?1900:2e3),r+n[0].length):-1}function w(t,e,r){var n=/^(Z)|([+-]\d\d)(?::?(\d\d))?/.exec(e.slice(r,r+6));return n?(t.Z=n[1]?0:-(n[2]+(n[3]||"00")),r+n[0].length):-1}function T(t,e,r){var n=l.exec(e.slice(r,r+1));return n?(t.q=3*n[0]-3,r+n[0].length):-1}function k(t,e,r){var 
n=l.exec(e.slice(r,r+2));return n?(t.m=n[0]-1,r+n[0].length):-1}function A(t,e,r){var n=l.exec(e.slice(r,r+2));return n?(t.d=+n[0],r+n[0].length):-1}function M(t,e,r){var n=l.exec(e.slice(r,r+3));return n?(t.m=0,t.d=+n[0],r+n[0].length):-1}function S(t,e,r){var n=l.exec(e.slice(r,r+2));return n?(t.H=+n[0],r+n[0].length):-1}function E(t,e,r){var n=l.exec(e.slice(r,r+2));return n?(t.M=+n[0],r+n[0].length):-1}function L(t,e,r){var n=l.exec(e.slice(r,r+2));return n?(t.S=+n[0],r+n[0].length):-1}function C(t,e,r){var n=l.exec(e.slice(r,r+3));return n?(t.L=+n[0],r+n[0].length):-1}function P(t,e,r){var n=l.exec(e.slice(r,r+6));return n?(t.L=Math.floor(n[0]/1e3),r+n[0].length):-1}function I(t,e,r){var n=c.exec(e.slice(r,r+1));return n?r+n[0].length:-1}function O(t,e,r){var n=l.exec(e.slice(r));return n?(t.Q=+n[0],r+n[0].length):-1}function z(t,e,r){var n=l.exec(e.slice(r));return n?(t.s=+n[0],r+n[0].length):-1}function D(t,e){return f(t.getDate(),e,2)}function R(t,e){return f(t.getHours(),e,2)}function F(t,e){return f(t.getHours()%12||12,e,2)}function B(t,r){return f(1+e.timeDay.count(e.timeYear(t),t),r,3)}function N(t,e){return f(t.getMilliseconds(),e,3)}function j(t,e){return N(t,e)+"000"}function U(t,e){return f(t.getMonth()+1,e,2)}function V(t,e){return f(t.getMinutes(),e,2)}function H(t,e){return f(t.getSeconds(),e,2)}function q(t){var e=t.getDay();return 0===e?7:e}function G(t,r){return f(e.timeSunday.count(e.timeYear(t)-1,t),r,2)}function Y(t){var r=t.getDay();return r>=4||0===r?e.timeThursday(t):e.timeThursday.ceil(t)}function W(t,r){return t=Y(t),f(e.timeThursday.count(e.timeYear(t),t)+(4===e.timeYear(t).getDay()),r,2)}function X(t){return t.getDay()}function Z(t,r){return f(e.timeMonday.count(e.timeYear(t)-1,t),r,2)}function J(t,e){return f(t.getFullYear()%100,e,2)}function K(t,e){return f((t=Y(t)).getFullYear()%100,e,2)}function Q(t,e){return f(t.getFullYear()%1e4,e,4)}function $(t,r){var n=t.getDay();return 
f((t=n>=4||0===n?e.timeThursday(t):e.timeThursday.ceil(t)).getFullYear()%1e4,r,4)}function tt(t){var e=t.getTimezoneOffset();return(e>0?"-":(e*=-1,"+"))+f(e/60|0,"0",2)+f(e%60,"0",2)}function et(t,e){return f(t.getUTCDate(),e,2)}function rt(t,e){return f(t.getUTCHours(),e,2)}function nt(t,e){return f(t.getUTCHours()%12||12,e,2)}function it(t,r){return f(1+e.utcDay.count(e.utcYear(t),t),r,3)}function at(t,e){return f(t.getUTCMilliseconds(),e,3)}function ot(t,e){return at(t,e)+"000"}function st(t,e){return f(t.getUTCMonth()+1,e,2)}function lt(t,e){return f(t.getUTCMinutes(),e,2)}function ct(t,e){return f(t.getUTCSeconds(),e,2)}function ut(t){var e=t.getUTCDay();return 0===e?7:e}function ft(t,r){return f(e.utcSunday.count(e.utcYear(t)-1,t),r,2)}function ht(t){var r=t.getUTCDay();return r>=4||0===r?e.utcThursday(t):e.utcThursday.ceil(t)}function pt(t,r){return t=ht(t),f(e.utcThursday.count(e.utcYear(t),t)+(4===e.utcYear(t).getUTCDay()),r,2)}function dt(t){return t.getUTCDay()}function mt(t,r){return f(e.utcMonday.count(e.utcYear(t)-1,t),r,2)}function gt(t,e){return f(t.getUTCFullYear()%100,e,2)}function vt(t,e){return f((t=ht(t)).getUTCFullYear()%100,e,2)}function yt(t,e){return f(t.getUTCFullYear()%1e4,e,4)}function xt(t,r){var n=t.getUTCDay();return f((t=n>=4||0===n?e.utcThursday(t):e.utcThursday.ceil(t)).getUTCFullYear()%1e4,r,4)}function bt(){return"+0000"}function _t(){return"%"}function wt(t){return+t}function Tt(t){return Math.floor(+t/1e3)}function kt(e){return o=a(e),t.timeFormat=o.format,t.timeParse=o.parse,t.utcFormat=o.utcFormat,t.utcParse=o.utcParse,o}kt({dateTime:"%x, %X",date:"%-m/%-d/%Y",time:"%-I:%M:%S 
%p",periods:["AM","PM"],days:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],shortDays:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],months:["January","February","March","April","May","June","July","August","September","October","November","December"],shortMonths:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]});var At=Date.prototype.toISOString?function(t){return t.toISOString()}:t.utcFormat("%Y-%m-%dT%H:%M:%S.%LZ");var Mt=+new Date("2000-01-01T00:00:00.000Z")?function(t){var e=new Date(t);return isNaN(e)?null:e}:t.utcParse("%Y-%m-%dT%H:%M:%S.%LZ");t.isoFormat=At,t.isoParse=Mt,t.timeFormatDefaultLocale=kt,t.timeFormatLocale=a,Object.defineProperty(t,"__esModule",{value:!0})}))},{"d3-time":116}],116:[function(t,e,r){!function(t,n){"object"==typeof r&&void 0!==e?n(r):n((t=t||self).d3=t.d3||{})}(this,(function(t){"use strict";var e=new Date,r=new Date;function n(t,i,a,o){function s(e){return t(e=0===arguments.length?new Date:new Date(+e)),e}return s.floor=function(e){return t(e=new Date(+e)),e},s.ceil=function(e){return t(e=new Date(e-1)),i(e,1),t(e),e},s.round=function(t){var e=s(t),r=s.ceil(t);return t-e0))return o;do{o.push(a=new Date(+e)),i(e,n),t(e)}while(a=r)for(;t(r),!e(r);)r.setTime(r-1)}),(function(t,r){if(t>=t)if(r<0)for(;++r<=0;)for(;i(t,-1),!e(t););else for(;--r>=0;)for(;i(t,1),!e(t););}))},a&&(s.count=function(n,i){return e.setTime(+n),r.setTime(+i),t(e),t(r),Math.floor(a(e,r))},s.every=function(t){return t=Math.floor(t),isFinite(t)&&t>0?t>1?s.filter(o?function(e){return o(e)%t==0}:function(e){return s.count(0,e)%t==0}):s:null}),s}var i=n((function(){}),(function(t,e){t.setTime(+t+e)}),(function(t,e){return e-t}));i.every=function(t){return t=Math.floor(t),isFinite(t)&&t>0?t>1?n((function(e){e.setTime(Math.floor(e/t)*t)}),(function(e,r){e.setTime(+e+r*t)}),(function(e,r){return(r-e)/t})):i:null};var 
a=i.range,o=n((function(t){t.setTime(t-t.getMilliseconds())}),(function(t,e){t.setTime(+t+1e3*e)}),(function(t,e){return(e-t)/1e3}),(function(t){return t.getUTCSeconds()})),s=o.range,l=n((function(t){t.setTime(t-t.getMilliseconds()-1e3*t.getSeconds())}),(function(t,e){t.setTime(+t+6e4*e)}),(function(t,e){return(e-t)/6e4}),(function(t){return t.getMinutes()})),c=l.range,u=n((function(t){t.setTime(t-t.getMilliseconds()-1e3*t.getSeconds()-6e4*t.getMinutes())}),(function(t,e){t.setTime(+t+36e5*e)}),(function(t,e){return(e-t)/36e5}),(function(t){return t.getHours()})),f=u.range,h=n((function(t){t.setHours(0,0,0,0)}),(function(t,e){t.setDate(t.getDate()+e)}),(function(t,e){return(e-t-6e4*(e.getTimezoneOffset()-t.getTimezoneOffset()))/864e5}),(function(t){return t.getDate()-1})),p=h.range;function d(t){return n((function(e){e.setDate(e.getDate()-(e.getDay()+7-t)%7),e.setHours(0,0,0,0)}),(function(t,e){t.setDate(t.getDate()+7*e)}),(function(t,e){return(e-t-6e4*(e.getTimezoneOffset()-t.getTimezoneOffset()))/6048e5}))}var m=d(0),g=d(1),v=d(2),y=d(3),x=d(4),b=d(5),_=d(6),w=m.range,T=g.range,k=v.range,A=y.range,M=x.range,S=b.range,E=_.range,L=n((function(t){t.setDate(1),t.setHours(0,0,0,0)}),(function(t,e){t.setMonth(t.getMonth()+e)}),(function(t,e){return e.getMonth()-t.getMonth()+12*(e.getFullYear()-t.getFullYear())}),(function(t){return t.getMonth()})),C=L.range,P=n((function(t){t.setMonth(0,1),t.setHours(0,0,0,0)}),(function(t,e){t.setFullYear(t.getFullYear()+e)}),(function(t,e){return e.getFullYear()-t.getFullYear()}),(function(t){return t.getFullYear()}));P.every=function(t){return isFinite(t=Math.floor(t))&&t>0?n((function(e){e.setFullYear(Math.floor(e.getFullYear()/t)*t),e.setMonth(0,1),e.setHours(0,0,0,0)}),(function(e,r){e.setFullYear(e.getFullYear()+r*t)})):null};var I=P.range,O=n((function(t){t.setUTCSeconds(0,0)}),(function(t,e){t.setTime(+t+6e4*e)}),(function(t,e){return(e-t)/6e4}),(function(t){return 
t.getUTCMinutes()})),z=O.range,D=n((function(t){t.setUTCMinutes(0,0,0)}),(function(t,e){t.setTime(+t+36e5*e)}),(function(t,e){return(e-t)/36e5}),(function(t){return t.getUTCHours()})),R=D.range,F=n((function(t){t.setUTCHours(0,0,0,0)}),(function(t,e){t.setUTCDate(t.getUTCDate()+e)}),(function(t,e){return(e-t)/864e5}),(function(t){return t.getUTCDate()-1})),B=F.range;function N(t){return n((function(e){e.setUTCDate(e.getUTCDate()-(e.getUTCDay()+7-t)%7),e.setUTCHours(0,0,0,0)}),(function(t,e){t.setUTCDate(t.getUTCDate()+7*e)}),(function(t,e){return(e-t)/6048e5}))}var j=N(0),U=N(1),V=N(2),H=N(3),q=N(4),G=N(5),Y=N(6),W=j.range,X=U.range,Z=V.range,J=H.range,K=q.range,Q=G.range,$=Y.range,tt=n((function(t){t.setUTCDate(1),t.setUTCHours(0,0,0,0)}),(function(t,e){t.setUTCMonth(t.getUTCMonth()+e)}),(function(t,e){return e.getUTCMonth()-t.getUTCMonth()+12*(e.getUTCFullYear()-t.getUTCFullYear())}),(function(t){return t.getUTCMonth()})),et=tt.range,rt=n((function(t){t.setUTCMonth(0,1),t.setUTCHours(0,0,0,0)}),(function(t,e){t.setUTCFullYear(t.getUTCFullYear()+e)}),(function(t,e){return e.getUTCFullYear()-t.getUTCFullYear()}),(function(t){return t.getUTCFullYear()}));rt.every=function(t){return isFinite(t=Math.floor(t))&&t>0?n((function(e){e.setUTCFullYear(Math.floor(e.getUTCFullYear()/t)*t),e.setUTCMonth(0,1),e.setUTCHours(0,0,0,0)}),(function(e,r){e.setUTCFullYear(e.getUTCFullYear()+r*t)})):null};var 
nt=rt.range;t.timeDay=h,t.timeDays=p,t.timeFriday=b,t.timeFridays=S,t.timeHour=u,t.timeHours=f,t.timeInterval=n,t.timeMillisecond=i,t.timeMilliseconds=a,t.timeMinute=l,t.timeMinutes=c,t.timeMonday=g,t.timeMondays=T,t.timeMonth=L,t.timeMonths=C,t.timeSaturday=_,t.timeSaturdays=E,t.timeSecond=o,t.timeSeconds=s,t.timeSunday=m,t.timeSundays=w,t.timeThursday=x,t.timeThursdays=M,t.timeTuesday=v,t.timeTuesdays=k,t.timeWednesday=y,t.timeWednesdays=A,t.timeWeek=m,t.timeWeeks=w,t.timeYear=P,t.timeYears=I,t.utcDay=F,t.utcDays=B,t.utcFriday=G,t.utcFridays=Q,t.utcHour=D,t.utcHours=R,t.utcMillisecond=i,t.utcMilliseconds=a,t.utcMinute=O,t.utcMinutes=z,t.utcMonday=U,t.utcMondays=X,t.utcMonth=tt,t.utcMonths=et,t.utcSaturday=Y,t.utcSaturdays=$,t.utcSecond=o,t.utcSeconds=s,t.utcSunday=j,t.utcSundays=W,t.utcThursday=q,t.utcThursdays=K,t.utcTuesday=V,t.utcTuesdays=Z,t.utcWednesday=H,t.utcWednesdays=J,t.utcWeek=j,t.utcWeeks=W,t.utcYear=rt,t.utcYears=nt,Object.defineProperty(t,"__esModule",{value:!0})}))},{}],117:[function(t,e,r){!function(t,n){"object"==typeof r&&void 0!==e?n(r):n((t=t||self).d3=t.d3||{})}(this,(function(t){"use strict";var e,r,n=0,i=0,a=0,o=0,s=0,l=0,c="object"==typeof performance&&performance.now?performance:Date,u="object"==typeof window&&window.requestAnimationFrame?window.requestAnimationFrame.bind(window):function(t){setTimeout(t,17)};function f(){return s||(u(h),s=c.now()+l)}function h(){s=0}function p(){this._call=this._time=this._next=null}function d(t,e,r){var n=new p;return n.restart(t,e,r),n}function m(){f(),++n;for(var t,r=e;r;)(t=s-r._time)>=0&&r._call.call(null,t),r=r._next;--n}function g(){s=(o=c.now())+l,n=i=0;try{m()}finally{n=0,function(){var t,n,i=e,a=1/0;for(;i;)i._call?(a>i._time&&(a=i._time),t=i,i=i._next):(n=i._next,i._next=null,i=t?t._next=n:e=n);r=t,y(a)}(),s=0}}function v(){var t=c.now(),e=t-o;e>1e3&&(l-=e,o=t)}function 
y(t){n||(i&&(i=clearTimeout(i)),t-s>24?(t<1/0&&(i=setTimeout(g,t-c.now()-l)),a&&(a=clearInterval(a))):(a||(o=c.now(),a=setInterval(v,1e3)),n=1,u(g)))}p.prototype=d.prototype={constructor:p,restart:function(t,n,i){if("function"!=typeof t)throw new TypeError("callback is not a function");i=(null==i?f():+i)+(null==n?0:+n),this._next||r===this||(r?r._next=this:e=this,r=this),this._call=t,this._time=i,y()},stop:function(){this._call&&(this._call=null,this._time=1/0,y())}},t.interval=function(t,e,r){var n=new p,i=e;return null==e?(n.restart(t,e,r),n):(e=+e,r=null==r?f():+r,n.restart((function a(o){o+=i,n.restart(a,i+=e,r),t(o)}),e,r),n)},t.now=f,t.timeout=function(t,e,r){var n=new p;return e=null==e?0:+e,n.restart((function(r){n.stop(),t(r+e)}),e,r),n},t.timer=d,t.timerFlush=m,Object.defineProperty(t,"__esModule",{value:!0})}))},{}],118:[function(t,e,r){e.exports=function(){for(var t=0;ts*l){var p=(h-f)/s;a[u]=1e3*p}}return a}function o(t){for(var e=[],r=t[0];r<=t[1];r++)for(var n=String.fromCharCode(r),i=t[0];i0)return function(t,e){var r,n;for(r=new Array(t),n=0;n80*r){n=l=t[0],s=c=t[1];for(var b=r;bl&&(l=u),p>c&&(c=p);d=0!==(d=Math.max(l-n,c-s))?1/d:0}return o(y,x,r,n,s,d),x}function i(t,e,r,n,i){var a,o;if(i===E(t,e,r,n)>0)for(a=e;a=e;a-=n)o=A(a,t[a],t[a+1],o);return o&&x(o,o.next)&&(M(o),o=o.next),o}function a(t,e){if(!t)return t;e||(e=t);var r,n=t;do{if(r=!1,n.steiner||!x(n,n.next)&&0!==y(n.prev,n,n.next))n=n.next;else{if(M(n),(n=e=n.prev)===n.next)break;r=!0}}while(r||n!==e);return e}function o(t,e,r,n,i,f,h){if(t){!h&&f&&function(t,e,r,n){var i=t;do{null===i.z&&(i.z=d(i.x,i.y,e,r,n)),i.prevZ=i.prev,i.nextZ=i.next,i=i.next}while(i!==t);i.prevZ.nextZ=null,i.prevZ=null,function(t){var e,r,n,i,a,o,s,l,c=1;do{for(r=t,t=null,a=null,o=0;r;){for(o++,n=r,s=0,e=0;e0||l>0&&n;)0!==s&&(0===l||!n||r.z<=n.z)?(i=r,r=r.nextZ,s--):(i=n,n=n.nextZ,l--),a?a.nextZ=i:t=i,i.prevZ=a,a=i;r=n}a.nextZ=null,c*=2}while(o>1)}(i)}(t,n,i,f);for(var 
p,m,g=t;t.prev!==t.next;)if(p=t.prev,m=t.next,f?l(t,n,i,f):s(t))e.push(p.i/r),e.push(t.i/r),e.push(m.i/r),M(t),t=m.next,g=m.next;else if((t=m)===g){h?1===h?o(t=c(a(t),e,r),e,r,n,i,f,2):2===h&&u(t,e,r,n,i,f):o(a(t),e,r,n,i,f,1);break}}}function s(t){var e=t.prev,r=t,n=t.next;if(y(e,r,n)>=0)return!1;for(var i=t.next.next;i!==t.prev;){if(g(e.x,e.y,r.x,r.y,n.x,n.y,i.x,i.y)&&y(i.prev,i,i.next)>=0)return!1;i=i.next}return!0}function l(t,e,r,n){var i=t.prev,a=t,o=t.next;if(y(i,a,o)>=0)return!1;for(var s=i.xa.x?i.x>o.x?i.x:o.x:a.x>o.x?a.x:o.x,u=i.y>a.y?i.y>o.y?i.y:o.y:a.y>o.y?a.y:o.y,f=d(s,l,e,r,n),h=d(c,u,e,r,n),p=t.prevZ,m=t.nextZ;p&&p.z>=f&&m&&m.z<=h;){if(p!==t.prev&&p!==t.next&&g(i.x,i.y,a.x,a.y,o.x,o.y,p.x,p.y)&&y(p.prev,p,p.next)>=0)return!1;if(p=p.prevZ,m!==t.prev&&m!==t.next&&g(i.x,i.y,a.x,a.y,o.x,o.y,m.x,m.y)&&y(m.prev,m,m.next)>=0)return!1;m=m.nextZ}for(;p&&p.z>=f;){if(p!==t.prev&&p!==t.next&&g(i.x,i.y,a.x,a.y,o.x,o.y,p.x,p.y)&&y(p.prev,p,p.next)>=0)return!1;p=p.prevZ}for(;m&&m.z<=h;){if(m!==t.prev&&m!==t.next&&g(i.x,i.y,a.x,a.y,o.x,o.y,m.x,m.y)&&y(m.prev,m,m.next)>=0)return!1;m=m.nextZ}return!0}function c(t,e,r){var n=t;do{var i=n.prev,o=n.next.next;!x(i,o)&&b(i,n,n.next,o)&&T(i,o)&&T(o,i)&&(e.push(i.i/r),e.push(n.i/r),e.push(o.i/r),M(n),M(n.next),n=t=o),n=n.next}while(n!==t);return a(n)}function u(t,e,r,n,i,s){var l=t;do{for(var c=l.next.next;c!==l.prev;){if(l.i!==c.i&&v(l,c)){var u=k(l,c);return l=a(l,l.next),u=a(u,u.next),o(l,e,r,n,i,s),void o(u,e,r,n,i,s)}c=c.next}l=l.next}while(l!==t)}function f(t,e){return t.x-e.x}function h(t,e){var r=function(t,e){var r,n=e,i=t.x,a=t.y,o=-1/0;do{if(a<=n.y&&a>=n.next.y&&n.next.y!==n.y){var s=n.x+(a-n.y)*(n.next.x-n.x)/(n.next.y-n.y);if(s<=i&&s>o){if(o=s,s===i){if(a===n.y)return n;if(a===n.next.y)return n.next}r=n.x=n.x&&n.x>=u&&i!==n.x&&g(ar.x||n.x===r.x&&p(r,n)))&&(r=n,h=l)),n=n.next}while(n!==c);return r}(t,e);if(!r)return e;var n=k(r,t),i=a(r,r.next);return a(n,n.next),e===r?i:e}function p(t,e){return 
y(t.prev,t,e.prev)<0&&y(e.next,t,t.next)<0}function d(t,e,r,n,i){return(t=1431655765&((t=858993459&((t=252645135&((t=16711935&((t=32767*(t-r)*i)|t<<8))|t<<4))|t<<2))|t<<1))|(e=1431655765&((e=858993459&((e=252645135&((e=16711935&((e=32767*(e-n)*i)|e<<8))|e<<4))|e<<2))|e<<1))<<1}function m(t){var e=t,r=t;do{(e.x=0&&(t-o)*(n-s)-(r-o)*(e-s)>=0&&(r-o)*(a-s)-(i-o)*(n-s)>=0}function v(t,e){return t.next.i!==e.i&&t.prev.i!==e.i&&!function(t,e){var r=t;do{if(r.i!==t.i&&r.next.i!==t.i&&r.i!==e.i&&r.next.i!==e.i&&b(r,r.next,t,e))return!0;r=r.next}while(r!==t);return!1}(t,e)&&(T(t,e)&&T(e,t)&&function(t,e){var r=t,n=!1,i=(t.x+e.x)/2,a=(t.y+e.y)/2;do{r.y>a!=r.next.y>a&&r.next.y!==r.y&&i<(r.next.x-r.x)*(a-r.y)/(r.next.y-r.y)+r.x&&(n=!n),r=r.next}while(r!==t);return n}(t,e)&&(y(t.prev,t,e.prev)||y(t,e.prev,e))||x(t,e)&&y(t.prev,t,t.next)>0&&y(e.prev,e,e.next)>0)}function y(t,e,r){return(e.y-t.y)*(r.x-e.x)-(e.x-t.x)*(r.y-e.y)}function x(t,e){return t.x===e.x&&t.y===e.y}function b(t,e,r,n){var i=w(y(t,e,r)),a=w(y(t,e,n)),o=w(y(r,n,t)),s=w(y(r,n,e));return i!==a&&o!==s||(!(0!==i||!_(t,r,e))||(!(0!==a||!_(t,n,e))||(!(0!==o||!_(r,t,n))||!(0!==s||!_(r,e,n)))))}function _(t,e,r){return e.x<=Math.max(t.x,r.x)&&e.x>=Math.min(t.x,r.x)&&e.y<=Math.max(t.y,r.y)&&e.y>=Math.min(t.y,r.y)}function w(t){return t>0?1:t<0?-1:0}function T(t,e){return y(t.prev,t,t.next)<0?y(t,e,t.next)>=0&&y(t,t.prev,e)>=0:y(t,e,t.prev)<0||y(t,t.next,e)<0}function k(t,e){var r=new S(t.i,t.x,t.y),n=new S(e.i,e.x,e.y),i=t.next,a=e.prev;return t.next=e,e.prev=t,r.next=i,i.prev=r,n.next=r,r.prev=n,a.next=n,n.prev=a,n}function A(t,e,r,n){var i=new S(t,e,r);return n?(i.next=n.next,i.prev=n,n.next.prev=i,n.next=i):(i.prev=i,i.next=i),i}function M(t){t.next.prev=t.prev,t.prev.next=t.next,t.prevZ&&(t.prevZ.nextZ=t.nextZ),t.nextZ&&(t.nextZ.prevZ=t.prevZ)}function S(t,e,r){this.i=t,this.x=e,this.y=r,this.prev=null,this.next=null,this.z=null,this.prevZ=null,this.nextZ=null,this.steiner=!1}function E(t,e,r,n){for(var 
i=0,a=e,o=r-n;a0&&(n+=t[i-1].length,r.holes.push(n))}return r}},{}],124:[function(t,e,r){var n=t("strongly-connected-components");e.exports=function(t,e){var r,i=[],a=[],o=[],s={},l=[];function c(t){var e,n,i=!1;for(a.push(t),o[t]=!0,e=0;e=e}))}(e);for(var r,i=n(t).components.filter((function(t){return t.length>1})),a=1/0,o=0;o=55296&&y<=56319&&(w+=t[++r]),w=T?h.call(T,k,w,m):w,e?(p.value=w,d(g,m,p)):g[m]=w,++m;v=m}if(void 0===v)for(v=o(t.length),e&&(g=new e(v)),r=0;r0?1:-1}},{}],135:[function(t,e,r){"use strict";var n=t("../math/sign"),i=Math.abs,a=Math.floor;e.exports=function(t){return isNaN(t)?0:0!==(t=Number(t))&&isFinite(t)?n(t)*a(i(t)):t}},{"../math/sign":132}],136:[function(t,e,r){"use strict";var n=t("./to-integer"),i=Math.max;e.exports=function(t){return i(0,n(t))}},{"./to-integer":135}],137:[function(t,e,r){"use strict";var n=t("./valid-callable"),i=t("./valid-value"),a=Function.prototype.bind,o=Function.prototype.call,s=Object.keys,l=Object.prototype.propertyIsEnumerable;e.exports=function(t,e){return function(r,c){var u,f=arguments[2],h=arguments[3];return r=Object(i(r)),n(c),u=s(r),h&&u.sort("function"==typeof h?a.call(h,r):void 0),"function"!=typeof t&&(t=u[t]),o.call(t,u,(function(t,n){return l.call(r,t)?o.call(c,f,r[t],t,r,n):e}))}}},{"./valid-callable":154,"./valid-value":156}],138:[function(t,e,r){"use strict";e.exports=t("./is-implemented")()?Object.assign:t("./shim")},{"./is-implemented":139,"./shim":140}],139:[function(t,e,r){"use strict";e.exports=function(){var t,e=Object.assign;return"function"==typeof e&&(e(t={foo:"raz"},{bar:"dwa"},{trzy:"trzy"}),t.foo+t.bar+t.trzy==="razdwatrzy")}},{}],140:[function(t,e,r){"use strict";var n=t("../keys"),i=t("../valid-value"),a=Math.max;e.exports=function(t,e){var r,o,s,l=a(arguments.length,2);for(t=Object(i(t)),s=function(n){try{t[n]=e[n]}catch(t){r||(r=t)}},o=1;o-1}},{}],160:[function(t,e,r){"use strict";var n=Object.prototype.toString,i=n.call("");e.exports=function(t){return"string"==typeof 
t||t&&"object"==typeof t&&(t instanceof String||n.call(t)===i)||!1}},{}],161:[function(t,e,r){"use strict";var n=Object.create(null),i=Math.random;e.exports=function(){var t;do{t=i().toString(36).slice(2)}while(n[t]);return t}},{}],162:[function(t,e,r){"use strict";var n,i=t("es5-ext/object/set-prototype-of"),a=t("es5-ext/string/#/contains"),o=t("d"),s=t("es6-symbol"),l=t("./"),c=Object.defineProperty;n=e.exports=function(t,e){if(!(this instanceof n))throw new TypeError("Constructor requires 'new'");l.call(this,t),e=e?a.call(e,"key+value")?"key+value":a.call(e,"key")?"key":"value":"value",c(this,"__kind__",o("",e))},i&&i(n,l),delete n.prototype.constructor,n.prototype=Object.create(l.prototype,{_resolve:o((function(t){return"value"===this.__kind__?this.__list__[t]:"key+value"===this.__kind__?[t,this.__list__[t]]:t}))}),c(n.prototype,s.toStringTag,o("c","Array Iterator"))},{"./":165,d:101,"es5-ext/object/set-prototype-of":151,"es5-ext/string/#/contains":157,"es6-symbol":169}],163:[function(t,e,r){"use strict";var n=t("es5-ext/function/is-arguments"),i=t("es5-ext/object/valid-callable"),a=t("es5-ext/string/is-string"),o=t("./get"),s=Array.isArray,l=Function.prototype.call,c=Array.prototype.some;e.exports=function(t,e){var r,u,f,h,p,d,m,g,v=arguments[2];if(s(t)||n(t)?r="array":a(t)?r="string":t=o(t),i(e),f=function(){h=!0},"array"!==r)if("string"!==r)for(u=t.next();!u.done;){if(l.call(e,v,u.value,f),h)return;u=t.next()}else for(d=t.length,p=0;p=55296&&g<=56319&&(m+=t[++p]),l.call(e,v,m,f),!h);++p);else c.call(t,(function(t){return l.call(e,v,t,f),h}))}},{"./get":164,"es5-ext/function/is-arguments":129,"es5-ext/object/valid-callable":154,"es5-ext/string/is-string":160}],164:[function(t,e,r){"use strict";var n=t("es5-ext/function/is-arguments"),i=t("es5-ext/string/is-string"),a=t("./array"),o=t("./string"),s=t("./valid-iterable"),l=t("es6-symbol").iterator;e.exports=function(t){return"function"==typeof s(t)[l]?t[l]():n(t)?new a(t):i(t)?new o(t):new 
a(t)}},{"./array":162,"./string":167,"./valid-iterable":168,"es5-ext/function/is-arguments":129,"es5-ext/string/is-string":160,"es6-symbol":169}],165:[function(t,e,r){"use strict";var n,i=t("es5-ext/array/#/clear"),a=t("es5-ext/object/assign"),o=t("es5-ext/object/valid-callable"),s=t("es5-ext/object/valid-value"),l=t("d"),c=t("d/auto-bind"),u=t("es6-symbol"),f=Object.defineProperty,h=Object.defineProperties;e.exports=n=function(t,e){if(!(this instanceof n))throw new TypeError("Constructor requires 'new'");h(this,{__list__:l("w",s(t)),__context__:l("w",e),__nextIndex__:l("w",0)}),e&&(o(e.on),e.on("_add",this._onAdd),e.on("_delete",this._onDelete),e.on("_clear",this._onClear))},delete n.prototype.constructor,h(n.prototype,a({_next:l((function(){var t;if(this.__list__)return this.__redo__&&void 0!==(t=this.__redo__.shift())?t:this.__nextIndex__=this.__nextIndex__||(++this.__nextIndex__,this.__redo__?(this.__redo__.forEach((function(e,r){e>=t&&(this.__redo__[r]=++e)}),this),this.__redo__.push(t)):f(this,"__redo__",l("c",[t])))})),_onDelete:l((function(t){var e;t>=this.__nextIndex__||(--this.__nextIndex__,this.__redo__&&(-1!==(e=this.__redo__.indexOf(t))&&this.__redo__.splice(e,1),this.__redo__.forEach((function(e,r){e>t&&(this.__redo__[r]=--e)}),this)))})),_onClear:l((function(){this.__redo__&&i.call(this.__redo__),this.__nextIndex__=0}))}))),f(n.prototype,u.iterator,l((function(){return this})))},{d:101,"d/auto-bind":100,"es5-ext/array/#/clear":125,"es5-ext/object/assign":138,"es5-ext/object/valid-callable":154,"es5-ext/object/valid-value":156,"es6-symbol":169}],166:[function(t,e,r){"use strict";var n=t("es5-ext/function/is-arguments"),i=t("es5-ext/object/is-value"),a=t("es5-ext/string/is-string"),o=t("es6-symbol").iterator,s=Array.isArray;e.exports=function(t){return!!i(t)&&(!!s(t)||(!!a(t)||(!!n(t)||"function"==typeof 
t[o])))}},{"es5-ext/function/is-arguments":129,"es5-ext/object/is-value":145,"es5-ext/string/is-string":160,"es6-symbol":169}],167:[function(t,e,r){"use strict";var n,i=t("es5-ext/object/set-prototype-of"),a=t("d"),o=t("es6-symbol"),s=t("./"),l=Object.defineProperty;n=e.exports=function(t){if(!(this instanceof n))throw new TypeError("Constructor requires 'new'");t=String(t),s.call(this,t),l(this,"__length__",a("",t.length))},i&&i(n,s),delete n.prototype.constructor,n.prototype=Object.create(s.prototype,{_next:a((function(){if(this.__list__)return this.__nextIndex__=55296&&e<=56319?r+this.__list__[this.__nextIndex__++]:r}))}),l(n.prototype,o.toStringTag,a("c","String Iterator"))},{"./":165,d:101,"es5-ext/object/set-prototype-of":151,"es6-symbol":169}],168:[function(t,e,r){"use strict";var n=t("./is-iterable");e.exports=function(t){if(!n(t))throw new TypeError(t+" is not iterable");return t}},{"./is-iterable":166}],169:[function(t,e,r){"use strict";e.exports=t("./is-implemented")()?t("ext/global-this").Symbol:t("./polyfill")},{"./is-implemented":170,"./polyfill":175,"ext/global-this":183}],170:[function(t,e,r){"use strict";var n=t("ext/global-this"),i={object:!0,symbol:!0};e.exports=function(){var t,e=n.Symbol;if("function"!=typeof e)return!1;t=e("test symbol");try{String(t)}catch(t){return!1}return!!i[typeof e.iterator]&&(!!i[typeof e.toPrimitive]&&!!i[typeof e.toStringTag])}},{"ext/global-this":183}],171:[function(t,e,r){"use strict";e.exports=function(t){return!!t&&("symbol"==typeof t||!!t.constructor&&("Symbol"===t.constructor.name&&"Symbol"===t[t.constructor.toStringTag]))}},{}],172:[function(t,e,r){"use strict";var n=t("d"),i=Object.create,a=Object.defineProperty,o=Object.prototype,s=i(null);e.exports=function(t){for(var e,r,i=0;s[t+(i||"")];)++i;return s[t+=i||""]=!0,a(o,e="@@"+t,n.gs(null,(function(t){r||(r=!0,a(this,e,n(t)),r=!1)}))),e}},{d:101}],173:[function(t,e,r){"use strict";var n=t("d"),i=t("ext/global-this").Symbol;e.exports=function(t){return 
Object.defineProperties(t,{hasInstance:n("",i&&i.hasInstance||t("hasInstance")),isConcatSpreadable:n("",i&&i.isConcatSpreadable||t("isConcatSpreadable")),iterator:n("",i&&i.iterator||t("iterator")),match:n("",i&&i.match||t("match")),replace:n("",i&&i.replace||t("replace")),search:n("",i&&i.search||t("search")),species:n("",i&&i.species||t("species")),split:n("",i&&i.split||t("split")),toPrimitive:n("",i&&i.toPrimitive||t("toPrimitive")),toStringTag:n("",i&&i.toStringTag||t("toStringTag")),unscopables:n("",i&&i.unscopables||t("unscopables"))})}},{d:101,"ext/global-this":183}],174:[function(t,e,r){"use strict";var n=t("d"),i=t("../../../validate-symbol"),a=Object.create(null);e.exports=function(t){return Object.defineProperties(t,{for:n((function(e){return a[e]?a[e]:a[e]=t(String(e))})),keyFor:n((function(t){var e;for(e in i(t),a)if(a[e]===t)return e}))})}},{"../../../validate-symbol":176,d:101}],175:[function(t,e,r){"use strict";var n,i,a,o=t("d"),s=t("./validate-symbol"),l=t("ext/global-this").Symbol,c=t("./lib/private/generate-name"),u=t("./lib/private/setup/standard-symbols"),f=t("./lib/private/setup/symbol-registry"),h=Object.create,p=Object.defineProperties,d=Object.defineProperty;if("function"==typeof l)try{String(l()),a=!0}catch(t){}else l=null;i=function(t){if(this instanceof i)throw new TypeError("Symbol is not a constructor");return n(t)},e.exports=n=function t(e){var r;if(this instanceof t)throw new TypeError("Symbol is not a constructor");return a?l(e):(r=h(i.prototype),e=void 0===e?"":String(e),p(r,{__description__:o("",e),__name__:o("",c(e))}))},u(n),f(n),p(i.prototype,{constructor:o(n),toString:o("",(function(){return this.__name__}))}),p(n.prototype,{toString:o((function(){return"Symbol ("+s(this).__description__+")"})),valueOf:o((function(){return s(this)}))}),d(n.prototype,n.toPrimitive,o("",(function(){var t=s(this);return"symbol"==typeof 
t?t:t.toString()}))),d(n.prototype,n.toStringTag,o("c","Symbol")),d(i.prototype,n.toStringTag,o("c",n.prototype[n.toStringTag])),d(i.prototype,n.toPrimitive,o("c",n.prototype[n.toPrimitive]))},{"./lib/private/generate-name":172,"./lib/private/setup/standard-symbols":173,"./lib/private/setup/symbol-registry":174,"./validate-symbol":176,d:101,"ext/global-this":183}],176:[function(t,e,r){"use strict";var n=t("./is-symbol");e.exports=function(t){if(!n(t))throw new TypeError(t+" is not a symbol");return t}},{"./is-symbol":171}],177:[function(t,e,r){"use strict";e.exports=t("./is-implemented")()?WeakMap:t("./polyfill")},{"./is-implemented":178,"./polyfill":180}],178:[function(t,e,r){"use strict";e.exports=function(){var t,e;if("function"!=typeof WeakMap)return!1;try{t=new WeakMap([[e={},"one"],[{},"two"],[{},"three"]])}catch(t){return!1}return"[object WeakMap]"===String(t)&&("function"==typeof t.set&&(t.set({},1)===t&&("function"==typeof t.delete&&("function"==typeof t.has&&"one"===t.get(e)))))}},{}],179:[function(t,e,r){"use strict";e.exports="function"==typeof WeakMap&&"[object WeakMap]"===Object.prototype.toString.call(new WeakMap)},{}],180:[function(t,e,r){"use strict";var n,i=t("es5-ext/object/is-value"),a=t("es5-ext/object/set-prototype-of"),o=t("es5-ext/object/valid-object"),s=t("es5-ext/object/valid-value"),l=t("es5-ext/string/random-uniq"),c=t("d"),u=t("es6-iterator/get"),f=t("es6-iterator/for-of"),h=t("es6-symbol").toStringTag,p=t("./is-native-implemented"),d=Array.isArray,m=Object.defineProperty,g=Object.prototype.hasOwnProperty,v=Object.getPrototypeOf;e.exports=n=function(){var t,e=arguments[0];if(!(this instanceof n))throw new TypeError("Constructor requires 'new'");return t=p&&a&&WeakMap!==n?a(new 
WeakMap,v(this)):this,i(e)&&(d(e)||(e=u(e))),m(t,"__weakMapData__",c("c","$weakMap$"+l())),e?(f(e,(function(e){s(e),t.set(e[0],e[1])})),t):t},p&&(a&&a(n,WeakMap),n.prototype=Object.create(WeakMap.prototype,{constructor:c(n)})),Object.defineProperties(n.prototype,{delete:c((function(t){return!!g.call(o(t),this.__weakMapData__)&&(delete t[this.__weakMapData__],!0)})),get:c((function(t){if(g.call(o(t),this.__weakMapData__))return t[this.__weakMapData__]})),has:c((function(t){return g.call(o(t),this.__weakMapData__)})),set:c((function(t,e){return m(o(t),this.__weakMapData__,c("c",e)),this})),toString:c((function(){return"[object WeakMap]"}))}),m(n.prototype,h,c("c","WeakMap"))},{"./is-native-implemented":179,d:101,"es5-ext/object/is-value":145,"es5-ext/object/set-prototype-of":151,"es5-ext/object/valid-object":155,"es5-ext/object/valid-value":156,"es5-ext/string/random-uniq":161,"es6-iterator/for-of":163,"es6-iterator/get":164,"es6-symbol":169}],181:[function(t,e,r){"use strict";var n,i="object"==typeof Reflect?Reflect:null,a=i&&"function"==typeof i.apply?i.apply:function(t,e,r){return Function.prototype.apply.call(t,e,r)};n=i&&"function"==typeof i.ownKeys?i.ownKeys:Object.getOwnPropertySymbols?function(t){return Object.getOwnPropertyNames(t).concat(Object.getOwnPropertySymbols(t))}:function(t){return Object.getOwnPropertyNames(t)};var o=Number.isNaN||function(t){return t!=t};function s(){s.init.call(this)}e.exports=s,e.exports.once=function(t,e){return new Promise((function(r,n){function i(r){t.removeListener(e,a),n(r)}function a(){"function"==typeof t.removeListener&&t.removeListener("error",i),r([].slice.call(arguments))}v(t,e,a,{once:!0}),"error"!==e&&function(t,e,r){"function"==typeof t.on&&v(t,"error",e,r)}(t,i,{once:!0})}))},s.EventEmitter=s,s.prototype._events=void 0,s.prototype._eventsCount=0,s.prototype._maxListeners=void 0;var l=10;function c(t){if("function"!=typeof t)throw new TypeError('The "listener" argument must be of type Function. 
Received type '+typeof t)}function u(t){return void 0===t._maxListeners?s.defaultMaxListeners:t._maxListeners}function f(t,e,r,n){var i,a,o,s;if(c(r),void 0===(a=t._events)?(a=t._events=Object.create(null),t._eventsCount=0):(void 0!==a.newListener&&(t.emit("newListener",e,r.listener?r.listener:r),a=t._events),o=a[e]),void 0===o)o=a[e]=r,++t._eventsCount;else if("function"==typeof o?o=a[e]=n?[r,o]:[o,r]:n?o.unshift(r):o.push(r),(i=u(t))>0&&o.length>i&&!o.warned){o.warned=!0;var l=new Error("Possible EventEmitter memory leak detected. "+o.length+" "+String(e)+" listeners added. Use emitter.setMaxListeners() to increase limit");l.name="MaxListenersExceededWarning",l.emitter=t,l.type=e,l.count=o.length,s=l,console&&console.warn&&console.warn(s)}return t}function h(){if(!this.fired)return this.target.removeListener(this.type,this.wrapFn),this.fired=!0,0===arguments.length?this.listener.call(this.target):this.listener.apply(this.target,arguments)}function p(t,e,r){var n={fired:!1,wrapFn:void 0,target:t,type:e,listener:r},i=h.bind(n);return i.listener=r,n.wrapFn=i,i}function d(t,e,r){var n=t._events;if(void 0===n)return[];var i=n[e];return void 0===i?[]:"function"==typeof i?r?[i.listener||i]:[i]:r?function(t){for(var e=new Array(t.length),r=0;r0&&(o=e[0]),o instanceof Error)throw o;var s=new Error("Unhandled error."+(o?" 
("+o.message+")":""));throw s.context=o,s}var l=i[t];if(void 0===l)return!1;if("function"==typeof l)a(l,this,e);else{var c=l.length,u=g(l,c);for(r=0;r=0;a--)if(r[a]===e||r[a].listener===e){o=r[a].listener,i=a;break}if(i<0)return this;0===i?r.shift():function(t,e){for(;e+1=0;n--)this.removeListener(t,e[n]);return this},s.prototype.listeners=function(t){return d(this,t,!0)},s.prototype.rawListeners=function(t){return d(this,t,!1)},s.listenerCount=function(t,e){return"function"==typeof t.listenerCount?t.listenerCount(e):m.call(t,e)},s.prototype.listenerCount=m,s.prototype.eventNames=function(){return this._eventsCount>0?n(this._events):[]}},{}],182:[function(t,e,r){var n=function(){if("object"==typeof self&&self)return self;if("object"==typeof window&&window)return window;throw new Error("Unable to resolve global `this`")};e.exports=function(){if(this)return this;try{Object.defineProperty(Object.prototype,"__global__",{get:function(){return this},configurable:!0})}catch(t){return n()}try{return __global__||n()}finally{delete Object.prototype.__global__}}()},{}],183:[function(t,e,r){"use strict";e.exports=t("./is-implemented")()?globalThis:t("./implementation")},{"./implementation":182,"./is-implemented":184}],184:[function(t,e,r){"use strict";e.exports=function(){return"object"==typeof globalThis&&(!!globalThis&&globalThis.Array===Array)}},{}],185:[function(t,e,r){"use strict";var n=t("is-string-blank");e.exports=function(t){var e=typeof t;if("string"===e){var r=t;if(0===(t=+t)&&n(r))return!1}else if("number"!==e)return!1;return t-t<1}},{"is-string-blank":232}],186:[function(t,e,r){var n=t("dtype");e.exports=function(t,e,r){if(!t)throw new TypeError("must specify data as first parameter");if(r=0|+(r||0),Array.isArray(t)&&t[0]&&"number"==typeof t[0][0]){var i,a,o,s,l=t[0].length,c=t.length*l;e&&"string"!=typeof e||(e=new(n(e||"float32"))(c+r));var u=e.length-r;if(c!==u)throw new Error("source length "+c+" ("+l+"x"+t.length+") does not match destination length 
"+u);for(i=0,o=r;ie[0]-o[0]/2&&(h=o[0]/2,p+=o[1]);return r}},{"css-font/stringify":97}],188:[function(t,e,r){"use strict";function n(t,e){e||(e={}),("string"==typeof t||Array.isArray(t))&&(e.family=t);var r=Array.isArray(e.family)?e.family.join(", "):e.family;if(!r)throw Error("`family` must be defined");var s=e.size||e.fontSize||e.em||48,l=e.weight||e.fontWeight||"",c=(t=[e.style||e.fontStyle||"",l,s].join(" ")+"px "+r,e.origin||"top");if(n.cache[r]&&s<=n.cache[r].em)return i(n.cache[r],c);var u=e.canvas||n.canvas,f=u.getContext("2d"),h={upper:void 0!==e.upper?e.upper:"H",lower:void 0!==e.lower?e.lower:"x",descent:void 0!==e.descent?e.descent:"p",ascent:void 0!==e.ascent?e.ascent:"h",tittle:void 0!==e.tittle?e.tittle:"i",overshoot:void 0!==e.overshoot?e.overshoot:"O"},p=Math.ceil(1.5*s);u.height=p,u.width=.5*p,f.font=t;var d={top:0};f.clearRect(0,0,p,p),f.textBaseline="top",f.fillStyle="black",f.fillText("H",0,0);var m=a(f.getImageData(0,0,p,p));f.clearRect(0,0,p,p),f.textBaseline="bottom",f.fillText("H",0,p);var g=a(f.getImageData(0,0,p,p));d.lineHeight=d.bottom=p-g+m,f.clearRect(0,0,p,p),f.textBaseline="alphabetic",f.fillText("H",0,p);var v=p-a(f.getImageData(0,0,p,p))-1+m;d.baseline=d.alphabetic=v,f.clearRect(0,0,p,p),f.textBaseline="middle",f.fillText("H",0,.5*p);var y=a(f.getImageData(0,0,p,p));d.median=d.middle=p-y-1+m-.5*p,f.clearRect(0,0,p,p),f.textBaseline="hanging",f.fillText("H",0,.5*p);var x=a(f.getImageData(0,0,p,p));d.hanging=p-x-1+m-.5*p,f.clearRect(0,0,p,p),f.textBaseline="ideographic",f.fillText("H",0,p);var 
b=a(f.getImageData(0,0,p,p));if(d.ideographic=p-b-1+m,h.upper&&(f.clearRect(0,0,p,p),f.textBaseline="top",f.fillText(h.upper,0,0),d.upper=a(f.getImageData(0,0,p,p)),d.capHeight=d.baseline-d.upper),h.lower&&(f.clearRect(0,0,p,p),f.textBaseline="top",f.fillText(h.lower,0,0),d.lower=a(f.getImageData(0,0,p,p)),d.xHeight=d.baseline-d.lower),h.tittle&&(f.clearRect(0,0,p,p),f.textBaseline="top",f.fillText(h.tittle,0,0),d.tittle=a(f.getImageData(0,0,p,p))),h.ascent&&(f.clearRect(0,0,p,p),f.textBaseline="top",f.fillText(h.ascent,0,0),d.ascent=a(f.getImageData(0,0,p,p))),h.descent&&(f.clearRect(0,0,p,p),f.textBaseline="top",f.fillText(h.descent,0,0),d.descent=o(f.getImageData(0,0,p,p))),h.overshoot){f.clearRect(0,0,p,p),f.textBaseline="top",f.fillText(h.overshoot,0,0);var _=o(f.getImageData(0,0,p,p));d.overshoot=_-v}for(var w in d)d[w]/=s;return d.em=s,n.cache[r]=d,i(d,c)}function i(t,e){var r={};for(var n in"string"==typeof e&&(e=t[e]),t)"em"!==n&&(r[n]=t[n]-e);return r}function a(t){for(var e=t.height,r=t.data,n=3;n0;n-=4)if(0!==r[n])return Math.floor(.25*(n-3)/e)}e.exports=n,n.canvas=document.createElement("canvas"),n.cache={}},{}],189:[function(t,e,r){e.exports=function(t,e){if("string"!=typeof t)throw new TypeError("must specify type string");if(e=e||{},"undefined"==typeof document&&!e.canvas)return null;var r=e.canvas||document.createElement("canvas");"number"==typeof e.width&&(r.width=e.width);"number"==typeof e.height&&(r.height=e.height);var n,i=e;try{var a=[t];0===t.indexOf("webgl")&&a.push("experimental-"+t);for(var o=0;o halfCharStep + halfCharWidth ||\n\t\t\t\t\tfloor(uv.x) < halfCharStep - halfCharWidth) return;\n\n\t\t\t\tuv += charId * charStep;\n\t\t\t\tuv = uv / atlasSize;\n\n\t\t\t\tvec4 color = fontColor;\n\t\t\t\tvec4 mask = texture2D(atlas, uv);\n\n\t\t\t\tfloat maskY = lightness(mask);\n\t\t\t\t// float colorY = lightness(color);\n\t\t\t\tcolor.a *= maskY;\n\t\t\t\tcolor.a *= opacity;\n\n\t\t\t\t// color.a += .1;\n\n\t\t\t\t// antialiasing, see yiq 
color space y-channel formula\n\t\t\t\t// color.rgb += (1. - color.rgb) * (1. - mask.rgb);\n\n\t\t\t\tgl_FragColor = color;\n\t\t\t}"});return{regl:t,draw:e,atlas:{}}},T.prototype.update=function(t){var e=this;if("string"==typeof t)t={text:t};else if(!t)return;null!=(t=i(t,{position:"position positions coord coords coordinates",font:"font fontFace fontface typeface cssFont css-font family fontFamily",fontSize:"fontSize fontsize size font-size",text:"text texts chars characters value values symbols",align:"align alignment textAlign textbaseline",baseline:"baseline textBaseline textbaseline",direction:"dir direction textDirection",color:"color colour fill fill-color fillColor textColor textcolor",kerning:"kerning kern",range:"range dataBox",viewport:"vp viewport viewBox viewbox viewPort",opacity:"opacity alpha transparency visible visibility opaque",offset:"offset positionOffset padding shift indent indentation"},!0)).opacity&&(Array.isArray(t.opacity)?this.opacity=t.opacity.map((function(t){return parseFloat(t)})):this.opacity=parseFloat(t.opacity)),null!=t.viewport&&(this.viewport=f(t.viewport),this.viewportArray=[this.viewport.x,this.viewport.y,this.viewport.width,this.viewport.height]),null==this.viewport&&(this.viewport={x:0,y:0,width:this.gl.drawingBufferWidth,height:this.gl.drawingBufferHeight},this.viewportArray=[this.viewport.x,this.viewport.y,this.viewport.width,this.viewport.height]),null!=t.kerning&&(this.kerning=t.kerning),null!=t.offset&&("number"==typeof t.offset&&(t.offset=[t.offset,0]),this.positionOffset=y(t.offset)),t.direction&&(this.direction=t.direction),t.range&&(this.range=t.range,this.scale=[1/(t.range[2]-t.range[0]),1/(t.range[3]-t.range[1])],this.translate=[-t.range[0],-t.range[1]]),t.scale&&(this.scale=t.scale),t.translate&&(this.translate=t.translate),this.scale||(this.scale=[1/this.viewport.width,1/this.viewport.height]),this.translate||(this.translate=[0,0]),this.font.length||t.font||(t.font=T.baseFontSize+"px sans-serif");var 
r,a=!1,o=!1;if(t.font&&(Array.isArray(t.font)?t.font:[t.font]).forEach((function(t,r){if("string"==typeof t)try{t=n.parse(t)}catch(e){t=n.parse(T.baseFontSize+"px "+t)}else t=n.parse(n.stringify(t));var i=n.stringify({size:T.baseFontSize,family:t.family,stretch:_?t.stretch:void 0,variant:t.variant,weight:t.weight,style:t.style}),s=p(t.size),l=Math.round(s[0]*d(s[1]));if(l!==e.fontSize[r]&&(o=!0,e.fontSize[r]=l),!(e.font[r]&&i==e.font[r].baseString||(a=!0,e.font[r]=T.fonts[i],e.font[r]))){var c=t.family.join(", "),u=[t.style];t.style!=t.variant&&u.push(t.variant),t.variant!=t.weight&&u.push(t.weight),_&&t.weight!=t.stretch&&u.push(t.stretch),e.font[r]={baseString:i,family:c,weight:t.weight,stretch:t.stretch,style:t.style,variant:t.variant,width:{},kerning:{},metrics:v(c,{origin:"top",fontSize:T.baseFontSize,fontStyle:u.join(" ")})},T.fonts[i]=e.font[r]}})),(a||o)&&this.font.forEach((function(r,i){var a=n.stringify({size:e.fontSize[i],family:r.family,stretch:_?r.stretch:void 0,variant:r.variant,weight:r.weight,style:r.style});if(e.fontAtlas[i]=e.shader.atlas[a],!e.fontAtlas[i]){var o=r.metrics;e.shader.atlas[a]=e.fontAtlas[i]={fontString:a,step:2*Math.ceil(e.fontSize[i]*o.bottom*.5),em:e.fontSize[i],cols:0,rows:0,height:0,width:0,chars:[],ids:{},texture:e.regl.texture()}}null==t.text&&(t.text=e.text)})),"string"==typeof t.text&&t.position&&t.position.length>2){for(var s=Array(.5*t.position.length),h=0;h2){for(var w=!t.position[0].length,k=u.mallocFloat(2*this.count),A=0,M=0;A1?e.align[r]:e.align[0]:e.align;if("number"==typeof n)return n;switch(n){case"right":case"end":return-t;case"center":case"centre":case"middle":return.5*-t}return 0}))),null==this.baseline&&null==t.baseline&&(t.baseline=0),null!=t.baseline&&(this.baseline=t.baseline,Array.isArray(this.baseline)||(this.baseline=[this.baseline]),this.baselineOffset=this.baseline.map((function(t,r){var n=(e.font[r]||e.font[0]).metrics,i=0;return i+=.5*n.bottom,i+="number"==typeof 
t?t-n.baseline:-n[t],i*=-1}))),null!=t.color)if(t.color||(t.color="transparent"),"string"!=typeof t.color&&isNaN(t.color)){var q;if("number"==typeof t.color[0]&&t.color.length>this.counts.length){var G=t.color.length;q=u.mallocUint8(G);for(var Y=(t.color.subarray||t.color.slice).bind(t.color),W=0;W4||this.baselineOffset.length>1||this.align&&this.align.length>1||this.fontAtlas.length>1||this.positionOffset.length>2){var J=Math.max(.5*this.position.length||0,.25*this.color.length||0,this.baselineOffset.length||0,this.alignOffset.length||0,this.font.length||0,this.opacity.length||0,.5*this.positionOffset.length||0);this.batch=Array(J);for(var K=0;K1?this.counts[K]:this.counts[0],offset:this.textOffsets.length>1?this.textOffsets[K]:this.textOffsets[0],color:this.color?this.color.length<=4?this.color:this.color.subarray(4*K,4*K+4):[0,0,0,255],opacity:Array.isArray(this.opacity)?this.opacity[K]:this.opacity,baseline:null!=this.baselineOffset[K]?this.baselineOffset[K]:this.baselineOffset[0],align:this.align?null!=this.alignOffset[K]?this.alignOffset[K]:this.alignOffset[0]:0,atlas:this.fontAtlas[K]||this.fontAtlas[0],positionOffset:this.positionOffset.length>2?this.positionOffset.subarray(2*K,2*K+2):this.positionOffset}}else this.count?this.batch=[{count:this.count,offset:0,color:this.color||[0,0,0,255],opacity:Array.isArray(this.opacity)?this.opacity[0]:this.opacity,baseline:this.baselineOffset[0],align:this.alignOffset?this.alignOffset[0]:0,atlas:this.fontAtlas[0],positionOffset:this.positionOffset}]:this.batch=[]},T.prototype.destroy=function(){},T.prototype.kerning=!0,T.prototype.position={constant:new Float32Array(2)},T.prototype.translate=null,T.prototype.scale=null,T.prototype.font=null,T.prototype.text="",T.prototype.positionOffset=[0,0],T.prototype.opacity=1,T.prototype.color=new 
Uint8Array([0,0,0,255]),T.prototype.alignOffset=[0,0],T.maxAtlasSize=1024,T.atlasCanvas=document.createElement("canvas"),T.atlasContext=T.atlasCanvas.getContext("2d",{alpha:!1}),T.baseFontSize=64,T.fonts={},e.exports=T},{"bit-twiddle":77,"color-normalize":84,"css-font":94,"detect-kerning":119,"es6-weak-map":177,"flatten-vertex-data":186,"font-atlas":187,"font-measure":188,"gl-util/context":221,"is-plain-obj":231,"object-assign":242,"parse-rect":244,"parse-unit":246,"pick-by-alias":248,regl:278,"to-px":309,"typedarray-pool":322}],221:[function(t,e,r){(function(r){(function(){"use strict";var n=t("pick-by-alias");function i(t){if(t.container)if(t.container==document.body)document.body.style.width||(t.canvas.width=t.width||t.pixelRatio*r.innerWidth),document.body.style.height||(t.canvas.height=t.height||t.pixelRatio*r.innerHeight);else{var e=t.container.getBoundingClientRect();t.canvas.width=t.width||e.right-e.left,t.canvas.height=t.height||e.bottom-e.top}}function a(t){return"function"==typeof t.getContext&&"width"in t&&"height"in t}function o(){var t=document.createElement("canvas");return t.style.position="absolute",t.style.top=0,t.style.left=0,t}e.exports=function(t){var e;if(t?"string"==typeof t&&(t={container:t}):t={},a(t)?t={container:t}:t="string"==typeof(e=t).nodeName&&"function"==typeof e.appendChild&&"function"==typeof e.getBoundingClientRect?{container:t}:function(t){return"function"==typeof t.drawArrays||"function"==typeof t.drawElements}(t)?{gl:t}:n(t,{container:"container target element el canvas holder parent parentNode wrapper use ref root node",gl:"gl context webgl glContext",attrs:"attributes attrs contextAttributes",pixelRatio:"pixelRatio pxRatio px ratio pxratio pixelratio",width:"w width",height:"h height"},!0),t.pixelRatio||(t.pixelRatio=r.pixelRatio||1),t.gl)return t.gl;if(t.canvas&&(t.container=t.canvas.parentNode),t.container){if("string"==typeof t.container){var s=document.querySelector(t.container);if(!s)throw Error("Element "+t.container+" 
is not found");t.container=s}a(t.container)?(t.canvas=t.container,t.container=t.canvas.parentNode):t.canvas||(t.canvas=o(),t.container.appendChild(t.canvas),i(t))}else if(!t.canvas){if("undefined"==typeof document)throw Error("Not DOM environment. Use headless-gl.");t.container=document.body||document.documentElement,t.canvas=o(),t.container.appendChild(t.canvas),i(t)}return t.gl||["webgl","experimental-webgl","webgl-experimental"].some((function(e){try{t.gl=t.canvas.getContext(e,t.attrs)}catch(t){}return t.gl})),t.gl}}).call(this)}).call(this,"undefined"!=typeof global?global:"undefined"!=typeof self?self:"undefined"!=typeof window?window:{})},{"pick-by-alias":248}],222:[function(t,e,r){e.exports=function(t){"string"==typeof t&&(t=[t]);for(var e=[].slice.call(arguments,1),r=[],n=0;n */ -r.read=function(t,e,r,n,i){var a,o,s=8*i-n-1,l=(1<>1,u=-7,f=r?i-1:0,h=r?-1:1,p=t[e+f];for(f+=h,a=p&(1<<-u)-1,p>>=-u,u+=s;u>0;a=256*a+t[e+f],f+=h,u-=8);for(o=a&(1<<-u)-1,a>>=-u,u+=n;u>0;o=256*o+t[e+f],f+=h,u-=8);if(0===a)a=1-c;else{if(a===l)return o?NaN:1/0*(p?-1:1);o+=Math.pow(2,n),a-=c}return(p?-1:1)*o*Math.pow(2,a-n)},r.write=function(t,e,r,n,i,a){var o,s,l,c=8*a-i-1,u=(1<>1,h=23===i?Math.pow(2,-24)-Math.pow(2,-77):0,p=n?0:a-1,d=n?1:-1,m=e<0||0===e&&1/e<0?1:0;for(e=Math.abs(e),isNaN(e)||e===1/0?(s=isNaN(e)?1:0,o=u):(o=Math.floor(Math.log(e)/Math.LN2),e*(l=Math.pow(2,-o))<1&&(o--,l*=2),(e+=o+f>=1?h/l:h*Math.pow(2,1-f))*l>=2&&(o++,l/=2),o+f>=u?(s=0,o=u):o+f>=1?(s=(e*l-1)*Math.pow(2,i),o+=f):(s=e*Math.pow(2,f-1)*Math.pow(2,i),o=0));i>=8;t[r+p]=255&s,p+=d,s/=256,i-=8);for(o=o<0;t[r+p]=255&o,p+=d,o/=256,c-=8);t[r+p-d]|=128*m}},{}],226:[function(t,e,r){"function"==typeof Object.create?e.exports=function(t,e){e&&(t.super_=e,t.prototype=Object.create(e.prototype,{constructor:{value:t,enumerable:!1,writable:!0,configurable:!0}}))}:e.exports=function(t,e){if(e){t.super_=e;var r=function(){};r.prototype=e.prototype,t.prototype=new 
r,t.prototype.constructor=t}}},{}],227:[function(t,e,r){e.exports=!0},{}],228:[function(t,e,r){"use strict";e.exports="undefined"!=typeof navigator&&(/MSIE/.test(navigator.userAgent)||/Trident\//.test(navigator.appVersion))},{}],229:[function(t,e,r){"use strict";e.exports=a,e.exports.isMobile=a,e.exports.default=a;var n=/(android|bb\d+|meego).+mobile|avantgo|bada\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\/|plucker|pocket|psp|series[46]0|symbian|treo|up\.(browser|link)|vodafone|wap|windows (ce|phone)|xda|xiino/i,i=/(android|bb\d+|meego).+mobile|avantgo|bada\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\/|plucker|pocket|psp|series[46]0|symbian|treo|up\.(browser|link)|vodafone|wap|windows (ce|phone)|xda|xiino|android|ipad|playbook|silk/i;function a(t){t||(t={});var e=t.ua;if(e||"undefined"==typeof navigator||(e=navigator.userAgent),e&&e.headers&&"string"==typeof e.headers["user-agent"]&&(e=e.headers["user-agent"]),"string"!=typeof e)return!1;var r=t.tablet?i.test(e):n.test(e);return!r&&t.tablet&&t.featureDetect&&navigator&&navigator.maxTouchPoints>1&&-1!==e.indexOf("Macintosh")&&-1!==e.indexOf("Safari")&&(r=!0),r}},{}],230:[function(t,e,r){"use strict";e.exports=function(t){var e=typeof t;return null!==t&&("object"===e||"function"===e)}},{}],231:[function(t,e,r){"use strict";var n=Object.prototype.toString;e.exports=function(t){var e;return"[object Object]"===n.call(t)&&(null===(e=Object.getPrototypeOf(t))||e===Object.getPrototypeOf({}))}},{}],232:[function(t,e,r){"use strict";e.exports=function(t){for(var e,r=t.length,n=0;n13)&&32!==e&&133!==e&&160!==e&&5760!==e&&6158!==e&&(e<8192||e>8205)&&8232!==e&&8233!==e&&8239!==e&&8287!==e&&8288!==e&&12288!==e&&65279!==e)return!1;return!0}},{}],233:[function(t,e,r){"use 
strict";e.exports=function(t){return"string"==typeof t&&(t=t.trim(),!!(/^[mzlhvcsqta]\s*[-+.0-9][^mlhvzcsqta]+/i.test(t)&&/[\dz]$/i.test(t)&&t.length>4))}},{}],234:[function(t,e,r){!function(t,n){"object"==typeof r&&void 0!==e?e.exports=n():(t=t||self).mapboxgl=n()}(this,(function(){"use strict";var t,e,r;function n(n,i){if(t)if(e){var a="var sharedChunk = {}; ("+t+")(sharedChunk); ("+e+")(sharedChunk);",o={};t(o),(r=i(o)).workerUrl=window.URL.createObjectURL(new Blob([a],{type:"text/javascript"}))}else e=i;else t=i}return n(0,(function(t){function e(t,e){return t(e={exports:{}},e.exports),e.exports}var r=n;function n(t,e,r,n){this.cx=3*t,this.bx=3*(r-t)-this.cx,this.ax=1-this.cx-this.bx,this.cy=3*e,this.by=3*(n-e)-this.cy,this.ay=1-this.cy-this.by,this.p1x=t,this.p1y=n,this.p2x=r,this.p2y=n}n.prototype.sampleCurveX=function(t){return((this.ax*t+this.bx)*t+this.cx)*t},n.prototype.sampleCurveY=function(t){return((this.ay*t+this.by)*t+this.cy)*t},n.prototype.sampleCurveDerivativeX=function(t){return(3*this.ax*t+2*this.bx)*t+this.cx},n.prototype.solveCurveX=function(t,e){var r,n,i,a,o;for(void 0===e&&(e=1e-6),i=t,o=0;o<8;o++){if(a=this.sampleCurveX(i)-t,Math.abs(a)(n=1))return n;for(;ra?r=i:n=i,i=.5*(n-r)+r}return i},n.prototype.solve=function(t,e){return this.sampleCurveY(this.solveCurveX(t,e))};var i=a;function a(t,e){this.x=t,this.y=e}function o(t,e,n,i){var a=new r(t,e,n,i);return function(t){return a.solve(t)}}a.prototype={clone:function(){return new a(this.x,this.y)},add:function(t){return this.clone()._add(t)},sub:function(t){return this.clone()._sub(t)},multByPoint:function(t){return this.clone()._multByPoint(t)},divByPoint:function(t){return this.clone()._divByPoint(t)},mult:function(t){return this.clone()._mult(t)},div:function(t){return this.clone()._div(t)},rotate:function(t){return this.clone()._rotate(t)},rotateAround:function(t,e){return this.clone()._rotateAround(t,e)},matMult:function(t){return this.clone()._matMult(t)},unit:function(){return 
this.clone()._unit()},perp:function(){return this.clone()._perp()},round:function(){return this.clone()._round()},mag:function(){return Math.sqrt(this.x*this.x+this.y*this.y)},equals:function(t){return this.x===t.x&&this.y===t.y},dist:function(t){return Math.sqrt(this.distSqr(t))},distSqr:function(t){var e=t.x-this.x,r=t.y-this.y;return e*e+r*r},angle:function(){return Math.atan2(this.y,this.x)},angleTo:function(t){return Math.atan2(this.y-t.y,this.x-t.x)},angleWith:function(t){return this.angleWithSep(t.x,t.y)},angleWithSep:function(t,e){return Math.atan2(this.x*e-this.y*t,this.x*t+this.y*e)},_matMult:function(t){var e=t[0]*this.x+t[1]*this.y,r=t[2]*this.x+t[3]*this.y;return this.x=e,this.y=r,this},_add:function(t){return this.x+=t.x,this.y+=t.y,this},_sub:function(t){return this.x-=t.x,this.y-=t.y,this},_mult:function(t){return this.x*=t,this.y*=t,this},_div:function(t){return this.x/=t,this.y/=t,this},_multByPoint:function(t){return this.x*=t.x,this.y*=t.y,this},_divByPoint:function(t){return this.x/=t.x,this.y/=t.y,this},_unit:function(){return this._div(this.mag()),this},_perp:function(){var t=this.y;return this.y=this.x,this.x=-t,this},_rotate:function(t){var e=Math.cos(t),r=Math.sin(t),n=e*this.x-r*this.y,i=r*this.x+e*this.y;return this.x=n,this.y=i,this},_rotateAround:function(t,e){var r=Math.cos(t),n=Math.sin(t),i=e.x+r*(this.x-e.x)-n*(this.y-e.y),a=e.y+n*(this.x-e.x)+r*(this.y-e.y);return this.x=i,this.y=a,this},_round:function(){return this.x=Math.round(this.x),this.y=Math.round(this.y),this}},a.convert=function(t){return t instanceof a?t:Array.isArray(t)?new a(t[0],t[1]):t};var s=o(.25,.1,.25,1);function l(t,e,r){return Math.min(r,Math.max(e,t))}function c(t,e,r){var n=r-e,i=((t-e)%n+n)%n+e;return i===e?r:i}function u(t){for(var e=[],r=arguments.length-1;r-- >0;)e[r]=arguments[r+1];for(var n=0,i=e;n>e/4).toString(16):([1e7]+-[1e3]+-4e3+-8e3+-1e11).replace(/[018]/g,t)}()}function 
d(t){return!!t&&/^[0-9a-f]{8}-[0-9a-f]{4}-[4][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i.test(t)}function m(t,e){t.forEach((function(t){e[t]&&(e[t]=e[t].bind(e))}))}function g(t,e){return-1!==t.indexOf(e,t.length-e.length)}function v(t,e,r){var n={};for(var i in t)n[i]=e.call(r||this,t[i],i,t);return n}function y(t,e,r){var n={};for(var i in t)e.call(r||this,t[i],i,t)&&(n[i]=t[i]);return n}function x(t){return Array.isArray(t)?t.map(x):"object"==typeof t&&t?v(t,x):t}var b={};function _(t){b[t]||("undefined"!=typeof console&&console.warn(t),b[t]=!0)}function w(t,e,r){return(r.y-t.y)*(e.x-t.x)>(e.y-t.y)*(r.x-t.x)}function T(t){for(var e=0,r=0,n=t.length,i=n-1,a=void 0,o=void 0;r@\,;\:\\"\/\[\]\?\=\{\}\x7F]+)(?:\=(?:([^\x00-\x20\(\)<>@\,;\:\\"\/\[\]\?\=\{\}\x7F]+)|(?:\"((?:[^"\\]|\\.)*)\")))?/g,(function(t,r,n,i){var a=n||i;return e[r]=!a||a.toLowerCase(),""})),e["max-age"]){var r=parseInt(e["max-age"],10);isNaN(r)?delete e["max-age"]:e["max-age"]=r}return e}var M=null;function S(t){if(null==M){var e=t.navigator?t.navigator.userAgent:null;M=!!t.safari||!(!e||!(/\b(iPad|iPhone|iPod)\b/.test(e)||e.match("Safari")&&!e.match("Chrome")))}return M}function E(t){try{var e=self[t];return e.setItem("_mapbox_test_",1),e.removeItem("_mapbox_test_"),!0}catch(t){return!1}}var L,C,P,I,O=self.performance&&self.performance.now?self.performance.now.bind(self.performance):Date.now.bind(Date),z=self.requestAnimationFrame||self.mozRequestAnimationFrame||self.webkitRequestAnimationFrame||self.msRequestAnimationFrame,D=self.cancelAnimationFrame||self.mozCancelAnimationFrame||self.webkitCancelAnimationFrame||self.msCancelAnimationFrame,R={now:O,frame:function(t){var e=z(t);return{cancel:function(){return D(e)}}},getImageData:function(t,e){void 0===e&&(e=0);var r=self.document.createElement("canvas"),n=r.getContext("2d");if(!n)throw new Error("failed to create canvas 2d context");return 
r.width=t.width,r.height=t.height,n.drawImage(t,0,0,t.width,t.height),n.getImageData(-e,-e,t.width+2*e,t.height+2*e)},resolveURL:function(t){return L||(L=self.document.createElement("a")),L.href=t,L.href},hardwareConcurrency:self.navigator.hardwareConcurrency||4,get devicePixelRatio(){return self.devicePixelRatio},get prefersReducedMotion(){return!!self.matchMedia&&(null==C&&(C=self.matchMedia("(prefers-reduced-motion: reduce)")),C.matches)}},F={API_URL:"https://api.mapbox.com",get EVENTS_URL(){return this.API_URL?0===this.API_URL.indexOf("https://api.mapbox.cn")?"https://events.mapbox.cn/events/v2":0===this.API_URL.indexOf("https://api.mapbox.com")?"https://events.mapbox.com/events/v2":null:null},FEEDBACK_URL:"https://apps.mapbox.com/feedback",REQUIRE_ACCESS_TOKEN:!0,ACCESS_TOKEN:null,MAX_PARALLEL_IMAGE_REQUESTS:16},B={supported:!1,testSupport:function(t){if(N||!I)return;j?U(t):P=t}},N=!1,j=!1;function U(t){var e=t.createTexture();t.bindTexture(t.TEXTURE_2D,e);try{if(t.texImage2D(t.TEXTURE_2D,0,t.RGBA,t.RGBA,t.UNSIGNED_BYTE,I),t.isContextLost())return;B.supported=!0}catch(t){}t.deleteTexture(e),N=!0}self.document&&((I=self.document.createElement("img")).onload=function(){P&&U(P),P=null,j=!0},I.onerror=function(){N=!0,P=null},I.src="data:image/webp;base64,UklGRh4AAABXRUJQVlA4TBEAAAAvAQAAAAfQ//73v/+BiOh/AAA=");var V="01";var H=function(t,e){this._transformRequestFn=t,this._customAccessToken=e,this._createSkuToken()};function q(t){return 0===t.indexOf("mapbox:")}H.prototype._createSkuToken=function(){var t=function(){for(var t="",e=0;e<10;e++)t+="0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"[Math.floor(62*Math.random())];return{token:["1",V,t].join(""),tokenExpiresAt:Date.now()+432e5}}();this._skuToken=t.token,this._skuTokenExpiresAt=t.tokenExpiresAt},H.prototype._isSkuTokenExpired=function(){return Date.now()>this._skuTokenExpiresAt},H.prototype.transformRequest=function(t,e){return 
this._transformRequestFn&&this._transformRequestFn(t,e)||{url:t}},H.prototype.normalizeStyleURL=function(t,e){if(!q(t))return t;var r=X(t);return r.path="/styles/v1"+r.path,this._makeAPIURL(r,this._customAccessToken||e)},H.prototype.normalizeGlyphsURL=function(t,e){if(!q(t))return t;var r=X(t);return r.path="/fonts/v1"+r.path,this._makeAPIURL(r,this._customAccessToken||e)},H.prototype.normalizeSourceURL=function(t,e){if(!q(t))return t;var r=X(t);return r.path="/v4/"+r.authority+".json",r.params.push("secure"),this._makeAPIURL(r,this._customAccessToken||e)},H.prototype.normalizeSpriteURL=function(t,e,r,n){var i=X(t);return q(t)?(i.path="/styles/v1"+i.path+"/sprite"+e+r,this._makeAPIURL(i,this._customAccessToken||n)):(i.path+=""+e+r,Z(i))},H.prototype.normalizeTileURL=function(t,e){if(this._isSkuTokenExpired()&&this._createSkuToken(),t&&!q(t))return t;var r=X(t),n=R.devicePixelRatio>=2||512===e?"@2x":"",i=B.supported?".webp":"$1";r.path=r.path.replace(/(\.(png|jpg)\d*)(?=$)/,""+n+i),r.path=r.path.replace(/^.+\/v4\//,"/"),r.path="/v4"+r.path;var a=this._customAccessToken||function(t){for(var e=0,r=t;e=1&&self.localStorage.setItem(e,JSON.stringify(this.eventData))}catch(t){_("Unable to write to LocalStorage")}},K.prototype.processRequests=function(t){},K.prototype.postEvent=function(t,e,r,n){var i=this;if(F.EVENTS_URL){var a=X(F.EVENTS_URL);a.params.push("access_token="+(n||F.ACCESS_TOKEN||""));var o={event:this.type,created:new Date(t).toISOString(),sdkIdentifier:"mapbox-gl-js",sdkVersion:"1.10.1",skuId:V,userId:this.anonId},s=e?u(o,e):o,l={url:Z(a),headers:{"Content-Type":"text/plain"},body:JSON.stringify([s])};this.pendingRequest=bt(l,(function(t){i.pendingRequest=null,r(t),i.saveEventData(),i.processRequests(n)}))}},K.prototype.queueRequest=function(t,e){this.queue.push(t),this.processRequests(e)};var Q,$,tt=function(t){function e(){t.call(this,"map.load"),this.success={},this.skuToken=""}return 
t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype.postMapLoadEvent=function(t,e,r,n){this.skuToken=r,(F.EVENTS_URL&&n||F.ACCESS_TOKEN&&Array.isArray(t)&&t.some((function(t){return q(t)||Y(t)})))&&this.queueRequest({id:e,timestamp:Date.now()},n)},e.prototype.processRequests=function(t){var e=this;if(!this.pendingRequest&&0!==this.queue.length){var r=this.queue.shift(),n=r.id,i=r.timestamp;n&&this.success[n]||(this.anonId||this.fetchEventData(),d(this.anonId)||(this.anonId=p()),this.postEvent(i,{skuToken:this.skuToken},(function(t){t||n&&(e.success[n]=!0)}),t))}},e}(K),et=new(function(t){function e(e){t.call(this,"appUserTurnstile"),this._customAccessToken=e}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype.postTurnstileEvent=function(t,e){F.EVENTS_URL&&F.ACCESS_TOKEN&&Array.isArray(t)&&t.some((function(t){return q(t)||Y(t)}))&&this.queueRequest(Date.now(),e)},e.prototype.processRequests=function(t){var e=this;if(!this.pendingRequest&&0!==this.queue.length){this.anonId&&this.eventData.lastSuccess&&this.eventData.tokenU||this.fetchEventData();var r=J(F.ACCESS_TOKEN),n=r?r.u:F.ACCESS_TOKEN,i=n!==this.eventData.tokenU;d(this.anonId)||(this.anonId=p(),i=!0);var a=this.queue.shift();if(this.eventData.lastSuccess){var o=new Date(this.eventData.lastSuccess),s=new Date(a),l=(a-this.eventData.lastSuccess)/864e5;i=i||l>=1||l<-1||o.getDate()!==s.getDate()}else i=!0;if(!i)return this.processRequests();this.postEvent(a,{"enabled.telemetry":!1},(function(t){t||(e.eventData.lastSuccess=a,e.eventData.tokenU=n)}),t)}},e}(K)),rt=et.postTurnstileEvent.bind(et),nt=new tt,it=nt.postMapLoadEvent.bind(nt),at=500,ot=50;function st(){self.caches&&!Q&&(Q=self.caches.open("mapbox-tiles"))}function lt(t,e,r){if(st(),Q){var n={status:e.status,statusText:e.statusText,headers:new self.Headers};e.headers.forEach((function(t,e){return n.headers.set(e,t)}));var 
i=A(e.headers.get("Cache-Control")||"");if(!i["no-store"])i["max-age"]&&n.headers.set("Expires",new Date(r+1e3*i["max-age"]).toUTCString()),new Date(n.headers.get("Expires")).getTime()-r<42e4||function(t,e){if(void 0===$)try{new Response(new ReadableStream),$=!0}catch(t){$=!1}$?e(t.body):t.blob().then(e)}(e,(function(e){var r=new self.Response(e,n);st(),Q&&Q.then((function(e){return e.put(ct(t.url),r)})).catch((function(t){return _(t.message)}))}))}}function ct(t){var e=t.indexOf("?");return e<0?t:t.slice(0,e)}function ut(t,e){if(st(),!Q)return e(null);var r=ct(t.url);Q.then((function(t){t.match(r).then((function(n){var i=function(t){if(!t)return!1;var e=new Date(t.headers.get("Expires")||0),r=A(t.headers.get("Cache-Control")||"");return e>Date.now()&&!r["no-cache"]}(n);t.delete(r),i&&t.put(r,n.clone()),e(null,n,i)})).catch(e)})).catch(e)}var ft,ht=1/0;function pt(){return null==ft&&(ft=self.OffscreenCanvas&&new self.OffscreenCanvas(1,1).getContext("2d")&&"function"==typeof self.createImageBitmap),ft}var dt={Unknown:"Unknown",Style:"Style",Source:"Source",Tile:"Tile",Glyphs:"Glyphs",SpriteImage:"SpriteImage",SpriteJSON:"SpriteJSON",Image:"Image"};"function"==typeof Object.freeze&&Object.freeze(dt);var mt=function(t){function e(e,r,n){401===r&&Y(n)&&(e+=": you may have provided an invalid Mapbox access token. 
See https://www.mapbox.com/api-documentation/#access-tokens-and-token-scopes"),t.call(this,e),this.status=r,this.url=n,this.name=this.constructor.name,this.message=e}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype.toString=function(){return this.name+": "+this.message+" ("+this.status+"): "+this.url},e}(Error),gt=k()?function(){return self.worker&&self.worker.referrer}:function(){return("blob:"===self.location.protocol?self.parent:self).location.href};function vt(t,e){var r,n=new self.AbortController,i=new self.Request(t.url,{method:t.method||"GET",body:t.body,credentials:t.credentials,headers:t.headers,referrer:gt(),signal:n.signal}),a=!1,o=!1,s=(r=i.url).indexOf("sku=")>0&&Y(r);"json"===t.type&&i.headers.set("Accept","application/json");var l=function(r,n,a){if(!o){if(r&&"SecurityError"!==r.message&&_(r),n&&a)return c(n);var l=Date.now();self.fetch(i).then((function(r){if(r.ok){var n=s?r.clone():null;return c(r,n,l)}return e(new mt(r.statusText,r.status,t.url))})).catch((function(t){20!==t.code&&e(new Error(t.message))}))}},c=function(r,n,s){("arrayBuffer"===t.type?r.arrayBuffer():"json"===t.type?r.json():r.text()).then((function(t){o||(n&&s&<(i,n,s),a=!0,e(null,t,r.headers.get("Cache-Control"),r.headers.get("Expires")))})).catch((function(t){o||e(new Error(t.message))}))};return s?ut(i,l):l(null,null),{cancel:function(){o=!0,a||n.abort()}}}var yt=function(t,e){if(r=t.url,!(/^file:/.test(r)||/^file:/.test(gt())&&!/^\w+:/.test(r))){if(self.fetch&&self.Request&&self.AbortController&&self.Request.prototype.hasOwnProperty("signal"))return vt(t,e);if(k()&&self.worker&&self.worker.actor){return self.worker.actor.send("getResource",t,e,void 0,!0)}}var r;return function(t,e){var r=new self.XMLHttpRequest;for(var n in 
r.open(t.method||"GET",t.url,!0),"arrayBuffer"===t.type&&(r.responseType="arraybuffer"),t.headers)r.setRequestHeader(n,t.headers[n]);return"json"===t.type&&(r.responseType="text",r.setRequestHeader("Accept","application/json")),r.withCredentials="include"===t.credentials,r.onerror=function(){e(new Error(r.statusText))},r.onload=function(){if((r.status>=200&&r.status<300||0===r.status)&&null!==r.response){var n=r.response;if("json"===t.type)try{n=JSON.parse(r.response)}catch(t){return e(t)}e(null,n,r.getResponseHeader("Cache-Control"),r.getResponseHeader("Expires"))}else e(new mt(r.statusText,r.status,t.url))},r.send(t.body),{cancel:function(){return r.abort()}}}(t,e)},xt=function(t,e){return yt(u(t,{type:"arrayBuffer"}),e)},bt=function(t,e){return yt(u(t,{method:"POST"}),e)};var _t,wt;_t=[],wt=0;var Tt=function(t,e){if(B.supported&&(t.headers||(t.headers={}),t.headers.accept="image/webp,*/*"),wt>=F.MAX_PARALLEL_IMAGE_REQUESTS){var r={requestParameters:t,callback:e,cancelled:!1,cancel:function(){this.cancelled=!0}};return _t.push(r),r}wt++;var n=!1,i=function(){if(!n)for(n=!0,wt--;_t.length&&wt0||this._oneTimeListeners&&this._oneTimeListeners[t]&&this._oneTimeListeners[t].length>0||this._eventedParent&&this._eventedParent.listens(t)},Et.prototype.setEventedParent=function(t,e){return this._eventedParent=t,this._eventedParentData=e,this};var 
Lt={$version:8,$root:{version:{required:!0,type:"enum",values:[8]},name:{type:"string"},metadata:{type:"*"},center:{type:"array",value:"number"},zoom:{type:"number"},bearing:{type:"number",default:0,period:360,units:"degrees"},pitch:{type:"number",default:0,units:"degrees"},light:{type:"light"},sources:{required:!0,type:"sources"},sprite:{type:"string"},glyphs:{type:"string"},transition:{type:"transition"},layers:{required:!0,type:"array",value:"layer"}},sources:{"*":{type:"source"}},source:["source_vector","source_raster","source_raster_dem","source_geojson","source_video","source_image"],source_vector:{type:{required:!0,type:"enum",values:{vector:{}}},url:{type:"string"},tiles:{type:"array",value:"string"},bounds:{type:"array",value:"number",length:4,default:[-180,-85.051129,180,85.051129]},scheme:{type:"enum",values:{xyz:{},tms:{}},default:"xyz"},minzoom:{type:"number",default:0},maxzoom:{type:"number",default:22},attribution:{type:"string"},promoteId:{type:"promoteId"},"*":{type:"*"}},source_raster:{type:{required:!0,type:"enum",values:{raster:{}}},url:{type:"string"},tiles:{type:"array",value:"string"},bounds:{type:"array",value:"number",length:4,default:[-180,-85.051129,180,85.051129]},minzoom:{type:"number",default:0},maxzoom:{type:"number",default:22},tileSize:{type:"number",default:512,units:"pixels"},scheme:{type:"enum",values:{xyz:{},tms:{}},default:"xyz"},attribution:{type:"string"},"*":{type:"*"}},source_raster_dem:{type:{required:!0,type:"enum",values:{"raster-dem":{}}},url:{type:"string"},tiles:{type:"array",value:"string"},bounds:{type:"array",value:"number",length:4,default:[-180,-85.051129,180,85.051129]},minzoom:{type:"number",default:0},maxzoom:{type:"number",default:22},tileSize:{type:"number",default:512,units:"pixels"},attribution:{type:"string"},encoding:{type:"enum",values:{terrarium:{},mapbox:{}},default:"mapbox"},"*":{type:"*"}},source_geojson:{type:{required:!0,type:"enum",values:{geojson:{}}},data:{type:"*"},maxzoom:{type:"number",defaul
t:18},attribution:{type:"string"},buffer:{type:"number",default:128,maximum:512,minimum:0},tolerance:{type:"number",default:.375},cluster:{type:"boolean",default:!1},clusterRadius:{type:"number",default:50,minimum:0},clusterMaxZoom:{type:"number"},clusterProperties:{type:"*"},lineMetrics:{type:"boolean",default:!1},generateId:{type:"boolean",default:!1},promoteId:{type:"promoteId"}},source_video:{type:{required:!0,type:"enum",values:{video:{}}},urls:{required:!0,type:"array",value:"string"},coordinates:{required:!0,type:"array",length:4,value:{type:"array",length:2,value:"number"}}},source_image:{type:{required:!0,type:"enum",values:{image:{}}},url:{required:!0,type:"string"},coordinates:{required:!0,type:"array",length:4,value:{type:"array",length:2,value:"number"}}},layer:{id:{type:"string",required:!0},type:{type:"enum",values:{fill:{},line:{},symbol:{},circle:{},heatmap:{},"fill-extrusion":{},raster:{},hillshade:{},background:{}},required:!0},metadata:{type:"*"},source:{type:"string"},"source-layer":{type:"string"},minzoom:{type:"number",minimum:0,maximum:24},maxzoom:{type:"number",minimum:0,maximum:24},filter:{type:"filter"},layout:{type:"layout"},paint:{type:"paint"}},layout:["layout_fill","layout_line","layout_circle","layout_heatmap","layout_fill-extrusion","layout_symbol","layout_raster","layout_hillshade","layout_background"],layout_background:{visibility:{type:"enum",values:{visible:{},none:{}},default:"visible","property-type":"constant"}},layout_fill:{"fill-sort-key":{type:"number",expression:{interpolated:!1,parameters:["zoom","feature"]},"property-type":"data-driven"},visibility:{type:"enum",values:{visible:{},none:{}},default:"visible","property-type":"constant"}},layout_circle:{"circle-sort-key":{type:"number",expression:{interpolated:!1,parameters:["zoom","feature"]},"property-type":"data-driven"},visibility:{type:"enum",values:{visible:{},none:{}},default:"visible","property-type":"constant"}},layout_heatmap:{visibility:{type:"enum",values:{visibl
e:{},none:{}},default:"visible","property-type":"constant"}},"layout_fill-extrusion":{visibility:{type:"enum",values:{visible:{},none:{}},default:"visible","property-type":"constant"}},layout_line:{"line-cap":{type:"enum",values:{butt:{},round:{},square:{}},default:"butt",expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"line-join":{type:"enum",values:{bevel:{},round:{},miter:{}},default:"miter",expression:{interpolated:!1,parameters:["zoom","feature"]},"property-type":"data-driven"},"line-miter-limit":{type:"number",default:2,requires:[{"line-join":"miter"}],expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"line-round-limit":{type:"number",default:1.05,requires:[{"line-join":"round"}],expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"line-sort-key":{type:"number",expression:{interpolated:!1,parameters:["zoom","feature"]},"property-type":"data-driven"},visibility:{type:"enum",values:{visible:{},none:{}},default:"visible","property-type":"constant"}},layout_symbol:{"symbol-placement":{type:"enum",values:{point:{},line:{},"line-center":{}},default:"point",expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"symbol-spacing":{type:"number",default:250,minimum:1,units:"pixels",requires:[{"symbol-placement":"line"}],expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"symbol-avoid-edges":{type:"boolean",default:!1,expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"symbol-sort-key":{type:"number",expression:{interpolated:!1,parameters:["zoom","feature"]},"property-type":"data-driven"},"symbol-z-order":{type:"enum",values:{auto:{},"viewport-y":{},source:{}},default:"auto",expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"icon-allow-overlap":{type:"boolean",default:!1,requires:["icon-image"],expression:{interpolated:!1,parameters:["zoom"]},"property-type"
:"data-constant"},"icon-ignore-placement":{type:"boolean",default:!1,requires:["icon-image"],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"icon-optional":{type:"boolean",default:!1,requires:["icon-image","text-field"],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"icon-rotation-alignment":{type:"enum",values:{map:{},viewport:{},auto:{}},default:"auto",requires:["icon-image"],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"icon-size":{type:"number",default:1,minimum:0,units:"factor of the original icon size",requires:["icon-image"],expression:{interpolated:!0,parameters:["zoom","feature"]},"property-type":"data-driven"},"icon-text-fit":{type:"enum",values:{none:{},width:{},height:{},both:{}},default:"none",requires:["icon-image","text-field"],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"icon-text-fit-padding":{type:"array",value:"number",length:4,default:[0,0,0,0],units:"pixels",requires:["icon-image","text-field",{"icon-text-fit":["both","width","height"]}],expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"icon-image":{type:"resolvedImage",tokens:!0,expression:{interpolated:!1,parameters:["zoom","feature"]},"property-type":"data-driven"},"icon-rotate":{type:"number",default:0,period:360,units:"degrees",requires:["icon-image"],expression:{interpolated:!0,parameters:["zoom","feature"]},"property-type":"data-driven"},"icon-padding":{type:"number",default:2,minimum:0,units:"pixels",requires:["icon-image"],expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"icon-keep-upright":{type:"boolean",default:!1,requires:["icon-image",{"icon-rotation-alignment":"map"},{"symbol-placement":["line","line-center"]}],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"icon-offset":{type:"array",value:"number",length:2,default:[0,0],requires:["icon-i
mage"],expression:{interpolated:!0,parameters:["zoom","feature"]},"property-type":"data-driven"},"icon-anchor":{type:"enum",values:{center:{},left:{},right:{},top:{},bottom:{},"top-left":{},"top-right":{},"bottom-left":{},"bottom-right":{}},default:"center",requires:["icon-image"],expression:{interpolated:!1,parameters:["zoom","feature"]},"property-type":"data-driven"},"icon-pitch-alignment":{type:"enum",values:{map:{},viewport:{},auto:{}},default:"auto",requires:["icon-image"],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"text-pitch-alignment":{type:"enum",values:{map:{},viewport:{},auto:{}},default:"auto",requires:["text-field"],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"text-rotation-alignment":{type:"enum",values:{map:{},viewport:{},auto:{}},default:"auto",requires:["text-field"],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"text-field":{type:"formatted",default:"",tokens:!0,expression:{interpolated:!1,parameters:["zoom","feature"]},"property-type":"data-driven"},"text-font":{type:"array",value:"string",default:["Open Sans Regular","Arial Unicode MS 
Regular"],requires:["text-field"],expression:{interpolated:!1,parameters:["zoom","feature"]},"property-type":"data-driven"},"text-size":{type:"number",default:16,minimum:0,units:"pixels",requires:["text-field"],expression:{interpolated:!0,parameters:["zoom","feature"]},"property-type":"data-driven"},"text-max-width":{type:"number",default:10,minimum:0,units:"ems",requires:["text-field"],expression:{interpolated:!0,parameters:["zoom","feature"]},"property-type":"data-driven"},"text-line-height":{type:"number",default:1.2,units:"ems",requires:["text-field"],expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"text-letter-spacing":{type:"number",default:0,units:"ems",requires:["text-field"],expression:{interpolated:!0,parameters:["zoom","feature"]},"property-type":"data-driven"},"text-justify":{type:"enum",values:{auto:{},left:{},center:{},right:{}},default:"center",requires:["text-field"],expression:{interpolated:!1,parameters:["zoom","feature"]},"property-type":"data-driven"},"text-radial-offset":{type:"number",units:"ems",default:0,requires:["text-field"],"property-type":"data-driven",expression:{interpolated:!0,parameters:["zoom","feature"]}},"text-variable-anchor":{type:"array",value:"enum",values:{center:{},left:{},right:{},top:{},bottom:{},"top-left":{},"top-right":{},"bottom-left":{},"bottom-right":{}},requires:["text-field",{"symbol-placement":["point"]}],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"text-anchor":{type:"enum",values:{center:{},left:{},right:{},top:{},bottom:{},"top-left":{},"top-right":{},"bottom-left":{},"bottom-right":{}},default:"center",requires:["text-field",{"!":"text-variable-anchor"}],expression:{interpolated:!1,parameters:["zoom","feature"]},"property-type":"data-driven"},"text-max-angle":{type:"number",default:45,units:"degrees",requires:["text-field",{"symbol-placement":["line","line-center"]}],expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-co
nstant"},"text-writing-mode":{type:"array",value:"enum",values:{horizontal:{},vertical:{}},requires:["text-field",{"symbol-placement":["point"]}],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"text-rotate":{type:"number",default:0,period:360,units:"degrees",requires:["text-field"],expression:{interpolated:!0,parameters:["zoom","feature"]},"property-type":"data-driven"},"text-padding":{type:"number",default:2,minimum:0,units:"pixels",requires:["text-field"],expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"text-keep-upright":{type:"boolean",default:!0,requires:["text-field",{"text-rotation-alignment":"map"},{"symbol-placement":["line","line-center"]}],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"text-transform":{type:"enum",values:{none:{},uppercase:{},lowercase:{}},default:"none",requires:["text-field"],expression:{interpolated:!1,parameters:["zoom","feature"]},"property-type":"data-driven"},"text-offset":{type:"array",value:"number",units:"ems",length:2,default:[0,0],requires:["text-field",{"!":"text-radial-offset"}],expression:{interpolated:!0,parameters:["zoom","feature"]},"property-type":"data-driven"},"text-allow-overlap":{type:"boolean",default:!1,requires:["text-field"],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"text-ignore-placement":{type:"boolean",default:!1,requires:["text-field"],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"text-optional":{type:"boolean",default:!1,requires:["text-field","icon-image"],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},visibility:{type:"enum",values:{visible:{},none:{}},default:"visible","property-type":"constant"}},layout_raster:{visibility:{type:"enum",values:{visible:{},none:{}},default:"visible","property-type":"constant"}},layout_hillshade:{visibility:{type:"enum",values:{visible:{},none:{}},default:"visi
ble","property-type":"constant"}},filter:{type:"array",value:"*"},filter_operator:{type:"enum",values:{"==":{},"!=":{},">":{},">=":{},"<":{},"<=":{},in:{},"!in":{},all:{},any:{},none:{},has:{},"!has":{},within:{}}},geometry_type:{type:"enum",values:{Point:{},LineString:{},Polygon:{}}},function:{expression:{type:"expression"},stops:{type:"array",value:"function_stop"},base:{type:"number",default:1,minimum:0},property:{type:"string",default:"$zoom"},type:{type:"enum",values:{identity:{},exponential:{},interval:{},categorical:{}},default:"exponential"},colorSpace:{type:"enum",values:{rgb:{},lab:{},hcl:{}},default:"rgb"},default:{type:"*",required:!1}},function_stop:{type:"array",minimum:0,maximum:24,value:["number","color"],length:2},expression:{type:"array",value:"*",minimum:1},expression_name:{type:"enum",values:{let:{group:"Variable binding"},var:{group:"Variable binding"},literal:{group:"Types"},array:{group:"Types"},at:{group:"Lookup"},in:{group:"Lookup"},"index-of":{group:"Lookup"},slice:{group:"Lookup"},case:{group:"Decision"},match:{group:"Decision"},coalesce:{group:"Decision"},step:{group:"Ramps, scales, curves"},interpolate:{group:"Ramps, scales, curves"},"interpolate-hcl":{group:"Ramps, scales, curves"},"interpolate-lab":{group:"Ramps, scales, curves"},ln2:{group:"Math"},pi:{group:"Math"},e:{group:"Math"},typeof:{group:"Types"},string:{group:"Types"},number:{group:"Types"},boolean:{group:"Types"},object:{group:"Types"},collator:{group:"Types"},format:{group:"Types"},image:{group:"Types"},"number-format":{group:"Types"},"to-string":{group:"Types"},"to-number":{group:"Types"},"to-boolean":{group:"Types"},"to-rgba":{group:"Color"},"to-color":{group:"Types"},rgb:{group:"Color"},rgba:{group:"Color"},get:{group:"Lookup"},has:{group:"Lookup"},length:{group:"Lookup"},properties:{group:"Feature data"},"feature-state":{group:"Feature data"},"geometry-type":{group:"Feature data"},id:{group:"Feature 
data"},zoom:{group:"Zoom"},"heatmap-density":{group:"Heatmap"},"line-progress":{group:"Feature data"},accumulated:{group:"Feature data"},"+":{group:"Math"},"*":{group:"Math"},"-":{group:"Math"},"/":{group:"Math"},"%":{group:"Math"},"^":{group:"Math"},sqrt:{group:"Math"},log10:{group:"Math"},ln:{group:"Math"},log2:{group:"Math"},sin:{group:"Math"},cos:{group:"Math"},tan:{group:"Math"},asin:{group:"Math"},acos:{group:"Math"},atan:{group:"Math"},min:{group:"Math"},max:{group:"Math"},round:{group:"Math"},abs:{group:"Math"},ceil:{group:"Math"},floor:{group:"Math"},distance:{group:"Math"},"==":{group:"Decision"},"!=":{group:"Decision"},">":{group:"Decision"},"<":{group:"Decision"},">=":{group:"Decision"},"<=":{group:"Decision"},all:{group:"Decision"},any:{group:"Decision"},"!":{group:"Decision"},within:{group:"Decision"},"is-supported-script":{group:"String"},upcase:{group:"String"},downcase:{group:"String"},concat:{group:"String"},"resolved-locale":{group:"String"}}},light:{anchor:{type:"enum",default:"viewport",values:{map:{},viewport:{}},"property-type":"data-constant",transition:!1,expression:{interpolated:!1,parameters:["zoom"]}},position:{type:"array",default:[1.15,210,30],length:3,value:"number","property-type":"data-constant",transition:!0,expression:{interpolated:!0,parameters:["zoom"]}},color:{type:"color","property-type":"data-constant",default:"#ffffff",expression:{interpolated:!0,parameters:["zoom"]},transition:!0},intensity:{type:"number","property-type":"data-constant",default:.5,minimum:0,maximum:1,expression:{interpolated:!0,parameters:["zoom"]},transition:!0}},paint:["paint_fill","paint_line","paint_circle","paint_heatmap","paint_fill-extrusion","paint_symbol","paint_raster","paint_hillshade","paint_background"],paint_fill:{"fill-antialias":{type:"boolean",default:!0,expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"fill-opacity":{type:"number",default:1,minimum:0,maximum:1,transition:!0,expression:{interpolated:!0,parame
ters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"fill-color":{type:"color",default:"#000000",transition:!0,requires:[{"!":"fill-pattern"}],expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"fill-outline-color":{type:"color",transition:!0,requires:[{"!":"fill-pattern"},{"fill-antialias":!0}],expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"fill-translate":{type:"array",value:"number",length:2,default:[0,0],transition:!0,units:"pixels",expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"fill-translate-anchor":{type:"enum",values:{map:{},viewport:{}},default:"map",requires:["fill-translate"],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"fill-pattern":{type:"resolvedImage",transition:!0,expression:{interpolated:!1,parameters:["zoom","feature"]},"property-type":"cross-faded-data-driven"}},"paint_fill-extrusion":{"fill-extrusion-opacity":{type:"number",default:1,minimum:0,maximum:1,transition:!0,expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"fill-extrusion-color":{type:"color",default:"#000000",transition:!0,requires:[{"!":"fill-extrusion-pattern"}],expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"fill-extrusion-translate":{type:"array",value:"number",length:2,default:[0,0],transition:!0,units:"pixels",expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"fill-extrusion-translate-anchor":{type:"enum",values:{map:{},viewport:{}},default:"map",requires:["fill-extrusion-translate"],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"fill-extrusion-pattern":{type:"resolvedImage",transition:!0,expression:{interpolated:!1,parameters:["zoom","feature"]},"property-type":"cross-faded-data-driven"},"fill-extrusion-height":{type:
"number",default:0,minimum:0,units:"meters",transition:!0,expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"fill-extrusion-base":{type:"number",default:0,minimum:0,units:"meters",transition:!0,requires:["fill-extrusion-height"],expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"fill-extrusion-vertical-gradient":{type:"boolean",default:!0,transition:!1,expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"}},paint_line:{"line-opacity":{type:"number",default:1,minimum:0,maximum:1,transition:!0,expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"line-color":{type:"color",default:"#000000",transition:!0,requires:[{"!":"line-pattern"}],expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"line-translate":{type:"array",value:"number",length:2,default:[0,0],transition:!0,units:"pixels",expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"line-translate-anchor":{type:"enum",values:{map:{},viewport:{}},default:"map",requires:["line-translate"],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"line-width":{type:"number",default:1,minimum:0,transition:!0,units:"pixels",expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"line-gap-width":{type:"number",default:0,minimum:0,transition:!0,units:"pixels",expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"line-offset":{type:"number",default:0,transition:!0,units:"pixels",expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"line-blur":{type:"number",default:0,minimum:0,transition:!0,units:"pixels",expression:{interpolated:!0,parameters:["zoom","feature","feature-state"
]},"property-type":"data-driven"},"line-dasharray":{type:"array",value:"number",minimum:0,transition:!0,units:"line widths",requires:[{"!":"line-pattern"}],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"cross-faded"},"line-pattern":{type:"resolvedImage",transition:!0,expression:{interpolated:!1,parameters:["zoom","feature"]},"property-type":"cross-faded-data-driven"},"line-gradient":{type:"color",transition:!1,requires:[{"!":"line-dasharray"},{"!":"line-pattern"},{source:"geojson",has:{lineMetrics:!0}}],expression:{interpolated:!0,parameters:["line-progress"]},"property-type":"color-ramp"}},paint_circle:{"circle-radius":{type:"number",default:5,minimum:0,transition:!0,units:"pixels",expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"circle-color":{type:"color",default:"#000000",transition:!0,expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"circle-blur":{type:"number",default:0,transition:!0,expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"circle-opacity":{type:"number",default:1,minimum:0,maximum:1,transition:!0,expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"circle-translate":{type:"array",value:"number",length:2,default:[0,0],transition:!0,units:"pixels",expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"circle-translate-anchor":{type:"enum",values:{map:{},viewport:{}},default:"map",requires:["circle-translate"],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"circle-pitch-scale":{type:"enum",values:{map:{},viewport:{}},default:"map",expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"circle-pitch-alignment":{type:"enum",values:{map:{},viewport:{}},default:"viewport",expression:{interpolated:!1,parameters:["zoom"]},"propert
y-type":"data-constant"},"circle-stroke-width":{type:"number",default:0,minimum:0,transition:!0,units:"pixels",expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"circle-stroke-color":{type:"color",default:"#000000",transition:!0,expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"circle-stroke-opacity":{type:"number",default:1,minimum:0,maximum:1,transition:!0,expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"}},paint_heatmap:{"heatmap-radius":{type:"number",default:30,minimum:1,transition:!0,units:"pixels",expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"heatmap-weight":{type:"number",default:1,minimum:0,transition:!1,expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"heatmap-intensity":{type:"number",default:1,minimum:0,transition:!0,expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"heatmap-color":{type:"color",default:["interpolate",["linear"],["heatmap-density"],0,"rgba(0, 0, 255, 0)",.1,"royalblue",.3,"cyan",.5,"lime",.7,"yellow",1,"red"],transition:!1,expression:{interpolated:!0,parameters:["heatmap-density"]},"property-type":"color-ramp"},"heatmap-opacity":{type:"number",default:1,minimum:0,maximum:1,transition:!0,expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"}},paint_symbol:{"icon-opacity":{type:"number",default:1,minimum:0,maximum:1,transition:!0,requires:["icon-image"],expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"icon-color":{type:"color",default:"#000000",transition:!0,requires:["icon-image"],expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"icon-halo-color":{type:"color",default:"rgba(0, 0, 0, 
0)",transition:!0,requires:["icon-image"],expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"icon-halo-width":{type:"number",default:0,minimum:0,transition:!0,units:"pixels",requires:["icon-image"],expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"icon-halo-blur":{type:"number",default:0,minimum:0,transition:!0,units:"pixels",requires:["icon-image"],expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"icon-translate":{type:"array",value:"number",length:2,default:[0,0],transition:!0,units:"pixels",requires:["icon-image"],expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"icon-translate-anchor":{type:"enum",values:{map:{},viewport:{}},default:"map",requires:["icon-image","icon-translate"],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"text-opacity":{type:"number",default:1,minimum:0,maximum:1,transition:!0,requires:["text-field"],expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"text-color":{type:"color",default:"#000000",transition:!0,overridable:!0,requires:["text-field"],expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"text-halo-color":{type:"color",default:"rgba(0, 0, 0, 
0)",transition:!0,requires:["text-field"],expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"text-halo-width":{type:"number",default:0,minimum:0,transition:!0,units:"pixels",requires:["text-field"],expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"text-halo-blur":{type:"number",default:0,minimum:0,transition:!0,units:"pixels",requires:["text-field"],expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"text-translate":{type:"array",value:"number",length:2,default:[0,0],transition:!0,units:"pixels",requires:["text-field"],expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"text-translate-anchor":{type:"enum",values:{map:{},viewport:{}},default:"map",requires:["text-field","text-translate"],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"}},paint_raster:{"raster-opacity":{type:"number",default:1,minimum:0,maximum:1,transition:!0,expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"raster-hue-rotate":{type:"number",default:0,period:360,transition:!0,units:"degrees",expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"raster-brightness-min":{type:"number",default:0,minimum:0,maximum:1,transition:!0,expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"raster-brightness-max":{type:"number",default:1,minimum:0,maximum:1,transition:!0,expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"raster-saturation":{type:"number",default:0,minimum:-1,maximum:1,transition:!0,expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"raster-contrast":{type:"number",default:0,minimum:-1,maximum:1,transition:!0,expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"raster-resampling":{type:"enu
m",values:{linear:{},nearest:{}},default:"linear",expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"raster-fade-duration":{type:"number",default:300,minimum:0,transition:!1,units:"milliseconds",expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"}},paint_hillshade:{"hillshade-illumination-direction":{type:"number",default:335,minimum:0,maximum:359,transition:!1,expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"hillshade-illumination-anchor":{type:"enum",values:{map:{},viewport:{}},default:"viewport",expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"hillshade-exaggeration":{type:"number",default:.5,minimum:0,maximum:1,transition:!0,expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"hillshade-shadow-color":{type:"color",default:"#000000",transition:!0,expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"hillshade-highlight-color":{type:"color",default:"#FFFFFF",transition:!0,expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"hillshade-accent-color":{type:"color",default:"#000000",transition:!0,expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"}},paint_background:{"background-color":{type:"color",default:"#000000",transition:!0,requires:[{"!":"background-pattern"}],expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"background-pattern":{type:"resolvedImage",transition:!0,expression:{interpolated:!1,parameters:["zoom"]},"property-type":"cross-faded"},"background-opacity":{type:"number",default:1,minimum:0,maximum:1,transition:!0,expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"}},transition:{duration:{type:"number",default:300,minimum:0,units:"milliseconds"},delay:{type:"number",default:0,minimum:0,units:"milliseconds"}},"property-type":{"data-driven":{type:"prop
erty-type"},"cross-faded":{type:"property-type"},"cross-faded-data-driven":{type:"property-type"},"color-ramp":{type:"property-type"},"data-constant":{type:"property-type"},constant:{type:"property-type"}},promoteId:{"*":{type:"string"}}},Ct=function(t,e,r,n){this.message=(t?t+": ":"")+r,n&&(this.identifier=n),null!=e&&e.__line__&&(this.line=e.__line__)};function Pt(t){var e=t.key,r=t.value;return r?[new Ct(e,r,"constants have been deprecated as of v8")]:[]}function It(t){for(var e=[],r=arguments.length-1;r-- >0;)e[r]=arguments[r+1];for(var n=0,i=e;n":"value"===t.itemType.kind?"array":"array<"+e+">"}return t.kind}var Zt=[Ft,Bt,Nt,jt,Ut,Gt,Vt,Wt(Ht),Yt];function Jt(t,e){if("error"===e.kind)return null;if("array"===t.kind){if("array"===e.kind&&(0===e.N&&"value"===e.itemType.kind||!Jt(t.itemType,e.itemType))&&("number"!=typeof t.N||t.N===e.N))return null}else{if(t.kind===e.kind)return null;if("value"===t.kind)for(var r=0,n=Zt;r255?255:t}function i(t){return t<0?0:t>1?1:t}function a(t){return"%"===t[t.length-1]?n(parseFloat(t)/100*255):n(parseInt(t))}function o(t){return"%"===t[t.length-1]?i(parseFloat(t)/100):i(parseFloat(t))}function s(t,e,r){return r<0?r+=1:r>1&&(r-=1),6*r<1?t+(e-t)*r*6:2*r<1?e:3*r<2?t+(e-t)*(2/3-r)*6:t}try{e.parseCSSColor=function(t){var e,i=t.replace(/ /g,"").toLowerCase();if(i in r)return r[i].slice();if("#"===i[0])return 4===i.length?(e=parseInt(i.substr(1),16))>=0&&e<=4095?[(3840&e)>>4|(3840&e)>>8,240&e|(240&e)>>4,15&e|(15&e)<<4,1]:null:7===i.length&&(e=parseInt(i.substr(1),16))>=0&&e<=16777215?[(16711680&e)>>16,(65280&e)>>8,255&e,1]:null;var l=i.indexOf("("),c=i.indexOf(")");if(-1!==l&&c+1===i.length){var u=i.substr(0,l),f=i.substr(l+1,c-(l+1)).split(","),h=1;switch(u){case"rgba":if(4!==f.length)return null;h=o(f.pop());case"rgb":return 3!==f.length?null:[a(f[0]),a(f[1]),a(f[2]),h];case"hsla":if(4!==f.length)return null;h=o(f.pop());case"hsl":if(3!==f.length)return null;var 
p=(parseFloat(f[0])%360+360)%360/360,d=o(f[1]),m=o(f[2]),g=m<=.5?m*(d+1):m+d-m*d,v=2*m-g;return[n(255*s(v,g,p+1/3)),n(255*s(v,g,p)),n(255*s(v,g,p-1/3)),h];default:return null}}return null}}catch(t){}})).parseCSSColor,te=function(t,e,r,n){void 0===n&&(n=1),this.r=t,this.g=e,this.b=r,this.a=n};te.parse=function(t){if(t){if(t instanceof te)return t;if("string"==typeof t){var e=$t(t);if(e)return new te(e[0]/255*e[3],e[1]/255*e[3],e[2]/255*e[3],e[3])}}},te.prototype.toString=function(){var t=this.toArray(),e=t[0],r=t[1],n=t[2],i=t[3];return"rgba("+Math.round(e)+","+Math.round(r)+","+Math.round(n)+","+i+")"},te.prototype.toArray=function(){var t=this.r,e=this.g,r=this.b,n=this.a;return 0===n?[0,0,0,0]:[255*t/n,255*e/n,255*r/n,n]},te.black=new te(0,0,0,1),te.white=new te(1,1,1,1),te.transparent=new te(0,0,0,0),te.red=new te(1,0,0,1);var ee=function(t,e,r){this.sensitivity=t?e?"variant":"case":e?"accent":"base",this.locale=r,this.collator=new Intl.Collator(this.locale?this.locale:[],{sensitivity:this.sensitivity,usage:"search"})};ee.prototype.compare=function(t,e){return this.collator.compare(t,e)},ee.prototype.resolvedLocale=function(){return new Intl.Collator(this.locale?this.locale:[]).resolvedOptions().locale};var re=function(t,e,r,n,i){this.text=t,this.image=e,this.scale=r,this.fontStack=n,this.textColor=i},ne=function(t){this.sections=t};ne.fromString=function(t){return new ne([new re(t,null,null,null,null)])},ne.prototype.isEmpty=function(){return 0===this.sections.length||!this.sections.some((function(t){return 0!==t.text.length||t.image&&0!==t.image.name.length}))},ne.factory=function(t){return t instanceof ne?t:ne.fromString(t)},ne.prototype.toString=function(){return 0===this.sections.length?"":this.sections.map((function(t){return t.text})).join("")},ne.prototype.serialize=function(){for(var t=["format"],e=0,r=this.sections;e=0&&t<=255&&"number"==typeof e&&e>=0&&e<=255&&"number"==typeof r&&r>=0&&r<=255?void 0===n||"number"==typeof n&&n>=0&&n<=1?null:"Invalid 
rgba value ["+[t,e,r,n].join(", ")+"]: 'a' must be between 0 and 1.":"Invalid rgba value ["+("number"==typeof n?[t,e,r,n]:[t,e,r]).join(", ")+"]: 'r', 'g', and 'b' must be between 0 and 255."}function oe(t){if(null===t)return!0;if("string"==typeof t)return!0;if("boolean"==typeof t)return!0;if("number"==typeof t)return!0;if(t instanceof te)return!0;if(t instanceof ee)return!0;if(t instanceof ne)return!0;if(t instanceof ie)return!0;if(Array.isArray(t)){for(var e=0,r=t;e2){var s=t[1];if("string"!=typeof s||!(s in fe)||"object"===s)return e.error('The item type argument of "array" must be one of string, number, boolean',1);a=fe[s],n++}else a=Ht;if(t.length>3){if(null!==t[2]&&("number"!=typeof t[2]||t[2]<0||t[2]!==Math.floor(t[2])))return e.error('The length argument to "array" must be a positive integer literal',2);o=t[2],n++}r=Wt(a,o)}else r=fe[i];for(var l=[];n1)&&e.push(n)}}return e.concat(this.args.map((function(t){return t.serialize()})))};var pe=function(t){this.type=Gt,this.sections=t};pe.parse=function(t,e){if(t.length<2)return e.error("Expected at least one argument.");var r=t[1];if(!Array.isArray(r)&&"object"==typeof r)return e.error("First argument must be an image or text section.");for(var n=[],i=!1,a=1;a<=t.length-1;++a){var o=t[a];if(i&&"object"==typeof o&&!Array.isArray(o)){i=!1;var s=null;if(o["font-scale"]&&!(s=e.parse(o["font-scale"],1,Bt)))return null;var l=null;if(o["text-font"]&&!(l=e.parse(o["text-font"],1,Wt(Nt))))return null;var c=null;if(o["text-color"]&&!(c=e.parse(o["text-color"],1,Ut)))return null;var u=n[n.length-1];u.scale=s,u.font=l,u.textColor=c}else{var f=e.parse(t[a],1,Ht);if(!f)return null;var h=f.type.kind;if("string"!==h&&"value"!==h&&"null"!==h&&"resolvedImage"!==h)return e.error("Formatted text type must be 'string', 'value', 'image' or 'null'.");i=!0,n.push({content:f,scale:null,font:null,textColor:null})}}return new pe(n)},pe.prototype.evaluate=function(t){return new ne(this.sections.map((function(e){var 
r=e.content.evaluate(t);return se(r)===Yt?new re("",r,null,null,null):new re(le(r),null,e.scale?e.scale.evaluate(t):null,e.font?e.font.evaluate(t).join(","):null,e.textColor?e.textColor.evaluate(t):null)})))},pe.prototype.eachChild=function(t){for(var e=0,r=this.sections;e-1),r},de.prototype.eachChild=function(t){t(this.input)},de.prototype.outputDefined=function(){return!1},de.prototype.serialize=function(){return["image",this.input.serialize()]};var me={"to-boolean":jt,"to-color":Ut,"to-number":Bt,"to-string":Nt},ge=function(t,e){this.type=t,this.args=e};ge.parse=function(t,e){if(t.length<2)return e.error("Expected at least one argument.");var r=t[0];if(("to-boolean"===r||"to-string"===r)&&2!==t.length)return e.error("Expected one argument.");for(var n=me[r],i=[],a=1;a4?"Invalid rbga value "+JSON.stringify(e)+": expected an array containing either three or four numeric values.":ae(e[0],e[1],e[2],e[3])))return new te(e[0]/255,e[1]/255,e[2]/255,e[3])}throw new ue(r||"Could not parse color from value '"+("string"==typeof e?e:String(JSON.stringify(e)))+"'")}if("number"===this.type.kind){for(var o=null,s=0,l=this.args;s=e[2])&&(!(t[1]<=e[1])&&!(t[3]>=e[3])))}function Te(t,e){var r,n=(180+t[0])/360,i=(r=t[1],(180-180/Math.PI*Math.log(Math.tan(Math.PI/4+r*Math.PI/360)))/360),a=Math.pow(2,e.z);return[Math.round(n*a*8192),Math.round(i*a*8192)]}function ke(t,e,r){return e[1]>t[1]!=r[1]>t[1]&&t[0]<(r[0]-e[0])*(t[1]-e[1])/(r[1]-e[1])+e[0]}function Ae(t,e){for(var r,n,i,a,o,s,l,c=!1,u=0,f=e.length;u0&&f<0||u<0&&f>0}function Ee(t,e,r){for(var n=0,i=r;nr[2]){var i=.5*n,a=t[0]-r[0]>i?-n:r[0]-t[0]>i?n:0;0===a&&(a=t[0]-r[2]>i?-n:r[2]-t[0]>i?n:0),t[0]+=a}_e(e,t)}function ze(t,e,r,n){for(var i=8192*Math.pow(2,n.z),a=[8192*n.x,8192*n.y],o=[],s=0,l=t;s=0)return!1;var r=!0;return t.eachChild((function(t){r&&!Ne(t,e)&&(r=!1)})),r}Re.parse=function(t,e){if(2!==t.length)return e.error("'within' expression requires exactly one argument, but found "+(t.length-1)+" 
instead.");if(oe(t[1])){var r=t[1];if("FeatureCollection"===r.type)for(var n=0;ne))throw new ue("Input is not a number.");o=s-1}return 0}Ue.prototype.parse=function(t,e,r,n,i){return void 0===i&&(i={}),e?this.concat(e,r,n)._parse(t,i):this._parse(t,i)},Ue.prototype._parse=function(t,e){function r(t,e,r){return"assert"===r?new he(e,[t]):"coerce"===r?new ge(e,[t]):t}if(null!==t&&"string"!=typeof t&&"boolean"!=typeof t&&"number"!=typeof t||(t=["literal",t]),Array.isArray(t)){if(0===t.length)return this.error('Expected an array with at least one element. If you wanted a literal array, use ["literal", []].');var n=t[0];if("string"!=typeof n)return this.error("Expression name must be a string, but found "+typeof n+' instead. If you wanted a literal array, use ["literal", [...]].',0),null;var i=this.registry[n];if(i){var a=i.parse(t,this);if(!a)return null;if(this.expectedType){var o=this.expectedType,s=a.type;if("string"!==o.kind&&"number"!==o.kind&&"boolean"!==o.kind&&"object"!==o.kind&&"array"!==o.kind||"value"!==s.kind)if("color"!==o.kind&&"formatted"!==o.kind&&"resolvedImage"!==o.kind||"value"!==s.kind&&"string"!==s.kind){if(this.checkSubtype(o,s))return null}else a=r(a,o,e.typeAnnotation||"coerce");else a=r(a,o,e.typeAnnotation||"assert")}if(!(a instanceof ce)&&"resolvedImage"!==a.type.kind&&function t(e){if(e instanceof je)return t(e.boundExpression);if(e instanceof xe&&"error"===e.name)return!1;if(e instanceof be)return!1;if(e instanceof Re)return!1;var r=e instanceof ge||e instanceof he,n=!0;if(e.eachChild((function(e){n=r?n&&t(e):n&&e instanceof ce})),!n)return!1;return Fe(e)&&Ne(e,["zoom","heatmap-density","line-progress","accumulated","is-supported-script"])}(a)){var l=new ye;try{a=new ce(a.type,a.evaluate(l))}catch(t){return this.error(t.message),null}}return a}return this.error('Unknown expression "'+n+'". If you wanted a literal array, use ["literal", [...]].',0)}return void 0===t?this.error("'undefined' value invalid. 
Use null instead."):"object"==typeof t?this.error('Bare objects invalid. Use ["literal", {...}] instead.'):this.error("Expected an array, but found "+typeof t+" instead.")},Ue.prototype.concat=function(t,e,r){var n="number"==typeof t?this.path.concat(t):this.path,i=r?this.scope.concat(r):this.scope;return new Ue(this.registry,n,e||null,i,this.errors)},Ue.prototype.error=function(t){for(var e=[],r=arguments.length-1;r-- >0;)e[r]=arguments[r+1];var n=""+this.key+e.map((function(t){return"["+t+"]"})).join("");this.errors.push(new Dt(n,t))},Ue.prototype.checkSubtype=function(t,e){var r=Jt(t,e);return r&&this.error(r),r};var He=function(t,e,r){this.type=t,this.input=e,this.labels=[],this.outputs=[];for(var n=0,i=r;n=o)return e.error('Input/output pairs for "step" expressions must be arranged with input values in strictly ascending order.',l);var u=e.parse(s,c,i);if(!u)return null;i=i||u.type,n.push([o,u])}return new He(i,r,n)},He.prototype.evaluate=function(t){var e=this.labels,r=this.outputs;if(1===e.length)return r[0].evaluate(t);var n=this.input.evaluate(t);if(n<=e[0])return r[0].evaluate(t);var i=e.length;return n>=e[i-1]?r[i-1].evaluate(t):r[Ve(e,n)].evaluate(t)},He.prototype.eachChild=function(t){t(this.input);for(var e=0,r=this.outputs;e0&&t.push(this.labels[e]),t.push(this.outputs[e].serialize());return t};var Ge=Object.freeze({__proto__:null,number:qe,color:function(t,e,r){return new te(qe(t.r,e.r,r),qe(t.g,e.g,r),qe(t.b,e.b,r),qe(t.a,e.a,r))},array:function(t,e,r){return t.map((function(t,n){return qe(t,e[n],r)}))}}),Ye=6/29,We=3*Ye*Ye,Xe=Math.PI/180,Ze=180/Math.PI;function Je(t){return t>.008856451679035631?Math.pow(t,1/3):t/We+4/29}function Ke(t){return t>Ye?t*t*t:We*(t-4/29)}function Qe(t){return 255*(t<=.0031308?12.92*t:1.055*Math.pow(t,1/2.4)-.055)}function $e(t){return(t/=255)<=.04045?t/12.92:Math.pow((t+.055)/1.055,2.4)}function tr(t){var 
e=$e(t.r),r=$e(t.g),n=$e(t.b),i=Je((.4124564*e+.3575761*r+.1804375*n)/.95047),a=Je((.2126729*e+.7151522*r+.072175*n)/1);return{l:116*a-16,a:500*(i-a),b:200*(a-Je((.0193339*e+.119192*r+.9503041*n)/1.08883)),alpha:t.a}}function er(t){var e=(t.l+16)/116,r=isNaN(t.a)?e:e+t.a/500,n=isNaN(t.b)?e:e-t.b/200;return e=1*Ke(e),r=.95047*Ke(r),n=1.08883*Ke(n),new te(Qe(3.2404542*r-1.5371385*e-.4985314*n),Qe(-.969266*r+1.8760108*e+.041556*n),Qe(.0556434*r-.2040259*e+1.0572252*n),t.alpha)}function rr(t,e,r){var n=e-t;return t+r*(n>180||n<-180?n-360*Math.round(n/360):n)}var nr={forward:tr,reverse:er,interpolate:function(t,e,r){return{l:qe(t.l,e.l,r),a:qe(t.a,e.a,r),b:qe(t.b,e.b,r),alpha:qe(t.alpha,e.alpha,r)}}},ir={forward:function(t){var e=tr(t),r=e.l,n=e.a,i=e.b,a=Math.atan2(i,n)*Ze;return{h:a<0?a+360:a,c:Math.sqrt(n*n+i*i),l:r,alpha:t.a}},reverse:function(t){var e=t.h*Xe,r=t.c;return er({l:t.l,a:Math.cos(e)*r,b:Math.sin(e)*r,alpha:t.alpha})},interpolate:function(t,e,r){return{h:rr(t.h,e.h,r),c:qe(t.c,e.c,r),l:qe(t.l,e.l,r),alpha:qe(t.alpha,e.alpha,r)}}},ar=Object.freeze({__proto__:null,lab:nr,hcl:ir}),or=function(t,e,r,n,i){this.type=t,this.operator=e,this.interpolation=r,this.input=n,this.labels=[],this.outputs=[];for(var a=0,o=i;a1})))return e.error("Cubic bezier interpolation requires four numeric arguments with values between 0 and 1.",1);n={name:"cubic-bezier",controlPoints:s}}if(t.length-1<4)return e.error("Expected at least 4 arguments, but found only "+(t.length-1)+".");if((t.length-1)%2!=0)return e.error("Expected an even number of arguments.");if(!(i=e.parse(i,2,Bt)))return null;var l=[],c=null;"interpolate-hcl"===r||"interpolate-lab"===r?c=Ut:e.expectedType&&"value"!==e.expectedType.kind&&(c=e.expectedType);for(var u=0;u=f)return e.error('Input/output pairs for "interpolate" expressions must be arranged with input values in strictly ascending order.',p);var m=e.parse(h,d,c);if(!m)return 
null;c=c||m.type,l.push([f,m])}return"number"===c.kind||"color"===c.kind||"array"===c.kind&&"number"===c.itemType.kind&&"number"==typeof c.N?new or(c,r,n,i,l):e.error("Type "+Xt(c)+" is not interpolatable.")},or.prototype.evaluate=function(t){var e=this.labels,r=this.outputs;if(1===e.length)return r[0].evaluate(t);var n=this.input.evaluate(t);if(n<=e[0])return r[0].evaluate(t);var i=e.length;if(n>=e[i-1])return r[i-1].evaluate(t);var a=Ve(e,n),o=e[a],s=e[a+1],l=or.interpolationFactor(this.interpolation,n,o,s),c=r[a].evaluate(t),u=r[a+1].evaluate(t);return"interpolate"===this.operator?Ge[this.type.kind.toLowerCase()](c,u,l):"interpolate-hcl"===this.operator?ir.reverse(ir.interpolate(ir.forward(c),ir.forward(u),l)):nr.reverse(nr.interpolate(nr.forward(c),nr.forward(u),l))},or.prototype.eachChild=function(t){t(this.input);for(var e=0,r=this.outputs;e=r.length)throw new ue("Array index out of bounds: "+e+" > "+(r.length-1)+".");if(e!==Math.floor(e))throw new ue("Array index must be an integer, but found "+e+" instead.");return r[e]},ur.prototype.eachChild=function(t){t(this.index),t(this.input)},ur.prototype.outputDefined=function(){return!1},ur.prototype.serialize=function(){return["at",this.index.serialize(),this.input.serialize()]};var fr=function(t,e){this.type=jt,this.needle=t,this.haystack=e};fr.parse=function(t,e){if(3!==t.length)return e.error("Expected 2 arguments, but found "+(t.length-1)+" instead.");var r=e.parse(t[1],1,Ht),n=e.parse(t[2],2,Ht);return r&&n?Kt(r.type,[jt,Nt,Bt,Ft,Ht])?new fr(r,n):e.error("Expected first argument to be of type boolean, string, number or null, but found "+Xt(r.type)+" instead"):null},fr.prototype.evaluate=function(t){var e=this.needle.evaluate(t),r=this.haystack.evaluate(t);if(!r)return!1;if(!Qt(e,["boolean","string","number","null"]))throw new ue("Expected first argument to be of type boolean, string, number or null, but found "+Xt(se(e))+" instead.");if(!Qt(r,["string","array"]))throw new ue("Expected second argument to be 
of type array or string, but found "+Xt(se(r))+" instead.");return r.indexOf(e)>=0},fr.prototype.eachChild=function(t){t(this.needle),t(this.haystack)},fr.prototype.outputDefined=function(){return!0},fr.prototype.serialize=function(){return["in",this.needle.serialize(),this.haystack.serialize()]};var hr=function(t,e,r){this.type=Bt,this.needle=t,this.haystack=e,this.fromIndex=r};hr.parse=function(t,e){if(t.length<=2||t.length>=5)return e.error("Expected 3 or 4 arguments, but found "+(t.length-1)+" instead.");var r=e.parse(t[1],1,Ht),n=e.parse(t[2],2,Ht);if(!r||!n)return null;if(!Kt(r.type,[jt,Nt,Bt,Ft,Ht]))return e.error("Expected first argument to be of type boolean, string, number or null, but found "+Xt(r.type)+" instead");if(4===t.length){var i=e.parse(t[3],3,Bt);return i?new hr(r,n,i):null}return new hr(r,n)},hr.prototype.evaluate=function(t){var e=this.needle.evaluate(t),r=this.haystack.evaluate(t);if(!Qt(e,["boolean","string","number","null"]))throw new ue("Expected first argument to be of type boolean, string, number or null, but found "+Xt(se(e))+" instead.");if(!Qt(r,["string","array"]))throw new ue("Expected second argument to be of type array or string, but found "+Xt(se(r))+" instead.");if(this.fromIndex){var n=this.fromIndex.evaluate(t);return r.indexOf(e,n)}return r.indexOf(e)},hr.prototype.eachChild=function(t){t(this.needle),t(this.haystack),this.fromIndex&&t(this.fromIndex)},hr.prototype.outputDefined=function(){return!1},hr.prototype.serialize=function(){if(null!=this.fromIndex&&void 0!==this.fromIndex){var t=this.fromIndex.serialize();return["index-of",this.needle.serialize(),this.haystack.serialize(),t]}return["index-of",this.needle.serialize(),this.haystack.serialize()]};var pr=function(t,e,r,n,i,a){this.inputType=t,this.type=e,this.input=r,this.cases=n,this.outputs=i,this.otherwise=a};pr.parse=function(t,e){if(t.length<5)return e.error("Expected at least 4 arguments, but found only "+(t.length-1)+".");if(t.length%2!=1)return e.error("Expected 
an even number of arguments.");var r,n;e.expectedType&&"value"!==e.expectedType.kind&&(n=e.expectedType);for(var i={},a=[],o=2;oNumber.MAX_SAFE_INTEGER)return c.error("Branch labels must be integers no larger than "+Number.MAX_SAFE_INTEGER+".");if("number"==typeof h&&Math.floor(h)!==h)return c.error("Numeric branch labels must be integer values.");if(r){if(c.checkSubtype(r,se(h)))return null}else r=se(h);if(void 0!==i[String(h)])return c.error("Branch labels must be unique.");i[String(h)]=a.length}var p=e.parse(l,o,n);if(!p)return null;n=n||p.type,a.push(p)}var d=e.parse(t[1],1,Ht);if(!d)return null;var m=e.parse(t[t.length-1],t.length-1,n);return m?"value"!==d.type.kind&&e.concat(1).checkSubtype(r,d.type)?null:new pr(r,n,d,i,a,m):null},pr.prototype.evaluate=function(t){var e=this.input.evaluate(t);return(se(e)===this.inputType&&this.outputs[this.cases[e]]||this.otherwise).evaluate(t)},pr.prototype.eachChild=function(t){t(this.input),this.outputs.forEach(t),t(this.otherwise)},pr.prototype.outputDefined=function(){return this.outputs.every((function(t){return t.outputDefined()}))&&this.otherwise.outputDefined()},pr.prototype.serialize=function(){for(var t=this,e=["match",this.input.serialize()],r=[],n={},i=0,a=Object.keys(this.cases).sort();i=5)return e.error("Expected 3 or 4 arguments, but found "+(t.length-1)+" instead.");var r=e.parse(t[1],1,Ht),n=e.parse(t[2],2,Bt);if(!r||!n)return null;if(!Kt(r.type,[Wt(Ht),Nt,Ht]))return e.error("Expected first argument to be of type array or string, but found "+Xt(r.type)+" instead");if(4===t.length){var i=e.parse(t[3],3,Bt);return i?new mr(r.type,r,n,i):null}return new mr(r.type,r,n)},mr.prototype.evaluate=function(t){var e=this.input.evaluate(t),r=this.beginIndex.evaluate(t);if(!Qt(e,["string","array"]))throw new ue("Expected first argument to be of type array or string, but found "+Xt(se(e))+" instead.");if(this.endIndex){var n=this.endIndex.evaluate(t);return e.slice(r,n)}return 
e.slice(r)},mr.prototype.eachChild=function(t){t(this.input),t(this.beginIndex),this.endIndex&&t(this.endIndex)},mr.prototype.outputDefined=function(){return!1},mr.prototype.serialize=function(){if(null!=this.endIndex&&void 0!==this.endIndex){var t=this.endIndex.serialize();return["slice",this.input.serialize(),this.beginIndex.serialize(),t]}return["slice",this.input.serialize(),this.beginIndex.serialize()]};var xr=yr("==",(function(t,e,r){return e===r}),vr),br=yr("!=",(function(t,e,r){return e!==r}),(function(t,e,r,n){return!vr(0,e,r,n)})),_r=yr("<",(function(t,e,r){return e",(function(t,e,r){return e>r}),(function(t,e,r,n){return n.compare(e,r)>0})),Tr=yr("<=",(function(t,e,r){return e<=r}),(function(t,e,r,n){return n.compare(e,r)<=0})),kr=yr(">=",(function(t,e,r){return e>=r}),(function(t,e,r,n){return n.compare(e,r)>=0})),Ar=function(t,e,r,n,i){this.type=Nt,this.number=t,this.locale=e,this.currency=r,this.minFractionDigits=n,this.maxFractionDigits=i};Ar.parse=function(t,e){if(3!==t.length)return e.error("Expected two arguments.");var r=e.parse(t[1],1,Bt);if(!r)return null;var n=t[2];if("object"!=typeof n||Array.isArray(n))return e.error("NumberFormat options argument must be an object.");var i=null;if(n.locale&&!(i=e.parse(n.locale,1,Nt)))return null;var a=null;if(n.currency&&!(a=e.parse(n.currency,1,Nt)))return null;var o=null;if(n["min-fraction-digits"]&&!(o=e.parse(n["min-fraction-digits"],1,Bt)))return null;var s=null;return n["max-fraction-digits"]&&!(s=e.parse(n["max-fraction-digits"],1,Bt))?null:new Ar(r,i,a,o,s)},Ar.prototype.evaluate=function(t){return new Intl.NumberFormat(this.locale?this.locale.evaluate(t):[],{style:this.currency?"currency":"decimal",currency:this.currency?this.currency.evaluate(t):void 0,minimumFractionDigits:this.minFractionDigits?this.minFractionDigits.evaluate(t):void 0,maximumFractionDigits:this.maxFractionDigits?this.maxFractionDigits.evaluate(t):void 
0}).format(this.number.evaluate(t))},Ar.prototype.eachChild=function(t){t(this.number),this.locale&&t(this.locale),this.currency&&t(this.currency),this.minFractionDigits&&t(this.minFractionDigits),this.maxFractionDigits&&t(this.maxFractionDigits)},Ar.prototype.outputDefined=function(){return!1},Ar.prototype.serialize=function(){var t={};return this.locale&&(t.locale=this.locale.serialize()),this.currency&&(t.currency=this.currency.serialize()),this.minFractionDigits&&(t["min-fraction-digits"]=this.minFractionDigits.serialize()),this.maxFractionDigits&&(t["max-fraction-digits"]=this.maxFractionDigits.serialize()),["number-format",this.number.serialize(),t]};var Mr=function(t){this.type=Bt,this.input=t};Mr.parse=function(t,e){if(2!==t.length)return e.error("Expected 1 argument, but found "+(t.length-1)+" instead.");var r=e.parse(t[1],1);return r?"array"!==r.type.kind&&"string"!==r.type.kind&&"value"!==r.type.kind?e.error("Expected argument of type string or array, but found "+Xt(r.type)+" instead."):new Mr(r):null},Mr.prototype.evaluate=function(t){var e=this.input.evaluate(t);if("string"==typeof e)return e.length;if(Array.isArray(e))return e.length;throw new ue("Expected value to be of type string or array, but found "+Xt(se(e))+" instead.")},Mr.prototype.eachChild=function(t){t(this.input)},Mr.prototype.outputDefined=function(){return!1},Mr.prototype.serialize=function(){var t=["length"];return this.eachChild((function(e){t.push(e.serialize())})),t};var Sr={"==":xr,"!=":br,">":wr,"<":_r,">=":kr,"<=":Tr,array:he,at:ur,boolean:he,case:dr,coalesce:lr,collator:be,format:pe,image:de,in:fr,"index-of":hr,interpolate:or,"interpolate-hcl":or,"interpolate-lab":or,length:Mr,let:cr,literal:ce,match:pr,number:he,"number-format":Ar,object:he,slice:mr,step:He,string:he,"to-boolean":ge,"to-color":ge,"to-number":ge,"to-string":ge,var:je,within:Re};function Er(t,e){var r=e[0],n=e[1],i=e[2],a=e[3];r=r.evaluate(t),n=n.evaluate(t),i=i.evaluate(t);var 
o=a?a.evaluate(t):1,s=ae(r,n,i,o);if(s)throw new ue(s);return new te(r/255*o,n/255*o,i/255*o,o)}function Lr(t,e){return t in e}function Cr(t,e){var r=e[t];return void 0===r?null:r}function Pr(t){return{type:t}}function Ir(t){return{result:"success",value:t}}function Or(t){return{result:"error",value:t}}function zr(t){return"data-driven"===t["property-type"]||"cross-faded-data-driven"===t["property-type"]}function Dr(t){return!!t.expression&&t.expression.parameters.indexOf("zoom")>-1}function Rr(t){return!!t.expression&&t.expression.interpolated}function Fr(t){return t instanceof Number?"number":t instanceof String?"string":t instanceof Boolean?"boolean":Array.isArray(t)?"array":null===t?"null":typeof t}function Br(t){return"object"==typeof t&&null!==t&&!Array.isArray(t)}function Nr(t){return t}function jr(t,e,r){return void 0!==t?t:void 0!==e?e:void 0!==r?r:void 0}function Ur(t,e,r,n,i){return jr(typeof r===i?n[r]:void 0,t.default,e.default)}function Vr(t,e,r){if("number"!==Fr(r))return jr(t.default,e.default);var n=t.stops.length;if(1===n)return t.stops[0][1];if(r<=t.stops[0][0])return t.stops[0][1];if(r>=t.stops[n-1][0])return t.stops[n-1][1];var i=Ve(t.stops.map((function(t){return t[0]})),r);return t.stops[i][1]}function Hr(t,e,r){var n=void 0!==t.base?t.base:1;if("number"!==Fr(r))return jr(t.default,e.default);var i=t.stops.length;if(1===i)return t.stops[0][1];if(r<=t.stops[0][0])return t.stops[0][1];if(r>=t.stops[i-1][0])return t.stops[i-1][1];var a=Ve(t.stops.map((function(t){return t[0]})),r),o=function(t,e,r,n){var i=n-r,a=t-r;return 0===i?0:1===e?a/i:(Math.pow(e,a)-1)/(Math.pow(e,i)-1)}(r,n,t.stops[a][0],t.stops[a+1][0]),s=t.stops[a][1],l=t.stops[a+1][1],c=Ge[e.type]||Nr;if(t.colorSpace&&"rgb"!==t.colorSpace){var u=ar[t.colorSpace];c=function(t,e){return u.reverse(u.interpolate(u.forward(t),u.forward(e),o))}}return"function"==typeof s.evaluate?{evaluate:function(){for(var t=[],e=arguments.length;e--;)t[e]=arguments[e];var r=s.evaluate.apply(void 
0,t),n=l.evaluate.apply(void 0,t);if(void 0!==r&&void 0!==n)return c(r,n,o)}}:c(s,l,o)}function qr(t,e,r){return"color"===e.type?r=te.parse(r):"formatted"===e.type?r=ne.fromString(r.toString()):"resolvedImage"===e.type?r=ie.fromString(r.toString()):Fr(r)===e.type||"enum"===e.type&&e.values[r]||(r=void 0),jr(r,t.default,e.default)}xe.register(Sr,{error:[{kind:"error"},[Nt],function(t,e){var r=e[0];throw new ue(r.evaluate(t))}],typeof:[Nt,[Ht],function(t,e){return Xt(se(e[0].evaluate(t)))}],"to-rgba":[Wt(Bt,4),[Ut],function(t,e){return e[0].evaluate(t).toArray()}],rgb:[Ut,[Bt,Bt,Bt],Er],rgba:[Ut,[Bt,Bt,Bt,Bt],Er],has:{type:jt,overloads:[[[Nt],function(t,e){return Lr(e[0].evaluate(t),t.properties())}],[[Nt,Vt],function(t,e){var r=e[0],n=e[1];return Lr(r.evaluate(t),n.evaluate(t))}]]},get:{type:Ht,overloads:[[[Nt],function(t,e){return Cr(e[0].evaluate(t),t.properties())}],[[Nt,Vt],function(t,e){var r=e[0],n=e[1];return Cr(r.evaluate(t),n.evaluate(t))}]]},"feature-state":[Ht,[Nt],function(t,e){return Cr(e[0].evaluate(t),t.featureState||{})}],properties:[Vt,[],function(t){return t.properties()}],"geometry-type":[Nt,[],function(t){return t.geometryType()}],id:[Ht,[],function(t){return t.id()}],zoom:[Bt,[],function(t){return t.globals.zoom}],"heatmap-density":[Bt,[],function(t){return t.globals.heatmapDensity||0}],"line-progress":[Bt,[],function(t){return t.globals.lineProgress||0}],accumulated:[Ht,[],function(t){return void 0===t.globals.accumulated?null:t.globals.accumulated}],"+":[Bt,Pr(Bt),function(t,e){for(var r=0,n=0,i=e;n":[jt,[Nt,Ht],function(t,e){var r=e[0],n=e[1],i=t.properties()[r.value],a=n.value;return typeof i==typeof a&&i>a}],"filter-id->":[jt,[Ht],function(t,e){var r=e[0],n=t.id(),i=r.value;return typeof n==typeof i&&n>i}],"filter-<=":[jt,[Nt,Ht],function(t,e){var r=e[0],n=e[1],i=t.properties()[r.value],a=n.value;return typeof i==typeof a&&i<=a}],"filter-id-<=":[jt,[Ht],function(t,e){var r=e[0],n=t.id(),i=r.value;return typeof n==typeof 
i&&n<=i}],"filter->=":[jt,[Nt,Ht],function(t,e){var r=e[0],n=e[1],i=t.properties()[r.value],a=n.value;return typeof i==typeof a&&i>=a}],"filter-id->=":[jt,[Ht],function(t,e){var r=e[0],n=t.id(),i=r.value;return typeof n==typeof i&&n>=i}],"filter-has":[jt,[Ht],function(t,e){return e[0].value in t.properties()}],"filter-has-id":[jt,[],function(t){return null!==t.id()&&void 0!==t.id()}],"filter-type-in":[jt,[Wt(Nt)],function(t,e){return e[0].value.indexOf(t.geometryType())>=0}],"filter-id-in":[jt,[Wt(Ht)],function(t,e){return e[0].value.indexOf(t.id())>=0}],"filter-in-small":[jt,[Nt,Wt(Ht)],function(t,e){var r=e[0];return e[1].value.indexOf(t.properties()[r.value])>=0}],"filter-in-large":[jt,[Nt,Wt(Ht)],function(t,e){var r=e[0],n=e[1];return function(t,e,r,n){for(;r<=n;){var i=r+n>>1;if(e[i]===t)return!0;e[i]>t?n=i-1:r=i+1}return!1}(t.properties()[r.value],n.value,0,n.value.length-1)}],all:{type:jt,overloads:[[[jt,jt],function(t,e){var r=e[0],n=e[1];return r.evaluate(t)&&n.evaluate(t)}],[Pr(jt),function(t,e){for(var r=0,n=e;r0&&"string"==typeof t[0]&&t[0]in Sr}function Wr(t,e){var r=new Ue(Sr,[],e?function(t){var e={color:Ut,string:Nt,number:Bt,enum:Nt,boolean:jt,formatted:Gt,resolvedImage:Yt};if("array"===t.type)return Wt(e[t.value]||Ht,t.length);return e[t.type]}(e):void 0),n=r.parse(t,void 0,void 0,void 0,e&&"string"===e.type?{typeAnnotation:"coerce"}:void 0);return n?Ir(new Gr(n,e)):Or(r.errors)}Gr.prototype.evaluateWithoutErrorHandling=function(t,e,r,n,i,a){return this._evaluator.globals=t,this._evaluator.feature=e,this._evaluator.featureState=r,this._evaluator.canonical=n,this._evaluator.availableImages=i||null,this._evaluator.formattedSection=a,this.expression.evaluate(this._evaluator)},Gr.prototype.evaluate=function(t,e,r,n,i,a){this._evaluator.globals=t,this._evaluator.feature=e||null,this._evaluator.featureState=r||null,this._evaluator.canonical=n,this._evaluator.availableImages=i||null,this._evaluator.formattedSection=a||null;try{var 
o=this.expression.evaluate(this._evaluator);if(null==o||"number"==typeof o&&o!=o)return this._defaultValue;if(this._enumValues&&!(o in this._enumValues))throw new ue("Expected value to be one of "+Object.keys(this._enumValues).map((function(t){return JSON.stringify(t)})).join(", ")+", but found "+JSON.stringify(o)+" instead.");return o}catch(t){return this._warningHistory[t.message]||(this._warningHistory[t.message]=!0,"undefined"!=typeof console&&console.warn(t.message)),this._defaultValue}};var Xr=function(t,e){this.kind=t,this._styleExpression=e,this.isStateDependent="constant"!==t&&!Be(e.expression)};Xr.prototype.evaluateWithoutErrorHandling=function(t,e,r,n,i,a){return this._styleExpression.evaluateWithoutErrorHandling(t,e,r,n,i,a)},Xr.prototype.evaluate=function(t,e,r,n,i,a){return this._styleExpression.evaluate(t,e,r,n,i,a)};var Zr=function(t,e,r,n){this.kind=t,this.zoomStops=r,this._styleExpression=e,this.isStateDependent="camera"!==t&&!Be(e.expression),this.interpolationType=n};function Jr(t,e){if("error"===(t=Wr(t,e)).result)return t;var r=t.value.expression,n=Fe(r);if(!n&&!zr(e))return Or([new Dt("","data expressions not supported")]);var i=Ne(r,["zoom"]);if(!i&&!Dr(e))return Or([new Dt("","zoom expressions not supported")]);var a=function t(e){var r=null;if(e instanceof cr)r=t(e.result);else if(e instanceof lr)for(var n=0,i=e.args;nn.maximum?[new Ct(e,r,r+" is greater than the maximum value "+n.maximum)]:[]}function en(t){var e,r,n,i=t.valueSpec,a=Ot(t.value.type),o={},s="categorical"!==a&&void 0===t.value.property,l=!s,c="array"===Fr(t.value.stops)&&"array"===Fr(t.value.stops[0])&&"object"===Fr(t.value.stops[0][0]),u=Qr({key:t.key,value:t.value,valueSpec:t.styleSpec.function,style:t.style,styleSpec:t.styleSpec,objectElementValidators:{stops:function(t){if("identity"===a)return[new Ct(t.key,t.value,'identity function may not have a "stops" property')];var 
e=[],r=t.value;e=e.concat($r({key:t.key,value:r,valueSpec:t.valueSpec,style:t.style,styleSpec:t.styleSpec,arrayElementValidator:f})),"array"===Fr(r)&&0===r.length&&e.push(new Ct(t.key,r,"array must have at least one stop"));return e},default:function(t){return kn({key:t.key,value:t.value,valueSpec:i,style:t.style,styleSpec:t.styleSpec})}}});return"identity"===a&&s&&u.push(new Ct(t.key,t.value,'missing required property "property"')),"identity"===a||t.value.stops||u.push(new Ct(t.key,t.value,'missing required property "stops"')),"exponential"===a&&t.valueSpec.expression&&!Rr(t.valueSpec)&&u.push(new Ct(t.key,t.value,"exponential functions not supported")),t.styleSpec.$version>=8&&(l&&!zr(t.valueSpec)?u.push(new Ct(t.key,t.value,"property functions not supported")):s&&!Dr(t.valueSpec)&&u.push(new Ct(t.key,t.value,"zoom functions not supported"))),"categorical"!==a&&!c||void 0!==t.value.property||u.push(new Ct(t.key,t.value,'"property" property is required')),u;function f(t){var e=[],a=t.value,s=t.key;if("array"!==Fr(a))return[new Ct(s,a,"array expected, "+Fr(a)+" found")];if(2!==a.length)return[new Ct(s,a,"array length 2 expected, length "+a.length+" found")];if(c){if("object"!==Fr(a[0]))return[new Ct(s,a,"object expected, "+Fr(a[0])+" found")];if(void 0===a[0].zoom)return[new Ct(s,a,"object stop key must have zoom")];if(void 0===a[0].value)return[new Ct(s,a,"object stop key must have value")];if(n&&n>Ot(a[0].zoom))return[new Ct(s,a[0].zoom,"stop zoom values must appear in ascending order")];Ot(a[0].zoom)!==n&&(n=Ot(a[0].zoom),r=void 0,o={}),e=e.concat(Qr({key:s+"[0]",value:a[0],valueSpec:{zoom:{}},style:t.style,styleSpec:t.styleSpec,objectElementValidators:{zoom:tn,value:h}}))}else e=e.concat(h({key:s+"[0]",value:a[0],valueSpec:{},style:t.style,styleSpec:t.styleSpec},a));return Yr(zt(a[1]))?e.concat([new Ct(s+"[1]",a[1],"expressions are not allowed in function stops.")]):e.concat(kn({key:s+"[1]",value:a[1],valueSpec:i,style:t.style,styleSpec:t.styleSpec}))}function 
h(t,n){var s=Fr(t.value),l=Ot(t.value),c=null!==t.value?t.value:n;if(e){if(s!==e)return[new Ct(t.key,c,s+" stop domain type must match previous stop domain type "+e)]}else e=s;if("number"!==s&&"string"!==s&&"boolean"!==s)return[new Ct(t.key,c,"stop domain value must be a number, string, or boolean")];if("number"!==s&&"categorical"!==a){var u="number expected, "+s+" found";return zr(i)&&void 0===a&&(u+='\nIf you intended to use a categorical function, specify `"type": "categorical"`.'),[new Ct(t.key,c,u)]}return"categorical"!==a||"number"!==s||isFinite(l)&&Math.floor(l)===l?"categorical"!==a&&"number"===s&&void 0!==r&&l=2&&"$id"!==t[1]&&"$type"!==t[1];case"in":return t.length>=3&&("string"!=typeof t[1]||Array.isArray(t[2]));case"!in":case"!has":case"none":return!1;case"==":case"!=":case">":case">=":case"<":case"<=":return 3!==t.length||Array.isArray(t[1])||Array.isArray(t[2]);case"any":case"all":for(var e=0,r=t.slice(1);ee?1:0}function cn(t){if(!t)return!0;var e,r=t[0];return t.length<=1?"any"!==r:"=="===r?un(t[1],t[2],"=="):"!="===r?pn(un(t[1],t[2],"==")):"<"===r||">"===r||"<="===r||">="===r?un(t[1],t[2],r):"any"===r?(e=t.slice(1),["any"].concat(e.map(cn))):"all"===r?["all"].concat(t.slice(1).map(cn)):"none"===r?["all"].concat(t.slice(1).map(cn).map(pn)):"in"===r?fn(t[1],t.slice(2)):"!in"===r?pn(fn(t[1],t.slice(2))):"has"===r?hn(t[1]):"!has"===r?pn(hn(t[1])):"within"!==r||t}function un(t,e,r){switch(t){case"$type":return["filter-type-"+r,e];case"$id":return["filter-id-"+r,e];default:return["filter-"+r,t,e]}}function fn(t,e){if(0===e.length)return!1;switch(t){case"$type":return["filter-type-in",["literal",e]];case"$id":return["filter-id-in",["literal",e]];default:return e.length>200&&!e.some((function(t){return typeof t!=typeof e[0]}))?["filter-in-large",t,["literal",e.sort(ln)]]:["filter-in-small",t,["literal",e]]}}function hn(t){switch(t){case"$type":return!0;case"$id":return["filter-has-id"];default:return["filter-has",t]}}function pn(t){return["!",t]}function 
dn(t){return an(zt(t.value))?rn(It({},t,{expressionContext:"filter",valueSpec:{value:"boolean"}})):function t(e){var r=e.value,n=e.key;if("array"!==Fr(r))return[new Ct(n,r,"array expected, "+Fr(r)+" found")];var i,a=e.styleSpec,o=[];if(r.length<1)return[new Ct(n,r,"filter array must have at least 1 element")];switch(o=o.concat(nn({key:n+"[0]",value:r[0],valueSpec:a.filter_operator,style:e.style,styleSpec:e.styleSpec})),Ot(r[0])){case"<":case"<=":case">":case">=":r.length>=2&&"$type"===Ot(r[1])&&o.push(new Ct(n,r,'"$type" cannot be use with operator "'+r[0]+'"'));case"==":case"!=":3!==r.length&&o.push(new Ct(n,r,'filter array for operator "'+r[0]+'" must have 3 elements'));case"in":case"!in":r.length>=2&&"string"!==(i=Fr(r[1]))&&o.push(new Ct(n+"[1]",r[1],"string expected, "+i+" found"));for(var s=2;s=u[p+0]&&n>=u[p+1])?(o[h]=!0,a.push(c[h])):o[h]=!1}}},Dn.prototype._forEachCell=function(t,e,r,n,i,a,o,s){for(var l=this._convertToCellCoord(t),c=this._convertToCellCoord(e),u=this._convertToCellCoord(r),f=this._convertToCellCoord(n),h=l;h<=u;h++)for(var p=c;p<=f;p++){var d=this.d*p+h;if((!s||s(this._convertFromCellCoord(h),this._convertFromCellCoord(p),this._convertFromCellCoord(h+1),this._convertFromCellCoord(p+1)))&&i.call(this,t,e,r,n,d,a,o,s))return}},Dn.prototype._convertFromCellCoord=function(t){return(t-this.padding)/this.scale},Dn.prototype._convertToCellCoord=function(t){return Math.max(0,Math.min(this.d-1,Math.floor(t*this.scale)+this.padding))},Dn.prototype.toArrayBuffer=function(){if(this.arrayBuffer)return this.arrayBuffer;for(var t=this.cells,e=3+this.cells.length+1+1,r=0,n=0;n=0)){var f=t[u];c[u]=Bn[l].shallow.indexOf(u)>=0?f:Hn(f,e)}t instanceof Error&&(c.message=t.message)}if(c.$name)throw new Error("$name property is reserved for worker serialization logic.");return"Object"!==l&&(c.$name=l),c}throw new Error("can't serialize object of type "+typeof t)}function qn(t){if(null==t||"boolean"==typeof t||"number"==typeof t||"string"==typeof t||t instanceof 
Boolean||t instanceof Number||t instanceof String||t instanceof Date||t instanceof RegExp||Un(t)||Vn(t)||ArrayBuffer.isView(t)||t instanceof Rn)return t;if(Array.isArray(t))return t.map(qn);if("object"==typeof t){var e=t.$name||"Object",r=Bn[e].klass;if(!r)throw new Error("can't deserialize unregistered class "+e);if(r.deserialize)return r.deserialize(t);for(var n=Object.create(r.prototype),i=0,a=Object.keys(t);i=0?s:qn(s)}}return n}throw new Error("can't deserialize object of type "+typeof t)}var Gn=function(){this.first=!0};Gn.prototype.update=function(t,e){var r=Math.floor(t);return this.first?(this.first=!1,this.lastIntegerZoom=r,this.lastIntegerZoomTime=0,this.lastZoom=t,this.lastFloorZoom=r,!0):(this.lastFloorZoom>r?(this.lastIntegerZoom=r+1,this.lastIntegerZoomTime=e):this.lastFloorZoom=128&&t<=255},Arabic:function(t){return t>=1536&&t<=1791},"Arabic Supplement":function(t){return t>=1872&&t<=1919},"Arabic Extended-A":function(t){return t>=2208&&t<=2303},"Hangul Jamo":function(t){return t>=4352&&t<=4607},"Unified Canadian Aboriginal Syllabics":function(t){return t>=5120&&t<=5759},Khmer:function(t){return t>=6016&&t<=6143},"Unified Canadian Aboriginal Syllabics Extended":function(t){return t>=6320&&t<=6399},"General Punctuation":function(t){return t>=8192&&t<=8303},"Letterlike Symbols":function(t){return t>=8448&&t<=8527},"Number Forms":function(t){return t>=8528&&t<=8591},"Miscellaneous Technical":function(t){return t>=8960&&t<=9215},"Control Pictures":function(t){return t>=9216&&t<=9279},"Optical Character Recognition":function(t){return t>=9280&&t<=9311},"Enclosed Alphanumerics":function(t){return t>=9312&&t<=9471},"Geometric Shapes":function(t){return t>=9632&&t<=9727},"Miscellaneous Symbols":function(t){return t>=9728&&t<=9983},"Miscellaneous Symbols and Arrows":function(t){return t>=11008&&t<=11263},"CJK Radicals Supplement":function(t){return t>=11904&&t<=12031},"Kangxi Radicals":function(t){return t>=12032&&t<=12255},"Ideographic Description 
Characters":function(t){return t>=12272&&t<=12287},"CJK Symbols and Punctuation":function(t){return t>=12288&&t<=12351},Hiragana:function(t){return t>=12352&&t<=12447},Katakana:function(t){return t>=12448&&t<=12543},Bopomofo:function(t){return t>=12544&&t<=12591},"Hangul Compatibility Jamo":function(t){return t>=12592&&t<=12687},Kanbun:function(t){return t>=12688&&t<=12703},"Bopomofo Extended":function(t){return t>=12704&&t<=12735},"CJK Strokes":function(t){return t>=12736&&t<=12783},"Katakana Phonetic Extensions":function(t){return t>=12784&&t<=12799},"Enclosed CJK Letters and Months":function(t){return t>=12800&&t<=13055},"CJK Compatibility":function(t){return t>=13056&&t<=13311},"CJK Unified Ideographs Extension A":function(t){return t>=13312&&t<=19903},"Yijing Hexagram Symbols":function(t){return t>=19904&&t<=19967},"CJK Unified Ideographs":function(t){return t>=19968&&t<=40959},"Yi Syllables":function(t){return t>=40960&&t<=42127},"Yi Radicals":function(t){return t>=42128&&t<=42191},"Hangul Jamo Extended-A":function(t){return t>=43360&&t<=43391},"Hangul Syllables":function(t){return t>=44032&&t<=55215},"Hangul Jamo Extended-B":function(t){return t>=55216&&t<=55295},"Private Use Area":function(t){return t>=57344&&t<=63743},"CJK Compatibility Ideographs":function(t){return t>=63744&&t<=64255},"Arabic Presentation Forms-A":function(t){return t>=64336&&t<=65023},"Vertical Forms":function(t){return t>=65040&&t<=65055},"CJK Compatibility Forms":function(t){return t>=65072&&t<=65103},"Small Form Variants":function(t){return t>=65104&&t<=65135},"Arabic Presentation Forms-B":function(t){return t>=65136&&t<=65279},"Halfwidth and Fullwidth Forms":function(t){return t>=65280&&t<=65519}};function Wn(t){for(var e=0,r=t;e=65097&&t<=65103)||(!!Yn["CJK Compatibility Ideographs"](t)||(!!Yn["CJK Compatibility"](t)||(!!Yn["CJK Radicals Supplement"](t)||(!!Yn["CJK Strokes"](t)||(!(!Yn["CJK Symbols and Punctuation"](t)||t>=12296&&t<=12305||t>=12308&&t<=12319||12336===t)||(!!Yn["CJK 
Unified Ideographs Extension A"](t)||(!!Yn["CJK Unified Ideographs"](t)||(!!Yn["Enclosed CJK Letters and Months"](t)||(!!Yn["Hangul Compatibility Jamo"](t)||(!!Yn["Hangul Jamo Extended-A"](t)||(!!Yn["Hangul Jamo Extended-B"](t)||(!!Yn["Hangul Jamo"](t)||(!!Yn["Hangul Syllables"](t)||(!!Yn.Hiragana(t)||(!!Yn["Ideographic Description Characters"](t)||(!!Yn.Kanbun(t)||(!!Yn["Kangxi Radicals"](t)||(!!Yn["Katakana Phonetic Extensions"](t)||(!(!Yn.Katakana(t)||12540===t)||(!(!Yn["Halfwidth and Fullwidth Forms"](t)||65288===t||65289===t||65293===t||t>=65306&&t<=65310||65339===t||65341===t||65343===t||t>=65371&&t<=65503||65507===t||t>=65512&&t<=65519)||(!(!Yn["Small Form Variants"](t)||t>=65112&&t<=65118||t>=65123&&t<=65126)||(!!Yn["Unified Canadian Aboriginal Syllabics"](t)||(!!Yn["Unified Canadian Aboriginal Syllabics Extended"](t)||(!!Yn["Vertical Forms"](t)||(!!Yn["Yijing Hexagram Symbols"](t)||(!!Yn["Yi Syllables"](t)||!!Yn["Yi Radicals"](t))))))))))))))))))))))))))))))}function Jn(t){return!(Zn(t)||function(t){return!(!Yn["Latin-1 Supplement"](t)||167!==t&&169!==t&&174!==t&&177!==t&&188!==t&&189!==t&&190!==t&&215!==t&&247!==t)||(!(!Yn["General Punctuation"](t)||8214!==t&&8224!==t&&8225!==t&&8240!==t&&8241!==t&&8251!==t&&8252!==t&&8258!==t&&8263!==t&&8264!==t&&8265!==t&&8273!==t)||(!!Yn["Letterlike Symbols"](t)||(!!Yn["Number Forms"](t)||(!(!Yn["Miscellaneous Technical"](t)||!(t>=8960&&t<=8967||t>=8972&&t<=8991||t>=8996&&t<=9e3||9003===t||t>=9085&&t<=9114||t>=9150&&t<=9165||9167===t||t>=9169&&t<=9179||t>=9186&&t<=9215))||(!(!Yn["Control Pictures"](t)||9251===t)||(!!Yn["Optical Character Recognition"](t)||(!!Yn["Enclosed Alphanumerics"](t)||(!!Yn["Geometric Shapes"](t)||(!(!Yn["Miscellaneous Symbols"](t)||t>=9754&&t<=9759)||(!(!Yn["Miscellaneous Symbols and Arrows"](t)||!(t>=11026&&t<=11055||t>=11088&&t<=11097||t>=11192&&t<=11243))||(!!Yn["CJK Symbols and Punctuation"](t)||(!!Yn.Katakana(t)||(!!Yn["Private Use Area"](t)||(!!Yn["CJK Compatibility 
Forms"](t)||(!!Yn["Small Form Variants"](t)||(!!Yn["Halfwidth and Fullwidth Forms"](t)||(8734===t||8756===t||8757===t||t>=9984&&t<=10087||t>=10102&&t<=10131||65532===t||65533===t)))))))))))))))))}(t))}function Kn(t){return t>=1424&&t<=2303||Yn["Arabic Presentation Forms-A"](t)||Yn["Arabic Presentation Forms-B"](t)}function Qn(t,e){return!(!e&&Kn(t))&&!(t>=2304&&t<=3583||t>=3840&&t<=4255||Yn.Khmer(t))}function $n(t){for(var e=0,r=t;e-1&&(ai=ni),ii&&ii(t)};function li(){ci.fire(new Mt("pluginStateChange",{pluginStatus:ai,pluginURL:oi}))}var ci=new Et,ui=function(){return ai},fi=function(){if(ai!==ti||!oi)throw new Error("rtl-text-plugin cannot be downloaded unless a pluginURL is specified");ai=ei,li(),oi&&xt({url:oi},(function(t){t?si(t):(ai=ri,li())}))},hi={applyArabicShaping:null,processBidirectionalText:null,processStyledBidirectionalText:null,isLoaded:function(){return ai===ri||null!=hi.applyArabicShaping},isLoading:function(){return ai===ei},setState:function(t){ai=t.pluginStatus,oi=t.pluginURL},isParsed:function(){return null!=hi.applyArabicShaping&&null!=hi.processBidirectionalText&&null!=hi.processStyledBidirectionalText},getPluginURL:function(){return oi}},pi=function(t,e){this.zoom=t,e?(this.now=e.now,this.fadeDuration=e.fadeDuration,this.zoomHistory=e.zoomHistory,this.transition=e.transition):(this.now=0,this.fadeDuration=0,this.zoomHistory=new Gn,this.transition={})};pi.prototype.isSupportedScript=function(t){return function(t,e){for(var r=0,n=t;rthis.zoomHistory.lastIntegerZoom?{fromScale:2,toScale:1,t:e+(1-e)*r}:{fromScale:.5,toScale:1,t:1-(1-r)*e}};var di=function(t,e){this.property=t,this.value=e,this.expression=function(t,e){if(Br(t))return new Kr(t,e);if(Yr(t)){var r=Jr(t,e);if("error"===r.result)throw new Error(r.value.map((function(t){return t.key+": "+t.message})).join(", "));return r.value}var n=t;return"string"==typeof t&&"color"===e.type&&(n=te.parse(t)),{kind:"constant",evaluate:function(){return n}}}(void 
0===e?t.specification.default:e,t.specification)};di.prototype.isDataDriven=function(){return"source"===this.expression.kind||"composite"===this.expression.kind},di.prototype.possiblyEvaluate=function(t,e,r){return this.property.possiblyEvaluate(this,t,e,r)};var mi=function(t){this.property=t,this.value=new di(t,void 0)};mi.prototype.transitioned=function(t,e){return new vi(this.property,this.value,e,u({},t.transition,this.transition),t.now)},mi.prototype.untransitioned=function(){return new vi(this.property,this.value,null,{},0)};var gi=function(t){this._properties=t,this._values=Object.create(t.defaultTransitionablePropertyValues)};gi.prototype.getValue=function(t){return x(this._values[t].value.value)},gi.prototype.setValue=function(t,e){this._values.hasOwnProperty(t)||(this._values[t]=new mi(this._values[t].property)),this._values[t].value=new di(this._values[t].property,null===e?void 0:x(e))},gi.prototype.getTransition=function(t){return x(this._values[t].transition)},gi.prototype.setTransition=function(t,e){this._values.hasOwnProperty(t)||(this._values[t]=new mi(this._values[t].property)),this._values[t].transition=x(e)||void 0},gi.prototype.serialize=function(){for(var t={},e=0,r=Object.keys(this._values);ethis.end)return this.prior=null,i;if(this.value.isDataDriven())return this.prior=null,i;if(n=1)return 1;var e=t*t,r=e*t;return 4*(t<.5?r:3*(t-e)+r-.75)}(o))}return i};var yi=function(t){this._properties=t,this._values=Object.create(t.defaultTransitioningPropertyValues)};yi.prototype.possiblyEvaluate=function(t,e,r){for(var n=new _i(this._properties),i=0,a=Object.keys(this._values);in.zoomHistory.lastIntegerZoom?{from:t,to:e}:{from:r,to:e}},e.prototype.interpolate=function(t){return t},e}(Ti),Ai=function(t){this.specification=t};Ai.prototype.possiblyEvaluate=function(t,e,r,n){if(void 0!==t.value){if("constant"===t.expression.kind){var i=t.expression.evaluate(e,null,{},r,n);return this._calculate(i,i,i,e)}return this._calculate(t.expression.evaluate(new 
pi(Math.floor(e.zoom-1),e)),t.expression.evaluate(new pi(Math.floor(e.zoom),e)),t.expression.evaluate(new pi(Math.floor(e.zoom+1),e)),e)}},Ai.prototype._calculate=function(t,e,r,n){return n.zoom>n.zoomHistory.lastIntegerZoom?{from:t,to:e}:{from:r,to:e}},Ai.prototype.interpolate=function(t){return t};var Mi=function(t){this.specification=t};Mi.prototype.possiblyEvaluate=function(t,e,r,n){return!!t.expression.evaluate(e,null,{},r,n)},Mi.prototype.interpolate=function(){return!1};var Si=function(t){for(var e in this.properties=t,this.defaultPropertyValues={},this.defaultTransitionablePropertyValues={},this.defaultTransitioningPropertyValues={},this.defaultPossiblyEvaluatedValues={},this.overridableProperties=[],t){var r=t[e];r.specification.overridable&&this.overridableProperties.push(e);var n=this.defaultPropertyValues[e]=new di(r,void 0),i=this.defaultTransitionablePropertyValues[e]=new mi(r);this.defaultTransitioningPropertyValues[e]=i.untransitioned(),this.defaultPossiblyEvaluatedValues[e]=n.possiblyEvaluate({})}};Nn("DataDrivenProperty",Ti),Nn("DataConstantProperty",wi),Nn("CrossFadedDataDrivenProperty",ki),Nn("CrossFadedProperty",Ai),Nn("ColorRampProperty",Mi);var Ei=function(t){function e(e,r){if(t.call(this),this.id=e.id,this.type=e.type,this._featureFilter={filter:function(){return!0},needGeometry:!1},"custom"!==e.type&&(e=e,this.metadata=e.metadata,this.minzoom=e.minzoom,this.maxzoom=e.maxzoom,"background"!==e.type&&(this.source=e.source,this.sourceLayer=e["source-layer"],this.filter=e.filter),r.layout&&(this._unevaluatedLayout=new xi(r.layout)),r.paint)){for(var n in this._transitionablePaint=new gi(r.paint),e.paint)this.setPaintProperty(n,e.paint[n],{validate:!1});for(var i in e.layout)this.setLayoutProperty(i,e.layout[i],{validate:!1});this._transitioningPaint=this._transitionablePaint.untransitioned(),this.paint=new _i(r.paint)}}return 
t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype.getCrossfadeParameters=function(){return this._crossfadeParameters},e.prototype.getLayoutProperty=function(t){return"visibility"===t?this.visibility:this._unevaluatedLayout.getValue(t)},e.prototype.setLayoutProperty=function(t,e,r){if(void 0===r&&(r={}),null!=e){var n="layers."+this.id+".layout."+t;if(this._validate(In,n,t,e,r))return}"visibility"!==t?this._unevaluatedLayout.setValue(t,e):this.visibility=e},e.prototype.getPaintProperty=function(t){return g(t,"-transition")?this._transitionablePaint.getTransition(t.slice(0,-"-transition".length)):this._transitionablePaint.getValue(t)},e.prototype.setPaintProperty=function(t,e,r){if(void 0===r&&(r={}),null!=e){var n="layers."+this.id+".paint."+t;if(this._validate(Pn,n,t,e,r))return!1}if(g(t,"-transition"))return this._transitionablePaint.setTransition(t.slice(0,-"-transition".length),e||void 0),!1;var i=this._transitionablePaint._values[t],a="cross-faded-data-driven"===i.property.specification["property-type"],o=i.value.isDataDriven(),s=i.value;this._transitionablePaint.setValue(t,e),this._handleSpecialPaintPropertyUpdate(t);var l=this._transitionablePaint._values[t].value;return l.isDataDriven()||o||a||this._handleOverridablePaintPropertyUpdate(t,s,l)},e.prototype._handleSpecialPaintPropertyUpdate=function(t){},e.prototype._handleOverridablePaintPropertyUpdate=function(t,e,r){return!1},e.prototype.isHidden=function(t){return!!(this.minzoom&&t=this.maxzoom)||"none"===this.visibility)},e.prototype.updateTransitions=function(t){this._transitioningPaint=this._transitionablePaint.transitioned(t,this._transitioningPaint)},e.prototype.hasTransition=function(){return this._transitioningPaint.hasTransition()},e.prototype.recalculate=function(t,e){t.getCrossfadeParameters&&(this._crossfadeParameters=t.getCrossfadeParameters()),this._unevaluatedLayout&&(this.layout=this._unevaluatedLayout.possiblyEvaluate(t,void 
0,e)),this.paint=this._transitioningPaint.possiblyEvaluate(t,void 0,e)},e.prototype.serialize=function(){var t={id:this.id,type:this.type,source:this.source,"source-layer":this.sourceLayer,metadata:this.metadata,minzoom:this.minzoom,maxzoom:this.maxzoom,filter:this.filter,layout:this._unevaluatedLayout&&this._unevaluatedLayout.serialize(),paint:this._transitionablePaint&&this._transitionablePaint.serialize()};return this.visibility&&(t.layout=t.layout||{},t.layout.visibility=this.visibility),y(t,(function(t,e){return!(void 0===t||"layout"===e&&!Object.keys(t).length||"paint"===e&&!Object.keys(t).length)}))},e.prototype._validate=function(t,e,r,n,i){return void 0===i&&(i={}),(!i||!1!==i.validate)&&On(this,t.call(Ln,{key:e,layerType:this.type,objectKey:r,value:n,styleSpec:Lt,style:{glyphs:!0,sprite:!0}}))},e.prototype.is3D=function(){return!1},e.prototype.isTileClipped=function(){return!1},e.prototype.hasOffscreenPass=function(){return!1},e.prototype.resize=function(){},e.prototype.isStateDependent=function(){for(var t in this.paint._values){var e=this.paint.get(t);if(e instanceof bi&&zr(e.property.specification)&&(("source"===e.value.kind||"composite"===e.value.kind)&&e.value.isStateDependent))return!0}return!1},e}(Et),Li={Int8:Int8Array,Uint8:Uint8Array,Int16:Int16Array,Uint16:Uint16Array,Int32:Int32Array,Uint32:Uint32Array,Float32:Float32Array},Ci=function(t,e){this._structArray=t,this._pos1=e*this.size,this._pos2=this._pos1/2,this._pos4=this._pos1/4,this._pos8=this._pos1/8},Pi=function(){this.isTransferred=!1,this.capacity=-1,this.resize(0)};function Ii(t,e){void 0===e&&(e=1);var r=0,n=0;return{members:t.map((function(t){var i,a=(i=t.type,Li[i].BYTES_PER_ELEMENT),o=r=Oi(r,Math.max(e,a)),s=t.components||1;return n=Math.max(n,a),r+=a*s,{name:t.name,type:t.type,components:s,offset:o}})),size:Oi(r,Math.max(n,e)),alignment:e}}function Oi(t,e){return Math.ceil(t/e)*e}Pi.serialize=function(t,e){return 
t._trim(),e&&(t.isTransferred=!0,e.push(t.arrayBuffer)),{length:t.length,arrayBuffer:t.arrayBuffer}},Pi.deserialize=function(t){var e=Object.create(this.prototype);return e.arrayBuffer=t.arrayBuffer,e.length=t.length,e.capacity=t.arrayBuffer.byteLength/e.bytesPerElement,e._refreshViews(),e},Pi.prototype._trim=function(){this.length!==this.capacity&&(this.capacity=this.length,this.arrayBuffer=this.arrayBuffer.slice(0,this.length*this.bytesPerElement),this._refreshViews())},Pi.prototype.clear=function(){this.length=0},Pi.prototype.resize=function(t){this.reserve(t),this.length=t},Pi.prototype.reserve=function(t){if(t>this.capacity){this.capacity=Math.max(t,Math.floor(5*this.capacity),128),this.arrayBuffer=new ArrayBuffer(this.capacity*this.bytesPerElement);var e=this.uint8;this._refreshViews(),e&&this.uint8.set(e)}},Pi.prototype._refreshViews=function(){throw new Error("_refreshViews() must be implemented by each concrete StructArray layout")};var zi=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype._refreshViews=function(){this.uint8=new Uint8Array(this.arrayBuffer),this.int16=new Int16Array(this.arrayBuffer)},e.prototype.emplaceBack=function(t,e){var r=this.length;return this.resize(r+1),this.emplace(r,t,e)},e.prototype.emplace=function(t,e,r){var n=2*t;return this.int16[n+0]=e,this.int16[n+1]=r,t},e}(Pi);zi.prototype.bytesPerElement=4,Nn("StructArrayLayout2i4",zi);var Di=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype._refreshViews=function(){this.uint8=new Uint8Array(this.arrayBuffer),this.int16=new Int16Array(this.arrayBuffer)},e.prototype.emplaceBack=function(t,e,r,n){var i=this.length;return this.resize(i+1),this.emplace(i,t,e,r,n)},e.prototype.emplace=function(t,e,r,n,i){var a=4*t;return 
this.int16[a+0]=e,this.int16[a+1]=r,this.int16[a+2]=n,this.int16[a+3]=i,t},e}(Pi);Di.prototype.bytesPerElement=8,Nn("StructArrayLayout4i8",Di);var Ri=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype._refreshViews=function(){this.uint8=new Uint8Array(this.arrayBuffer),this.int16=new Int16Array(this.arrayBuffer)},e.prototype.emplaceBack=function(t,e,r,n,i,a){var o=this.length;return this.resize(o+1),this.emplace(o,t,e,r,n,i,a)},e.prototype.emplace=function(t,e,r,n,i,a,o){var s=6*t;return this.int16[s+0]=e,this.int16[s+1]=r,this.int16[s+2]=n,this.int16[s+3]=i,this.int16[s+4]=a,this.int16[s+5]=o,t},e}(Pi);Ri.prototype.bytesPerElement=12,Nn("StructArrayLayout2i4i12",Ri);var Fi=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype._refreshViews=function(){this.uint8=new Uint8Array(this.arrayBuffer),this.int16=new Int16Array(this.arrayBuffer)},e.prototype.emplaceBack=function(t,e,r,n,i,a){var o=this.length;return this.resize(o+1),this.emplace(o,t,e,r,n,i,a)},e.prototype.emplace=function(t,e,r,n,i,a,o){var s=4*t,l=8*t;return this.int16[s+0]=e,this.int16[s+1]=r,this.uint8[l+4]=n,this.uint8[l+5]=i,this.uint8[l+6]=a,this.uint8[l+7]=o,t},e}(Pi);Fi.prototype.bytesPerElement=8,Nn("StructArrayLayout2i4ub8",Fi);var Bi=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype._refreshViews=function(){this.uint8=new Uint8Array(this.arrayBuffer),this.uint16=new Uint16Array(this.arrayBuffer)},e.prototype.emplaceBack=function(t,e,r,n,i,a,o,s,l,c){var u=this.length;return this.resize(u+1),this.emplace(u,t,e,r,n,i,a,o,s,l,c)},e.prototype.emplace=function(t,e,r,n,i,a,o,s,l,c,u){var f=9*t,h=18*t;return 
this.uint16[f+0]=e,this.uint16[f+1]=r,this.uint16[f+2]=n,this.uint16[f+3]=i,this.uint16[f+4]=a,this.uint16[f+5]=o,this.uint16[f+6]=s,this.uint16[f+7]=l,this.uint8[h+16]=c,this.uint8[h+17]=u,t},e}(Pi);Bi.prototype.bytesPerElement=18,Nn("StructArrayLayout8ui2ub18",Bi);var Ni=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype._refreshViews=function(){this.uint8=new Uint8Array(this.arrayBuffer),this.int16=new Int16Array(this.arrayBuffer),this.uint16=new Uint16Array(this.arrayBuffer)},e.prototype.emplaceBack=function(t,e,r,n,i,a,o,s,l,c,u,f){var h=this.length;return this.resize(h+1),this.emplace(h,t,e,r,n,i,a,o,s,l,c,u,f)},e.prototype.emplace=function(t,e,r,n,i,a,o,s,l,c,u,f,h){var p=12*t;return this.int16[p+0]=e,this.int16[p+1]=r,this.int16[p+2]=n,this.int16[p+3]=i,this.uint16[p+4]=a,this.uint16[p+5]=o,this.uint16[p+6]=s,this.uint16[p+7]=l,this.int16[p+8]=c,this.int16[p+9]=u,this.int16[p+10]=f,this.int16[p+11]=h,t},e}(Pi);Ni.prototype.bytesPerElement=24,Nn("StructArrayLayout4i4ui4i24",Ni);var ji=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype._refreshViews=function(){this.uint8=new Uint8Array(this.arrayBuffer),this.float32=new Float32Array(this.arrayBuffer)},e.prototype.emplaceBack=function(t,e,r){var n=this.length;return this.resize(n+1),this.emplace(n,t,e,r)},e.prototype.emplace=function(t,e,r,n){var i=3*t;return this.float32[i+0]=e,this.float32[i+1]=r,this.float32[i+2]=n,t},e}(Pi);ji.prototype.bytesPerElement=12,Nn("StructArrayLayout3f12",ji);var Ui=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype._refreshViews=function(){this.uint8=new Uint8Array(this.arrayBuffer),this.uint32=new Uint32Array(this.arrayBuffer)},e.prototype.emplaceBack=function(t){var 
e=this.length;return this.resize(e+1),this.emplace(e,t)},e.prototype.emplace=function(t,e){var r=1*t;return this.uint32[r+0]=e,t},e}(Pi);Ui.prototype.bytesPerElement=4,Nn("StructArrayLayout1ul4",Ui);var Vi=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype._refreshViews=function(){this.uint8=new Uint8Array(this.arrayBuffer),this.int16=new Int16Array(this.arrayBuffer),this.uint32=new Uint32Array(this.arrayBuffer),this.uint16=new Uint16Array(this.arrayBuffer)},e.prototype.emplaceBack=function(t,e,r,n,i,a,o,s,l){var c=this.length;return this.resize(c+1),this.emplace(c,t,e,r,n,i,a,o,s,l)},e.prototype.emplace=function(t,e,r,n,i,a,o,s,l,c){var u=10*t,f=5*t;return this.int16[u+0]=e,this.int16[u+1]=r,this.int16[u+2]=n,this.int16[u+3]=i,this.int16[u+4]=a,this.int16[u+5]=o,this.uint32[f+3]=s,this.uint16[u+8]=l,this.uint16[u+9]=c,t},e}(Pi);Vi.prototype.bytesPerElement=20,Nn("StructArrayLayout6i1ul2ui20",Vi);var Hi=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype._refreshViews=function(){this.uint8=new Uint8Array(this.arrayBuffer),this.int16=new Int16Array(this.arrayBuffer)},e.prototype.emplaceBack=function(t,e,r,n,i,a){var o=this.length;return this.resize(o+1),this.emplace(o,t,e,r,n,i,a)},e.prototype.emplace=function(t,e,r,n,i,a,o){var s=6*t;return this.int16[s+0]=e,this.int16[s+1]=r,this.int16[s+2]=n,this.int16[s+3]=i,this.int16[s+4]=a,this.int16[s+5]=o,t},e}(Pi);Hi.prototype.bytesPerElement=12,Nn("StructArrayLayout2i2i2i12",Hi);var qi=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype._refreshViews=function(){this.uint8=new Uint8Array(this.arrayBuffer),this.float32=new Float32Array(this.arrayBuffer),this.int16=new 
Int16Array(this.arrayBuffer)},e.prototype.emplaceBack=function(t,e,r,n,i){var a=this.length;return this.resize(a+1),this.emplace(a,t,e,r,n,i)},e.prototype.emplace=function(t,e,r,n,i,a){var o=4*t,s=8*t;return this.float32[o+0]=e,this.float32[o+1]=r,this.float32[o+2]=n,this.int16[s+6]=i,this.int16[s+7]=a,t},e}(Pi);qi.prototype.bytesPerElement=16,Nn("StructArrayLayout2f1f2i16",qi);var Gi=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype._refreshViews=function(){this.uint8=new Uint8Array(this.arrayBuffer),this.float32=new Float32Array(this.arrayBuffer)},e.prototype.emplaceBack=function(t,e,r,n){var i=this.length;return this.resize(i+1),this.emplace(i,t,e,r,n)},e.prototype.emplace=function(t,e,r,n,i){var a=12*t,o=3*t;return this.uint8[a+0]=e,this.uint8[a+1]=r,this.float32[o+1]=n,this.float32[o+2]=i,t},e}(Pi);Gi.prototype.bytesPerElement=12,Nn("StructArrayLayout2ub2f12",Gi);var Yi=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype._refreshViews=function(){this.uint8=new Uint8Array(this.arrayBuffer),this.uint16=new Uint16Array(this.arrayBuffer)},e.prototype.emplaceBack=function(t,e,r){var n=this.length;return this.resize(n+1),this.emplace(n,t,e,r)},e.prototype.emplace=function(t,e,r,n){var i=3*t;return this.uint16[i+0]=e,this.uint16[i+1]=r,this.uint16[i+2]=n,t},e}(Pi);Yi.prototype.bytesPerElement=6,Nn("StructArrayLayout3ui6",Yi);var Wi=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype._refreshViews=function(){this.uint8=new Uint8Array(this.arrayBuffer),this.int16=new Int16Array(this.arrayBuffer),this.uint16=new Uint16Array(this.arrayBuffer),this.uint32=new Uint32Array(this.arrayBuffer),this.float32=new 
Float32Array(this.arrayBuffer)},e.prototype.emplaceBack=function(t,e,r,n,i,a,o,s,l,c,u,f,h,p,d,m,g){var v=this.length;return this.resize(v+1),this.emplace(v,t,e,r,n,i,a,o,s,l,c,u,f,h,p,d,m,g)},e.prototype.emplace=function(t,e,r,n,i,a,o,s,l,c,u,f,h,p,d,m,g,v){var y=24*t,x=12*t,b=48*t;return this.int16[y+0]=e,this.int16[y+1]=r,this.uint16[y+2]=n,this.uint16[y+3]=i,this.uint32[x+2]=a,this.uint32[x+3]=o,this.uint32[x+4]=s,this.uint16[y+10]=l,this.uint16[y+11]=c,this.uint16[y+12]=u,this.float32[x+7]=f,this.float32[x+8]=h,this.uint8[b+36]=p,this.uint8[b+37]=d,this.uint8[b+38]=m,this.uint32[x+10]=g,this.int16[y+22]=v,t},e}(Pi);Wi.prototype.bytesPerElement=48,Nn("StructArrayLayout2i2ui3ul3ui2f3ub1ul1i48",Wi);var Xi=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype._refreshViews=function(){this.uint8=new Uint8Array(this.arrayBuffer),this.int16=new Int16Array(this.arrayBuffer),this.uint16=new Uint16Array(this.arrayBuffer),this.uint32=new Uint32Array(this.arrayBuffer),this.float32=new Float32Array(this.arrayBuffer)},e.prototype.emplaceBack=function(t,e,r,n,i,a,o,s,l,c,u,f,h,p,d,m,g,v,y,x,b,_,w,T,k,A,M,S){var E=this.length;return this.resize(E+1),this.emplace(E,t,e,r,n,i,a,o,s,l,c,u,f,h,p,d,m,g,v,y,x,b,_,w,T,k,A,M,S)},e.prototype.emplace=function(t,e,r,n,i,a,o,s,l,c,u,f,h,p,d,m,g,v,y,x,b,_,w,T,k,A,M,S,E){var L=34*t,C=17*t;return 
this.int16[L+0]=e,this.int16[L+1]=r,this.int16[L+2]=n,this.int16[L+3]=i,this.int16[L+4]=a,this.int16[L+5]=o,this.int16[L+6]=s,this.int16[L+7]=l,this.uint16[L+8]=c,this.uint16[L+9]=u,this.uint16[L+10]=f,this.uint16[L+11]=h,this.uint16[L+12]=p,this.uint16[L+13]=d,this.uint16[L+14]=m,this.uint16[L+15]=g,this.uint16[L+16]=v,this.uint16[L+17]=y,this.uint16[L+18]=x,this.uint16[L+19]=b,this.uint16[L+20]=_,this.uint16[L+21]=w,this.uint16[L+22]=T,this.uint32[C+12]=k,this.float32[C+13]=A,this.float32[C+14]=M,this.float32[C+15]=S,this.float32[C+16]=E,t},e}(Pi);Xi.prototype.bytesPerElement=68,Nn("StructArrayLayout8i15ui1ul4f68",Xi);var Zi=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype._refreshViews=function(){this.uint8=new Uint8Array(this.arrayBuffer),this.float32=new Float32Array(this.arrayBuffer)},e.prototype.emplaceBack=function(t){var e=this.length;return this.resize(e+1),this.emplace(e,t)},e.prototype.emplace=function(t,e){var r=1*t;return this.float32[r+0]=e,t},e}(Pi);Zi.prototype.bytesPerElement=4,Nn("StructArrayLayout1f4",Zi);var Ji=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype._refreshViews=function(){this.uint8=new Uint8Array(this.arrayBuffer),this.int16=new Int16Array(this.arrayBuffer)},e.prototype.emplaceBack=function(t,e,r){var n=this.length;return this.resize(n+1),this.emplace(n,t,e,r)},e.prototype.emplace=function(t,e,r,n){var i=3*t;return this.int16[i+0]=e,this.int16[i+1]=r,this.int16[i+2]=n,t},e}(Pi);Ji.prototype.bytesPerElement=6,Nn("StructArrayLayout3i6",Ji);var Ki=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype._refreshViews=function(){this.uint8=new Uint8Array(this.arrayBuffer),this.uint32=new 
Uint32Array(this.arrayBuffer),this.uint16=new Uint16Array(this.arrayBuffer)},e.prototype.emplaceBack=function(t,e,r){var n=this.length;return this.resize(n+1),this.emplace(n,t,e,r)},e.prototype.emplace=function(t,e,r,n){var i=2*t,a=4*t;return this.uint32[i+0]=e,this.uint16[a+2]=r,this.uint16[a+3]=n,t},e}(Pi);Ki.prototype.bytesPerElement=8,Nn("StructArrayLayout1ul2ui8",Ki);var Qi=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype._refreshViews=function(){this.uint8=new Uint8Array(this.arrayBuffer),this.uint16=new Uint16Array(this.arrayBuffer)},e.prototype.emplaceBack=function(t,e){var r=this.length;return this.resize(r+1),this.emplace(r,t,e)},e.prototype.emplace=function(t,e,r){var n=2*t;return this.uint16[n+0]=e,this.uint16[n+1]=r,t},e}(Pi);Qi.prototype.bytesPerElement=4,Nn("StructArrayLayout2ui4",Qi);var $i=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype._refreshViews=function(){this.uint8=new Uint8Array(this.arrayBuffer),this.uint16=new Uint16Array(this.arrayBuffer)},e.prototype.emplaceBack=function(t){var e=this.length;return this.resize(e+1),this.emplace(e,t)},e.prototype.emplace=function(t,e){var r=1*t;return this.uint16[r+0]=e,t},e}(Pi);$i.prototype.bytesPerElement=2,Nn("StructArrayLayout1ui2",$i);var ta=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype._refreshViews=function(){this.uint8=new Uint8Array(this.arrayBuffer),this.float32=new Float32Array(this.arrayBuffer)},e.prototype.emplaceBack=function(t,e){var r=this.length;return this.resize(r+1),this.emplace(r,t,e)},e.prototype.emplace=function(t,e,r){var n=2*t;return this.float32[n+0]=e,this.float32[n+1]=r,t},e}(Pi);ta.prototype.bytesPerElement=8,Nn("StructArrayLayout2f8",ta);var 
ea=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype._refreshViews=function(){this.uint8=new Uint8Array(this.arrayBuffer),this.float32=new Float32Array(this.arrayBuffer)},e.prototype.emplaceBack=function(t,e,r,n){var i=this.length;return this.resize(i+1),this.emplace(i,t,e,r,n)},e.prototype.emplace=function(t,e,r,n,i){var a=4*t;return this.float32[a+0]=e,this.float32[a+1]=r,this.float32[a+2]=n,this.float32[a+3]=i,t},e}(Pi);ea.prototype.bytesPerElement=16,Nn("StructArrayLayout4f16",ea);var ra=function(t){function e(){t.apply(this,arguments)}t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e;var r={anchorPointX:{configurable:!0},anchorPointY:{configurable:!0},x1:{configurable:!0},y1:{configurable:!0},x2:{configurable:!0},y2:{configurable:!0},featureIndex:{configurable:!0},sourceLayerIndex:{configurable:!0},bucketIndex:{configurable:!0},anchorPoint:{configurable:!0}};return r.anchorPointX.get=function(){return this._structArray.int16[this._pos2+0]},r.anchorPointY.get=function(){return this._structArray.int16[this._pos2+1]},r.x1.get=function(){return this._structArray.int16[this._pos2+2]},r.y1.get=function(){return this._structArray.int16[this._pos2+3]},r.x2.get=function(){return this._structArray.int16[this._pos2+4]},r.y2.get=function(){return this._structArray.int16[this._pos2+5]},r.featureIndex.get=function(){return this._structArray.uint32[this._pos4+3]},r.sourceLayerIndex.get=function(){return this._structArray.uint16[this._pos2+8]},r.bucketIndex.get=function(){return this._structArray.uint16[this._pos2+9]},r.anchorPoint.get=function(){return new i(this.anchorPointX,this.anchorPointY)},Object.defineProperties(e.prototype,r),e}(Ci);ra.prototype.size=20;var na=function(t){function e(){t.apply(this,arguments)}return 
t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype.get=function(t){return new ra(this,t)},e}(Vi);Nn("CollisionBoxArray",na);var ia=function(t){function e(){t.apply(this,arguments)}t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e;var r={anchorX:{configurable:!0},anchorY:{configurable:!0},glyphStartIndex:{configurable:!0},numGlyphs:{configurable:!0},vertexStartIndex:{configurable:!0},lineStartIndex:{configurable:!0},lineLength:{configurable:!0},segment:{configurable:!0},lowerSize:{configurable:!0},upperSize:{configurable:!0},lineOffsetX:{configurable:!0},lineOffsetY:{configurable:!0},writingMode:{configurable:!0},placedOrientation:{configurable:!0},hidden:{configurable:!0},crossTileID:{configurable:!0},associatedIconIndex:{configurable:!0}};return r.anchorX.get=function(){return this._structArray.int16[this._pos2+0]},r.anchorY.get=function(){return this._structArray.int16[this._pos2+1]},r.glyphStartIndex.get=function(){return this._structArray.uint16[this._pos2+2]},r.numGlyphs.get=function(){return this._structArray.uint16[this._pos2+3]},r.vertexStartIndex.get=function(){return this._structArray.uint32[this._pos4+2]},r.lineStartIndex.get=function(){return this._structArray.uint32[this._pos4+3]},r.lineLength.get=function(){return this._structArray.uint32[this._pos4+4]},r.segment.get=function(){return this._structArray.uint16[this._pos2+10]},r.lowerSize.get=function(){return this._structArray.uint16[this._pos2+11]},r.upperSize.get=function(){return this._structArray.uint16[this._pos2+12]},r.lineOffsetX.get=function(){return this._structArray.float32[this._pos4+7]},r.lineOffsetY.get=function(){return this._structArray.float32[this._pos4+8]},r.writingMode.get=function(){return this._structArray.uint8[this._pos1+36]},r.placedOrientation.get=function(){return 
this._structArray.uint8[this._pos1+37]},r.placedOrientation.set=function(t){this._structArray.uint8[this._pos1+37]=t},r.hidden.get=function(){return this._structArray.uint8[this._pos1+38]},r.hidden.set=function(t){this._structArray.uint8[this._pos1+38]=t},r.crossTileID.get=function(){return this._structArray.uint32[this._pos4+10]},r.crossTileID.set=function(t){this._structArray.uint32[this._pos4+10]=t},r.associatedIconIndex.get=function(){return this._structArray.int16[this._pos2+22]},Object.defineProperties(e.prototype,r),e}(Ci);ia.prototype.size=48;var aa=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype.get=function(t){return new ia(this,t)},e}(Wi);Nn("PlacedSymbolArray",aa);var oa=function(t){function e(){t.apply(this,arguments)}t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e;var r={anchorX:{configurable:!0},anchorY:{configurable:!0},rightJustifiedTextSymbolIndex:{configurable:!0},centerJustifiedTextSymbolIndex:{configurable:!0},leftJustifiedTextSymbolIndex:{configurable:!0},verticalPlacedTextSymbolIndex:{configurable:!0},placedIconSymbolIndex:{configurable:!0},verticalPlacedIconSymbolIndex:{configurable:!0},key:{configurable:!0},textBoxStartIndex:{configurable:!0},textBoxEndIndex:{configurable:!0},verticalTextBoxStartIndex:{configurable:!0},verticalTextBoxEndIndex:{configurable:!0},iconBoxStartIndex:{configurable:!0},iconBoxEndIndex:{configurable:!0},verticalIconBoxStartIndex:{configurable:!0},verticalIconBoxEndIndex:{configurable:!0},featureIndex:{configurable:!0},numHorizontalGlyphVertices:{configurable:!0},numVerticalGlyphVertices:{configurable:!0},numIconVertices:{configurable:!0},numVerticalIconVertices:{configurable:!0},useRuntimeCollisionCircles:{configurable:!0},crossTileID:{configurable:!0},textBoxScale:{configurable:!0},textOffset0:{configurable:!0},textOffset1:{configurable:!0},collisionCircleDiameter:{config
urable:!0}};return r.anchorX.get=function(){return this._structArray.int16[this._pos2+0]},r.anchorY.get=function(){return this._structArray.int16[this._pos2+1]},r.rightJustifiedTextSymbolIndex.get=function(){return this._structArray.int16[this._pos2+2]},r.centerJustifiedTextSymbolIndex.get=function(){return this._structArray.int16[this._pos2+3]},r.leftJustifiedTextSymbolIndex.get=function(){return this._structArray.int16[this._pos2+4]},r.verticalPlacedTextSymbolIndex.get=function(){return this._structArray.int16[this._pos2+5]},r.placedIconSymbolIndex.get=function(){return this._structArray.int16[this._pos2+6]},r.verticalPlacedIconSymbolIndex.get=function(){return this._structArray.int16[this._pos2+7]},r.key.get=function(){return this._structArray.uint16[this._pos2+8]},r.textBoxStartIndex.get=function(){return this._structArray.uint16[this._pos2+9]},r.textBoxEndIndex.get=function(){return this._structArray.uint16[this._pos2+10]},r.verticalTextBoxStartIndex.get=function(){return this._structArray.uint16[this._pos2+11]},r.verticalTextBoxEndIndex.get=function(){return this._structArray.uint16[this._pos2+12]},r.iconBoxStartIndex.get=function(){return this._structArray.uint16[this._pos2+13]},r.iconBoxEndIndex.get=function(){return this._structArray.uint16[this._pos2+14]},r.verticalIconBoxStartIndex.get=function(){return this._structArray.uint16[this._pos2+15]},r.verticalIconBoxEndIndex.get=function(){return this._structArray.uint16[this._pos2+16]},r.featureIndex.get=function(){return this._structArray.uint16[this._pos2+17]},r.numHorizontalGlyphVertices.get=function(){return this._structArray.uint16[this._pos2+18]},r.numVerticalGlyphVertices.get=function(){return this._structArray.uint16[this._pos2+19]},r.numIconVertices.get=function(){return this._structArray.uint16[this._pos2+20]},r.numVerticalIconVertices.get=function(){return this._structArray.uint16[this._pos2+21]},r.useRuntimeCollisionCircles.get=function(){return 
this._structArray.uint16[this._pos2+22]},r.crossTileID.get=function(){return this._structArray.uint32[this._pos4+12]},r.crossTileID.set=function(t){this._structArray.uint32[this._pos4+12]=t},r.textBoxScale.get=function(){return this._structArray.float32[this._pos4+13]},r.textOffset0.get=function(){return this._structArray.float32[this._pos4+14]},r.textOffset1.get=function(){return this._structArray.float32[this._pos4+15]},r.collisionCircleDiameter.get=function(){return this._structArray.float32[this._pos4+16]},Object.defineProperties(e.prototype,r),e}(Ci);oa.prototype.size=68;var sa=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype.get=function(t){return new oa(this,t)},e}(Xi);Nn("SymbolInstanceArray",sa);var la=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype.getoffsetX=function(t){return this.float32[1*t+0]},e}(Zi);Nn("GlyphOffsetArray",la);var ca=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype.getx=function(t){return this.int16[3*t+0]},e.prototype.gety=function(t){return this.int16[3*t+1]},e.prototype.gettileUnitDistanceFromAnchor=function(t){return this.int16[3*t+2]},e}(Ji);Nn("SymbolLineVertexArray",ca);var ua=function(t){function e(){t.apply(this,arguments)}t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e;var r={featureIndex:{configurable:!0},sourceLayerIndex:{configurable:!0},bucketIndex:{configurable:!0}};return r.featureIndex.get=function(){return this._structArray.uint32[this._pos4+0]},r.sourceLayerIndex.get=function(){return this._structArray.uint16[this._pos2+2]},r.bucketIndex.get=function(){return 
this._structArray.uint16[this._pos2+3]},Object.defineProperties(e.prototype,r),e}(Ci);ua.prototype.size=8;var fa=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype.get=function(t){return new ua(this,t)},e}(Ki);Nn("FeatureIndexArray",fa);var ha=Ii([{name:"a_pos",components:2,type:"Int16"}],4).members,pa=function(t){void 0===t&&(t=[]),this.segments=t};function da(t,e){return 256*(t=l(Math.floor(t),0,255))+(e=l(Math.floor(e),0,255))}pa.prototype.prepareSegment=function(t,e,r,n){var i=this.segments[this.segments.length-1];return t>pa.MAX_VERTEX_ARRAY_LENGTH&&_("Max vertices per segment is "+pa.MAX_VERTEX_ARRAY_LENGTH+": bucket requested "+t),(!i||i.vertexLength+t>pa.MAX_VERTEX_ARRAY_LENGTH||i.sortKey!==n)&&(i={vertexOffset:e.length,primitiveOffset:r.length,vertexLength:0,primitiveLength:0},void 0!==n&&(i.sortKey=n),this.segments.push(i)),i},pa.prototype.get=function(){return this.segments},pa.prototype.destroy=function(){for(var t=0,e=this.segments;t>>16)*o&65535)<<16)&4294967295)<<15|l>>>17))*s+(((l>>>16)*s&65535)<<16)&4294967295)<<13|i>>>19))+((5*(i>>>16)&65535)<<16)&4294967295))+((58964+(a>>>16)&65535)<<16);switch(l=0,r){case 3:l^=(255&t.charCodeAt(c+2))<<16;case 2:l^=(255&t.charCodeAt(c+1))<<8;case 1:i^=l=(65535&(l=(l=(65535&(l^=255&t.charCodeAt(c)))*o+(((l>>>16)*o&65535)<<16)&4294967295)<<15|l>>>17))*s+(((l>>>16)*s&65535)<<16)&4294967295}return i^=t.length,i=2246822507*(65535&(i^=i>>>16))+((2246822507*(i>>>16)&65535)<<16)&4294967295,i=3266489909*(65535&(i^=i>>>13))+((3266489909*(i>>>16)&65535)<<16)&4294967295,(i^=i>>>16)>>>0}})),va=e((function(t){t.exports=function(t,e){for(var 
r,n=t.length,i=e^n,a=0;n>=4;)r=1540483477*(65535&(r=255&t.charCodeAt(a)|(255&t.charCodeAt(++a))<<8|(255&t.charCodeAt(++a))<<16|(255&t.charCodeAt(++a))<<24))+((1540483477*(r>>>16)&65535)<<16),i=1540483477*(65535&i)+((1540483477*(i>>>16)&65535)<<16)^(r=1540483477*(65535&(r^=r>>>24))+((1540483477*(r>>>16)&65535)<<16)),n-=4,++a;switch(n){case 3:i^=(255&t.charCodeAt(a+2))<<16;case 2:i^=(255&t.charCodeAt(a+1))<<8;case 1:i=1540483477*(65535&(i^=255&t.charCodeAt(a)))+((1540483477*(i>>>16)&65535)<<16)}return i=1540483477*(65535&(i^=i>>>13))+((1540483477*(i>>>16)&65535)<<16),(i^=i>>>15)>>>0}})),ya=ga,xa=ga,ba=va;ya.murmur3=xa,ya.murmur2=ba;var _a=function(){this.ids=[],this.positions=[],this.indexed=!1};_a.prototype.add=function(t,e,r,n){this.ids.push(Ta(t)),this.positions.push(e,r,n)},_a.prototype.getPositions=function(t){for(var e=Ta(t),r=0,n=this.ids.length-1;r>1;this.ids[i]>=e?n=i:r=i+1}for(var a=[];this.ids[r]===e;){var o=this.positions[3*r],s=this.positions[3*r+1],l=this.positions[3*r+2];a.push({index:o,start:s,end:l}),r++}return a},_a.serialize=function(t,e){var r=new Float64Array(t.ids),n=new Uint32Array(t.positions);return function t(e,r,n,i){for(;n>1],o=n-1,s=i+1;;){do{o++}while(e[o]a);if(o>=s)break;ka(e,o,s),ka(r,3*o,3*s),ka(r,3*o+1,3*s+1),ka(r,3*o+2,3*s+2)}s-nGa.max||o.yGa.max)&&(_("Geometry exceeds allowed extent, reduce your vector tile buffer size"),o.x=l(o.x,Ga.min,Ga.max),o.y=l(o.y,Ga.min,Ga.max))}return r}function Wa(t,e,r,n,i){t.emplaceBack(2*e+(n+1)/2,2*r+(i+1)/2)}var Xa=function(t){this.zoom=t.zoom,this.overscaling=t.overscaling,this.layers=t.layers,this.layerIds=this.layers.map((function(t){return t.id})),this.index=t.index,this.hasPattern=!1,this.layoutVertexArray=new zi,this.indexArray=new Yi,this.segments=new pa,this.programConfigurations=new Ua(ha,t.layers,t.zoom),this.stateDependentLayerIds=this.layers.filter((function(t){return t.isStateDependent()})).map((function(t){return t.id}))};function Za(t,e){for(var r=0;r1){if($a(t,e))return!0;for(var 
n=0;n1?t.distSqr(r):t.distSqr(r.sub(e)._mult(i)._add(e))}function no(t,e){for(var r,n,i,a=!1,o=0;oe.y!=i.y>e.y&&e.x<(i.x-n.x)*(e.y-n.y)/(i.y-n.y)+n.x&&(a=!a);return a}function io(t,e){for(var r=!1,n=0,i=t.length-1;ne.y!=o.y>e.y&&e.x<(o.x-a.x)*(e.y-a.y)/(o.y-a.y)+a.x&&(r=!r)}return r}function ao(t,e,r){var n=r[0],i=r[2];if(t.xi.x&&e.x>i.x||t.yi.y&&e.y>i.y)return!1;var a=w(t,e,r[0]);return a!==w(t,e,r[1])||a!==w(t,e,r[2])||a!==w(t,e,r[3])}function oo(t,e,r){var n=e.paint.get(t).value;return"constant"===n.kind?n.value:r.programConfigurations.get(e.id).getMaxValue(t)}function so(t){return Math.sqrt(t[0]*t[0]+t[1]*t[1])}function lo(t,e,r,n,a){if(!e[0]&&!e[1])return t;var o=i.convert(e)._mult(a);"viewport"===r&&o._rotate(-n);for(var s=[],l=0;l=8192||u<0||u>=8192)){var f=this.segments.prepareSegment(4,this.layoutVertexArray,this.indexArray,t.sortKey),h=f.vertexLength;Wa(this.layoutVertexArray,c,u,-1,-1),Wa(this.layoutVertexArray,c,u,1,-1),Wa(this.layoutVertexArray,c,u,1,1),Wa(this.layoutVertexArray,c,u,-1,1),this.indexArray.emplaceBack(h,h+1,h+2),this.indexArray.emplaceBack(h,h+3,h+2),f.vertexLength+=4,f.primitiveLength+=2}}this.programConfigurations.populatePaintArrays(this.layoutVertexArray.length,t,r,{},n)},Nn("CircleBucket",Xa,{omit:["layers"]});var co=new Si({"circle-sort-key":new Ti(Lt.layout_circle["circle-sort-key"])}),uo={paint:new Si({"circle-radius":new Ti(Lt.paint_circle["circle-radius"]),"circle-color":new Ti(Lt.paint_circle["circle-color"]),"circle-blur":new Ti(Lt.paint_circle["circle-blur"]),"circle-opacity":new Ti(Lt.paint_circle["circle-opacity"]),"circle-translate":new wi(Lt.paint_circle["circle-translate"]),"circle-translate-anchor":new wi(Lt.paint_circle["circle-translate-anchor"]),"circle-pitch-scale":new wi(Lt.paint_circle["circle-pitch-scale"]),"circle-pitch-alignment":new wi(Lt.paint_circle["circle-pitch-alignment"]),"circle-stroke-width":new Ti(Lt.paint_circle["circle-stroke-width"]),"circle-stroke-color":new 
Ti(Lt.paint_circle["circle-stroke-color"]),"circle-stroke-opacity":new Ti(Lt.paint_circle["circle-stroke-opacity"])}),layout:co},fo="undefined"!=typeof Float32Array?Float32Array:Array;function ho(t){return t[0]=1,t[1]=0,t[2]=0,t[3]=0,t[4]=0,t[5]=1,t[6]=0,t[7]=0,t[8]=0,t[9]=0,t[10]=1,t[11]=0,t[12]=0,t[13]=0,t[14]=0,t[15]=1,t}function po(t,e,r){var n=e[0],i=e[1],a=e[2],o=e[3],s=e[4],l=e[5],c=e[6],u=e[7],f=e[8],h=e[9],p=e[10],d=e[11],m=e[12],g=e[13],v=e[14],y=e[15],x=r[0],b=r[1],_=r[2],w=r[3];return t[0]=x*n+b*s+_*f+w*m,t[1]=x*i+b*l+_*h+w*g,t[2]=x*a+b*c+_*p+w*v,t[3]=x*o+b*u+_*d+w*y,x=r[4],b=r[5],_=r[6],w=r[7],t[4]=x*n+b*s+_*f+w*m,t[5]=x*i+b*l+_*h+w*g,t[6]=x*a+b*c+_*p+w*v,t[7]=x*o+b*u+_*d+w*y,x=r[8],b=r[9],_=r[10],w=r[11],t[8]=x*n+b*s+_*f+w*m,t[9]=x*i+b*l+_*h+w*g,t[10]=x*a+b*c+_*p+w*v,t[11]=x*o+b*u+_*d+w*y,x=r[12],b=r[13],_=r[14],w=r[15],t[12]=x*n+b*s+_*f+w*m,t[13]=x*i+b*l+_*h+w*g,t[14]=x*a+b*c+_*p+w*v,t[15]=x*o+b*u+_*d+w*y,t}Math.hypot||(Math.hypot=function(){for(var t=arguments,e=0,r=arguments.length;r--;)e+=t[r]*t[r];return Math.sqrt(e)});var mo=po;var go,vo,yo=function(t,e,r){return t[0]=e[0]-r[0],t[1]=e[1]-r[1],t[2]=e[2]-r[2],t};go=new fo(3),fo!=Float32Array&&(go[0]=0,go[1]=0,go[2]=0),vo=go;function xo(t,e,r){var n=e[0],i=e[1],a=e[2],o=e[3];return t[0]=r[0]*n+r[4]*i+r[8]*a+r[12]*o,t[1]=r[1]*n+r[5]*i+r[9]*a+r[13]*o,t[2]=r[2]*n+r[6]*i+r[10]*a+r[14]*o,t[3]=r[3]*n+r[7]*i+r[11]*a+r[15]*o,t}!function(){var t=function(){var t=new fo(4);return fo!=Float32Array&&(t[0]=0,t[1]=0,t[2]=0,t[3]=0),t}()}();var bo=function(t){var e=t[0],r=t[1];return e*e+r*r},_o=(function(){var t=function(){var t=new fo(2);return fo!=Float32Array&&(t[0]=0,t[1]=0),t}()}(),function(t){function e(e){t.call(this,e,uo)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype.createBucket=function(t){return new Xa(t)},e.prototype.queryRadius=function(t){var e=t;return 
oo("circle-radius",this,e)+oo("circle-stroke-width",this,e)+so(this.paint.get("circle-translate"))},e.prototype.queryIntersectsFeature=function(t,e,r,n,i,a,o,s){for(var l=lo(t,this.paint.get("circle-translate"),this.paint.get("circle-translate-anchor"),a.angle,o),c=this.paint.get("circle-radius").evaluate(e,r)+this.paint.get("circle-stroke-width").evaluate(e,r),u="map"===this.paint.get("circle-pitch-alignment"),f=u?l:function(t,e){return t.map((function(t){return wo(t,e)}))}(l,s),h=u?c*o:c,p=0,d=n;pt.width||i.height>t.height||r.x>t.width-i.width||r.y>t.height-i.height)throw new RangeError("out of range source coordinates for image copy");if(i.width>e.width||i.height>e.height||n.x>e.width-i.width||n.y>e.height-i.height)throw new RangeError("out of range destination coordinates for image copy");for(var o=t.data,s=e.data,l=0;l80*r){n=a=t[0],i=o=t[1];for(var d=r;da&&(a=s),l>o&&(o=l);c=0!==(c=Math.max(a-n,o-i))?1/c:0}return jo(h,p,r,n,i,c),p}function Bo(t,e,r,n,i){var a,o;if(i===ls(t,e,r,n)>0)for(a=e;a=e;a-=n)o=as(a,t[a],t[a+1],o);return o&&$o(o,o.next)&&(os(o),o=o.next),o}function No(t,e){if(!t)return t;e||(e=t);var r,n=t;do{if(r=!1,n.steiner||!$o(n,n.next)&&0!==Qo(n.prev,n,n.next))n=n.next;else{if(os(n),(n=e=n.prev)===n.next)break;r=!0}}while(r||n!==e);return e}function jo(t,e,r,n,i,a,o){if(t){!o&&a&&function(t,e,r,n){var i=t;do{null===i.z&&(i.z=Xo(i.x,i.y,e,r,n)),i.prevZ=i.prev,i.nextZ=i.next,i=i.next}while(i!==t);i.prevZ.nextZ=null,i.prevZ=null,function(t){var e,r,n,i,a,o,s,l,c=1;do{for(r=t,t=null,a=null,o=0;r;){for(o++,n=r,s=0,e=0;e0||l>0&&n;)0!==s&&(0===l||!n||r.z<=n.z)?(i=r,r=r.nextZ,s--):(i=n,n=n.nextZ,l--),a?a.nextZ=i:t=i,i.prevZ=a,a=i;r=n}a.nextZ=null,c*=2}while(o>1)}(i)}(t,n,i,a);for(var s,l,c=t;t.prev!==t.next;)if(s=t.prev,l=t.next,a?Vo(t,n,i,a):Uo(t))e.push(s.i/r),e.push(t.i/r),e.push(l.i/r),os(t),t=l.next,c=l.next;else if((t=l)===c){o?1===o?jo(t=Ho(No(t),e,r),e,r,n,i,a,2):2===o&&qo(t,e,r,n,i,a):jo(No(t),e,r,n,i,a,1);break}}}function Uo(t){var 
e=t.prev,r=t,n=t.next;if(Qo(e,r,n)>=0)return!1;for(var i=t.next.next;i!==t.prev;){if(Jo(e.x,e.y,r.x,r.y,n.x,n.y,i.x,i.y)&&Qo(i.prev,i,i.next)>=0)return!1;i=i.next}return!0}function Vo(t,e,r,n){var i=t.prev,a=t,o=t.next;if(Qo(i,a,o)>=0)return!1;for(var s=i.xa.x?i.x>o.x?i.x:o.x:a.x>o.x?a.x:o.x,u=i.y>a.y?i.y>o.y?i.y:o.y:a.y>o.y?a.y:o.y,f=Xo(s,l,e,r,n),h=Xo(c,u,e,r,n),p=t.prevZ,d=t.nextZ;p&&p.z>=f&&d&&d.z<=h;){if(p!==t.prev&&p!==t.next&&Jo(i.x,i.y,a.x,a.y,o.x,o.y,p.x,p.y)&&Qo(p.prev,p,p.next)>=0)return!1;if(p=p.prevZ,d!==t.prev&&d!==t.next&&Jo(i.x,i.y,a.x,a.y,o.x,o.y,d.x,d.y)&&Qo(d.prev,d,d.next)>=0)return!1;d=d.nextZ}for(;p&&p.z>=f;){if(p!==t.prev&&p!==t.next&&Jo(i.x,i.y,a.x,a.y,o.x,o.y,p.x,p.y)&&Qo(p.prev,p,p.next)>=0)return!1;p=p.prevZ}for(;d&&d.z<=h;){if(d!==t.prev&&d!==t.next&&Jo(i.x,i.y,a.x,a.y,o.x,o.y,d.x,d.y)&&Qo(d.prev,d,d.next)>=0)return!1;d=d.nextZ}return!0}function Ho(t,e,r){var n=t;do{var i=n.prev,a=n.next.next;!$o(i,a)&&ts(i,n,n.next,a)&&ns(i,a)&&ns(a,i)&&(e.push(i.i/r),e.push(n.i/r),e.push(a.i/r),os(n),os(n.next),n=t=a),n=n.next}while(n!==t);return No(n)}function qo(t,e,r,n,i,a){var o=t;do{for(var s=o.next.next;s!==o.prev;){if(o.i!==s.i&&Ko(o,s)){var l=is(o,s);return o=No(o,o.next),l=No(l,l.next),jo(o,e,r,n,i,a),void jo(l,e,r,n,i,a)}s=s.next}o=o.next}while(o!==t)}function Go(t,e){return t.x-e.x}function Yo(t,e){if(e=function(t,e){var r,n=e,i=t.x,a=t.y,o=-1/0;do{if(a<=n.y&&a>=n.next.y&&n.next.y!==n.y){var s=n.x+(a-n.y)*(n.next.x-n.x)/(n.next.y-n.y);if(s<=i&&s>o){if(o=s,s===i){if(a===n.y)return n;if(a===n.next.y)return n.next}r=n.x=n.x&&n.x>=u&&i!==n.x&&Jo(ar.x||n.x===r.x&&Wo(r,n)))&&(r=n,h=l)),n=n.next}while(n!==c);return r}(t,e)){var r=is(e,t);No(e,e.next),No(r,r.next)}}function Wo(t,e){return Qo(t.prev,t,e.prev)<0&&Qo(e.next,t,t.next)<0}function 
Xo(t,e,r,n,i){return(t=1431655765&((t=858993459&((t=252645135&((t=16711935&((t=32767*(t-r)*i)|t<<8))|t<<4))|t<<2))|t<<1))|(e=1431655765&((e=858993459&((e=252645135&((e=16711935&((e=32767*(e-n)*i)|e<<8))|e<<4))|e<<2))|e<<1))<<1}function Zo(t){var e=t,r=t;do{(e.x=0&&(t-o)*(n-s)-(r-o)*(e-s)>=0&&(r-o)*(a-s)-(i-o)*(n-s)>=0}function Ko(t,e){return t.next.i!==e.i&&t.prev.i!==e.i&&!function(t,e){var r=t;do{if(r.i!==t.i&&r.next.i!==t.i&&r.i!==e.i&&r.next.i!==e.i&&ts(r,r.next,t,e))return!0;r=r.next}while(r!==t);return!1}(t,e)&&(ns(t,e)&&ns(e,t)&&function(t,e){var r=t,n=!1,i=(t.x+e.x)/2,a=(t.y+e.y)/2;do{r.y>a!=r.next.y>a&&r.next.y!==r.y&&i<(r.next.x-r.x)*(a-r.y)/(r.next.y-r.y)+r.x&&(n=!n),r=r.next}while(r!==t);return n}(t,e)&&(Qo(t.prev,t,e.prev)||Qo(t,e.prev,e))||$o(t,e)&&Qo(t.prev,t,t.next)>0&&Qo(e.prev,e,e.next)>0)}function Qo(t,e,r){return(e.y-t.y)*(r.x-e.x)-(e.x-t.x)*(r.y-e.y)}function $o(t,e){return t.x===e.x&&t.y===e.y}function ts(t,e,r,n){var i=rs(Qo(t,e,r)),a=rs(Qo(t,e,n)),o=rs(Qo(r,n,t)),s=rs(Qo(r,n,e));return i!==a&&o!==s||(!(0!==i||!es(t,r,e))||(!(0!==a||!es(t,n,e))||(!(0!==o||!es(r,t,n))||!(0!==s||!es(r,e,n)))))}function es(t,e,r){return e.x<=Math.max(t.x,r.x)&&e.x>=Math.min(t.x,r.x)&&e.y<=Math.max(t.y,r.y)&&e.y>=Math.min(t.y,r.y)}function rs(t){return t>0?1:t<0?-1:0}function ns(t,e){return Qo(t.prev,t,t.next)<0?Qo(t,e,t.next)>=0&&Qo(t,t.prev,e)>=0:Qo(t,e,t.prev)<0||Qo(t,t.next,e)<0}function is(t,e){var r=new ss(t.i,t.x,t.y),n=new ss(e.i,e.x,e.y),i=t.next,a=e.prev;return t.next=e,e.prev=t,r.next=i,i.prev=r,n.next=r,r.prev=n,a.next=n,n.prev=a,n}function as(t,e,r,n){var i=new ss(t,e,r);return n?(i.next=n.next,i.prev=n,n.next.prev=i,n.next=i):(i.prev=i,i.next=i),i}function os(t){t.next.prev=t.prev,t.prev.next=t.next,t.prevZ&&(t.prevZ.nextZ=t.nextZ),t.nextZ&&(t.nextZ.prevZ=t.prevZ)}function ss(t,e,r){this.i=t,this.x=e,this.y=r,this.prev=null,this.next=null,this.z=null,this.prevZ=null,this.nextZ=null,this.steiner=!1}function ls(t,e,r,n){for(var 
i=0,a=e,o=r-n;an;){if(i-n>600){var o=i-n+1,s=r-n+1,l=Math.log(o),c=.5*Math.exp(2*l/3),u=.5*Math.sqrt(l*c*(o-c)/o)*(s-o/2<0?-1:1),f=Math.max(n,Math.floor(r-s*c/o+u)),h=Math.min(i,Math.floor(r+(o-s)*c/o+u));t(e,r,f,h,a)}var p=e[r],d=n,m=i;for(us(e,n,r),a(e[i],p)>0&&us(e,n,i);d0;)m--}0===a(e[n],p)?us(e,n,m):(m++,us(e,m,i)),m<=r&&(n=m+1),r<=m&&(i=m-1)}}(t,e,r||0,n||t.length-1,i||fs)}function us(t,e,r){var n=t[e];t[e]=t[r],t[r]=n}function fs(t,e){return te?1:0}function hs(t,e){var r=t.length;if(r<=1)return[t];for(var n,i,a=[],o=0;o1)for(var l=0;l0&&(n+=t[i-1].length,r.holes.push(n))}return r},Do.default=Ro;var gs=function(t){this.zoom=t.zoom,this.overscaling=t.overscaling,this.layers=t.layers,this.layerIds=this.layers.map((function(t){return t.id})),this.index=t.index,this.hasPattern=!1,this.patternFeatures=[],this.layoutVertexArray=new zi,this.indexArray=new Yi,this.indexArray2=new Qi,this.programConfigurations=new Ua(zo,t.layers,t.zoom),this.segments=new pa,this.segments2=new pa,this.stateDependentLayerIds=this.layers.filter((function(t){return t.isStateDependent()})).map((function(t){return t.id}))};gs.prototype.populate=function(t,e,r){this.hasPattern=ds("fill",this.layers,e);for(var n=this.layers[0].layout.get("fill-sort-key"),i=[],a=0,o=t;a>3}if(a--,1===n||2===n)o+=t.readSVarint(),s+=t.readSVarint(),1===n&&(e&&l.push(e),e=[]),e.push(new i(o,s));else{if(7!==n)throw new Error("unknown command "+n);e&&e.push(e[0].clone())}}return e&&l.push(e),l},ws.prototype.bbox=function(){var t=this._pbf;t.pos=this._geometry;for(var e=t.readVarint()+t.pos,r=1,n=0,i=0,a=0,o=1/0,s=-1/0,l=1/0,c=-1/0;t.pos>3}if(n--,1===r||2===r)(i+=t.readSVarint())s&&(s=i),(a+=t.readSVarint())c&&(c=a);else if(7!==r)throw new Error("unknown command "+r)}return[o,l,s,c]},ws.prototype.toGeoJSON=function(t,e,r){var n,i,a=this.extent*Math.pow(2,r),o=this.extent*t,s=this.extent*e,l=this.loadGeometry(),c=ws.types[this.type];function u(t){for(var 
e=0;e>3;e=1===n?t.readString():2===n?t.readFloat():3===n?t.readDouble():4===n?t.readVarint64():5===n?t.readVarint():6===n?t.readSVarint():7===n?t.readBoolean():null}return e}(r))}function Es(t,e,r){if(3===t){var n=new As(r,r.readVarint()+r.pos);n.length&&(e[n.name]=n)}}Ms.prototype.feature=function(t){if(t<0||t>=this._features.length)throw new Error("feature index out of bounds");this._pbf.pos=this._features[t];var e=this._pbf.readVarint()+this._pbf.pos;return new _s(this._pbf,e,this.extent,this._keys,this._values)};var Ls={VectorTile:function(t,e){this.layers=t.readFields(Es,{},e)},VectorTileFeature:_s,VectorTileLayer:As},Cs=Ls.VectorTileFeature.types,Ps=Math.pow(2,13);function Is(t,e,r,n,i,a,o,s){t.emplaceBack(e,r,2*Math.floor(n*Ps)+o,i*Ps*2,a*Ps*2,Math.round(s))}var Os=function(t){this.zoom=t.zoom,this.overscaling=t.overscaling,this.layers=t.layers,this.layerIds=this.layers.map((function(t){return t.id})),this.index=t.index,this.hasPattern=!1,this.layoutVertexArray=new Ri,this.indexArray=new Yi,this.programConfigurations=new Ua(bs,t.layers,t.zoom),this.segments=new pa,this.stateDependentLayerIds=this.layers.filter((function(t){return t.isStateDependent()})).map((function(t){return t.id}))};function zs(t,e){return t.x===e.x&&(t.x<0||t.x>8192)||t.y===e.y&&(t.y<0||t.y>8192)}function Ds(t){return t.every((function(t){return t.x<0}))||t.every((function(t){return t.x>8192}))||t.every((function(t){return t.y<0}))||t.every((function(t){return t.y>8192}))}Os.prototype.populate=function(t,e,r){this.features=[],this.hasPattern=ds("fill-extrusion",this.layers,e);for(var n=0,i=t;n=1){var y=d[g-1];if(!zs(v,y)){f.vertexLength+4>pa.MAX_VERTEX_ARRAY_LENGTH&&(f=this.segments.prepareSegment(4,this.layoutVertexArray,this.indexArray));var 
x=v.sub(y)._perp()._unit(),b=y.dist(v);m+b>32768&&(m=0),Is(this.layoutVertexArray,v.x,v.y,x.x,x.y,0,0,m),Is(this.layoutVertexArray,v.x,v.y,x.x,x.y,0,1,m),m+=b,Is(this.layoutVertexArray,y.x,y.y,x.x,x.y,0,0,m),Is(this.layoutVertexArray,y.x,y.y,x.x,x.y,0,1,m);var _=f.vertexLength;this.indexArray.emplaceBack(_,_+2,_+1),this.indexArray.emplaceBack(_+1,_+2,_+3),f.vertexLength+=4,f.primitiveLength+=2}}}}if(f.vertexLength+l>pa.MAX_VERTEX_ARRAY_LENGTH&&(f=this.segments.prepareSegment(l,this.layoutVertexArray,this.indexArray)),"Polygon"===Cs[t.type]){for(var w=[],T=[],k=f.vertexLength,A=0,M=s;A=2&&t[l-1].equals(t[l-2]);)l--;for(var c=0;c0;if(T&&v>c){var A=u.dist(p);if(A>2*f){var M=u.sub(u.sub(p)._mult(f/A)._round());this.updateDistance(p,M),this.addCurrentVertex(M,m,0,0,h),p=M}}var S=p&&d,E=S?r:s?"butt":n;if(S&&"round"===E&&(_i&&(E="bevel"),"bevel"===E&&(_>2&&(E="flipbevel"),_100)y=g.mult(-1);else{var L=_*m.add(g).mag()/m.sub(g).mag();y._perp()._mult(L*(k?-1:1))}this.addCurrentVertex(u,y,0,0,h),this.addCurrentVertex(u,y.mult(-1),0,0,h)}else if("bevel"===E||"fakeround"===E){var C=-Math.sqrt(_*_-1),P=k?C:0,I=k?0:C;if(p&&this.addCurrentVertex(u,m,P,I,h),"fakeround"===E)for(var O=Math.round(180*w/Math.PI/20),z=1;z2*f){var j=u.add(d.sub(u)._mult(f/N)._round());this.updateDistance(u,j),this.addCurrentVertex(j,g,0,0,h),u=j}}}}},qs.prototype.addCurrentVertex=function(t,e,r,n,i,a){void 0===a&&(a=!1);var o=e.x+e.y*r,s=e.y-e.x*r,l=-e.x+e.y*n,c=-e.y-e.x*n;this.addHalfVertex(t,o,s,a,!1,r,i),this.addHalfVertex(t,l,c,a,!0,-n,i),this.distance>Hs/2&&0===this.totalDistance&&(this.distance=0,this.addCurrentVertex(t,e,r,n,i,a))},qs.prototype.addHalfVertex=function(t,e,r,n,i,a,o){var s=t.x,l=t.y,c=.5*this.scaledDistance;this.layoutVertexArray.emplaceBack((s<<1)+(n?1:0),(l<<1)+(i?1:0),Math.round(63*e)+128,Math.round(63*r)+128,1+(0===a?0:a<0?-1:1)|(63&c)<<2,c>>6);var 
u=o.vertexLength++;this.e1>=0&&this.e2>=0&&(this.indexArray.emplaceBack(this.e1,this.e2,u),o.primitiveLength++),i?this.e2=u:this.e1=u},qs.prototype.updateScaledDistance=function(){this.scaledDistance=this.totalDistance>0?(this.clipStart+(this.clipEnd-this.clipStart)*this.distance/this.totalDistance)*(Hs-1):this.distance},qs.prototype.updateDistance=function(t,e){this.distance+=t.dist(e),this.updateScaledDistance()},Nn("LineBucket",qs,{omit:["layers","patternFeatures"]});var Gs=new Si({"line-cap":new wi(Lt.layout_line["line-cap"]),"line-join":new Ti(Lt.layout_line["line-join"]),"line-miter-limit":new wi(Lt.layout_line["line-miter-limit"]),"line-round-limit":new wi(Lt.layout_line["line-round-limit"]),"line-sort-key":new Ti(Lt.layout_line["line-sort-key"])}),Ys={paint:new Si({"line-opacity":new Ti(Lt.paint_line["line-opacity"]),"line-color":new Ti(Lt.paint_line["line-color"]),"line-translate":new wi(Lt.paint_line["line-translate"]),"line-translate-anchor":new wi(Lt.paint_line["line-translate-anchor"]),"line-width":new Ti(Lt.paint_line["line-width"]),"line-gap-width":new Ti(Lt.paint_line["line-gap-width"]),"line-offset":new Ti(Lt.paint_line["line-offset"]),"line-blur":new Ti(Lt.paint_line["line-blur"]),"line-dasharray":new Ai(Lt.paint_line["line-dasharray"]),"line-pattern":new ki(Lt.paint_line["line-pattern"]),"line-gradient":new Mi(Lt.paint_line["line-gradient"])}),layout:Gs},Ws=new(function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype.possiblyEvaluate=function(e,r){return r=new pi(Math.floor(r.zoom),{now:r.now,fadeDuration:r.fadeDuration,zoomHistory:r.zoomHistory,transition:r.transition}),t.prototype.possiblyEvaluate.call(this,e,r)},e.prototype.evaluate=function(e,r,n,i){return r=u({},r,{zoom:Math.floor(r.zoom)}),t.prototype.evaluate.call(this,e,r,n,i)},e}(Ti))(Ys.paint.properties["line-width"].specification);Ws.useIntegerZoom=!0;var Xs=function(t){function 
e(e){t.call(this,e,Ys)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype._handleSpecialPaintPropertyUpdate=function(t){"line-gradient"===t&&this._updateGradient()},e.prototype._updateGradient=function(){var t=this._transitionablePaint._values["line-gradient"].value.expression;this.gradient=Co(t,"lineProgress"),this.gradientTexture=null},e.prototype.recalculate=function(e,r){t.prototype.recalculate.call(this,e,r),this.paint._values["line-floorwidth"]=Ws.possiblyEvaluate(this._transitioningPaint._values["line-width"].value,e)},e.prototype.createBucket=function(t){return new qs(t)},e.prototype.queryRadius=function(t){var e=t,r=Zs(oo("line-width",this,e),oo("line-gap-width",this,e)),n=oo("line-offset",this,e);return r/2+Math.abs(n)+so(this.paint.get("line-translate"))},e.prototype.queryIntersectsFeature=function(t,e,r,n,a,o,s){var l=lo(t,this.paint.get("line-translate"),this.paint.get("line-translate-anchor"),o.angle,s),c=s/2*Zs(this.paint.get("line-width").evaluate(e,r),this.paint.get("line-gap-width").evaluate(e,r)),u=this.paint.get("line-offset").evaluate(e,r);return u&&(n=function(t,e){for(var r=[],n=new i(0,0),a=0;a=3)for(var a=0;a0?e+2*t:t}var 
Js=Ii([{name:"a_pos_offset",components:4,type:"Int16"},{name:"a_data",components:4,type:"Uint16"},{name:"a_pixeloffset",components:4,type:"Int16"}],4),Ks=Ii([{name:"a_projected_pos",components:3,type:"Float32"}],4),Qs=(Ii([{name:"a_fade_opacity",components:1,type:"Uint32"}],4),Ii([{name:"a_placed",components:2,type:"Uint8"},{name:"a_shift",components:2,type:"Float32"}])),$s=(Ii([{type:"Int16",name:"anchorPointX"},{type:"Int16",name:"anchorPointY"},{type:"Int16",name:"x1"},{type:"Int16",name:"y1"},{type:"Int16",name:"x2"},{type:"Int16",name:"y2"},{type:"Uint32",name:"featureIndex"},{type:"Uint16",name:"sourceLayerIndex"},{type:"Uint16",name:"bucketIndex"}]),Ii([{name:"a_pos",components:2,type:"Int16"},{name:"a_anchor_pos",components:2,type:"Int16"},{name:"a_extrude",components:2,type:"Int16"}],4)),tl=Ii([{name:"a_pos",components:2,type:"Float32"},{name:"a_radius",components:1,type:"Float32"},{name:"a_flags",components:2,type:"Int16"}],4);Ii([{name:"triangle",components:3,type:"Uint16"}]),Ii([{type:"Int16",name:"anchorX"},{type:"Int16",name:"anchorY"},{type:"Uint16",name:"glyphStartIndex"},{type:"Uint16",name:"numGlyphs"},{type:"Uint32",name:"vertexStartIndex"},{type:"Uint32",name:"lineStartIndex"},{type:"Uint32",name:"lineLength"},{type:"Uint16",name:"segment"},{type:"Uint16",name:"lowerSize"},{type:"Uint16",name:"upperSize"},{type:"Float32",name:"lineOffsetX"},{type:"Float32",name:"lineOffsetY"},{type:"Uint8",name:"writingMode"},{type:"Uint8",name:"placedOrientation"},{type:"Uint8",name:"hidden"},{type:"Uint32",name:"crossTileID"},{type:"Int16",name:"associatedIconIndex"}]),Ii([{type:"Int16",name:"anchorX"},{type:"Int16",name:"anchorY"},{type:"Int16",name:"rightJustifiedTextSymbolIndex"},{type:"Int16",name:"centerJustifiedTextSymbolIndex"},{type:"Int16",name:"leftJustifiedTextSymbolIndex"},{type:"Int16",name:"verticalPlacedTextSymbolIndex"},{type:"Int16",name:"placedIconSymbolIndex"},{type:"Int16",name:"verticalPlacedIconSymbolIndex"},{type:"Uint16",name:"key"},{typ
e:"Uint16",name:"textBoxStartIndex"},{type:"Uint16",name:"textBoxEndIndex"},{type:"Uint16",name:"verticalTextBoxStartIndex"},{type:"Uint16",name:"verticalTextBoxEndIndex"},{type:"Uint16",name:"iconBoxStartIndex"},{type:"Uint16",name:"iconBoxEndIndex"},{type:"Uint16",name:"verticalIconBoxStartIndex"},{type:"Uint16",name:"verticalIconBoxEndIndex"},{type:"Uint16",name:"featureIndex"},{type:"Uint16",name:"numHorizontalGlyphVertices"},{type:"Uint16",name:"numVerticalGlyphVertices"},{type:"Uint16",name:"numIconVertices"},{type:"Uint16",name:"numVerticalIconVertices"},{type:"Uint16",name:"useRuntimeCollisionCircles"},{type:"Uint32",name:"crossTileID"},{type:"Float32",name:"textBoxScale"},{type:"Float32",components:2,name:"textOffset"},{type:"Float32",name:"collisionCircleDiameter"}]),Ii([{type:"Float32",name:"offsetX"}]),Ii([{type:"Int16",name:"x"},{type:"Int16",name:"y"},{type:"Int16",name:"tileUnitDistanceFromAnchor"}]);function el(t,e,r){return t.sections.forEach((function(t){t.text=function(t,e,r){var n=e.layout.get("text-transform").evaluate(r,{});return"uppercase"===n?t=t.toLocaleUpperCase():"lowercase"===n&&(t=t.toLocaleLowerCase()),hi.applyArabicShaping&&(t=hi.applyArabicShaping(t)),t}(t.text,e,r)})),t}var 
rl={"!":"\ufe15","#":"\uff03",$:"\uff04","%":"\uff05","&":"\uff06","(":"\ufe35",")":"\ufe36","*":"\uff0a","+":"\uff0b",",":"\ufe10","-":"\ufe32",".":"\u30fb","/":"\uff0f",":":"\ufe13",";":"\ufe14","<":"\ufe3f","=":"\uff1d",">":"\ufe40","?":"\ufe16","@":"\uff20","[":"\ufe47","\\":"\uff3c","]":"\ufe48","^":"\uff3e",_:"\ufe33","`":"\uff40","{":"\ufe37","|":"\u2015","}":"\ufe38","~":"\uff5e","\xa2":"\uffe0","\xa3":"\uffe1","\xa5":"\uffe5","\xa6":"\uffe4","\xac":"\uffe2","\xaf":"\uffe3","\u2013":"\ufe32","\u2014":"\ufe31","\u2018":"\ufe43","\u2019":"\ufe44","\u201c":"\ufe41","\u201d":"\ufe42","\u2026":"\ufe19","\u2027":"\u30fb","\u20a9":"\uffe6","\u3001":"\ufe11","\u3002":"\ufe12","\u3008":"\ufe3f","\u3009":"\ufe40","\u300a":"\ufe3d","\u300b":"\ufe3e","\u300c":"\ufe41","\u300d":"\ufe42","\u300e":"\ufe43","\u300f":"\ufe44","\u3010":"\ufe3b","\u3011":"\ufe3c","\u3014":"\ufe39","\u3015":"\ufe3a","\u3016":"\ufe17","\u3017":"\ufe18","\uff01":"\ufe15","\uff08":"\ufe35","\uff09":"\ufe36","\uff0c":"\ufe10","\uff0d":"\ufe32","\uff0e":"\u30fb","\uff1a":"\ufe13","\uff1b":"\ufe14","\uff1c":"\ufe3f","\uff1e":"\ufe40","\uff1f":"\ufe16","\uff3b":"\ufe47","\uff3d":"\ufe48","\uff3f":"\ufe33","\uff5b":"\ufe37","\uff5c":"\u2015","\uff5d":"\ufe38","\uff5f":"\ufe35","\uff60":"\ufe36","\uff61":"\ufe12","\uff62":"\ufe41","\uff63":"\ufe42"};var nl=function(t,e,r,n,i){var a,o,s=8*i-n-1,l=(1<>1,u=-7,f=r?i-1:0,h=r?-1:1,p=t[e+f];for(f+=h,a=p&(1<<-u)-1,p>>=-u,u+=s;u>0;a=256*a+t[e+f],f+=h,u-=8);for(o=a&(1<<-u)-1,a>>=-u,u+=n;u>0;o=256*o+t[e+f],f+=h,u-=8);if(0===a)a=1-c;else{if(a===l)return o?NaN:1/0*(p?-1:1);o+=Math.pow(2,n),a-=c}return(p?-1:1)*o*Math.pow(2,a-n)},il=function(t,e,r,n,i,a){var 
o,s,l,c=8*a-i-1,u=(1<>1,h=23===i?Math.pow(2,-24)-Math.pow(2,-77):0,p=n?0:a-1,d=n?1:-1,m=e<0||0===e&&1/e<0?1:0;for(e=Math.abs(e),isNaN(e)||e===1/0?(s=isNaN(e)?1:0,o=u):(o=Math.floor(Math.log(e)/Math.LN2),e*(l=Math.pow(2,-o))<1&&(o--,l*=2),(e+=o+f>=1?h/l:h*Math.pow(2,1-f))*l>=2&&(o++,l/=2),o+f>=u?(s=0,o=u):o+f>=1?(s=(e*l-1)*Math.pow(2,i),o+=f):(s=e*Math.pow(2,f-1)*Math.pow(2,i),o=0));i>=8;t[r+p]=255&s,p+=d,s/=256,i-=8);for(o=o<0;t[r+p]=255&o,p+=d,o/=256,c-=8);t[r+p-d]|=128*m},al=ol;function ol(t){this.buf=ArrayBuffer.isView&&ArrayBuffer.isView(t)?t:new Uint8Array(t||0),this.pos=0,this.type=0,this.length=this.buf.length}ol.Varint=0,ol.Fixed64=1,ol.Bytes=2,ol.Fixed32=5;var sl="undefined"==typeof TextDecoder?null:new TextDecoder("utf8");function ll(t){return t.type===ol.Bytes?t.readVarint()+t.pos:t.pos+1}function cl(t,e,r){return r?4294967296*e+(t>>>0):4294967296*(e>>>0)+(t>>>0)}function ul(t,e,r){var n=e<=16383?1:e<=2097151?2:e<=268435455?3:Math.floor(Math.log(e)/(7*Math.LN2));r.realloc(n);for(var i=r.pos-1;i>=t;i--)r.buf[i+n]=r.buf[i]}function fl(t,e){for(var r=0;r>>8,t[r+2]=e>>>16,t[r+3]=e>>>24}function wl(t,e){return(t[e]|t[e+1]<<8|t[e+2]<<16)+(t[e+3]<<24)}ol.prototype={destroy:function(){this.buf=null},readFields:function(t,e,r){for(r=r||this.length;this.pos>3,a=this.pos;this.type=7&n,t(i,e,this),this.pos===a&&this.skip(n)}return e},readMessage:function(t,e){return this.readFields(t,e,this.readVarint()+this.pos)},readFixed32:function(){var t=bl(this.buf,this.pos);return this.pos+=4,t},readSFixed32:function(){var t=wl(this.buf,this.pos);return this.pos+=4,t},readFixed64:function(){var t=bl(this.buf,this.pos)+4294967296*bl(this.buf,this.pos+4);return this.pos+=8,t},readSFixed64:function(){var t=bl(this.buf,this.pos)+4294967296*wl(this.buf,this.pos+4);return this.pos+=8,t},readFloat:function(){var t=nl(this.buf,this.pos,!0,23,4);return this.pos+=4,t},readDouble:function(){var t=nl(this.buf,this.pos,!0,52,8);return this.pos+=8,t},readVarint:function(t){var 
e,r,n=this.buf;return e=127&(r=n[this.pos++]),r<128?e:(e|=(127&(r=n[this.pos++]))<<7,r<128?e:(e|=(127&(r=n[this.pos++]))<<14,r<128?e:(e|=(127&(r=n[this.pos++]))<<21,r<128?e:function(t,e,r){var n,i,a=r.buf;if(i=a[r.pos++],n=(112&i)>>4,i<128)return cl(t,n,e);if(i=a[r.pos++],n|=(127&i)<<3,i<128)return cl(t,n,e);if(i=a[r.pos++],n|=(127&i)<<10,i<128)return cl(t,n,e);if(i=a[r.pos++],n|=(127&i)<<17,i<128)return cl(t,n,e);if(i=a[r.pos++],n|=(127&i)<<24,i<128)return cl(t,n,e);if(i=a[r.pos++],n|=(1&i)<<31,i<128)return cl(t,n,e);throw new Error("Expected varint not more than 10 bytes")}(e|=(15&(r=n[this.pos]))<<28,t,this))))},readVarint64:function(){return this.readVarint(!0)},readSVarint:function(){var t=this.readVarint();return t%2==1?(t+1)/-2:t/2},readBoolean:function(){return Boolean(this.readVarint())},readString:function(){var t=this.readVarint()+this.pos,e=this.pos;return this.pos=t,t-e>=12&&sl?function(t,e,r){return sl.decode(t.subarray(e,r))}(this.buf,e,t):function(t,e,r){var n="",i=e;for(;i239?4:l>223?3:l>191?2:1;if(i+u>r)break;1===u?l<128&&(c=l):2===u?128==(192&(a=t[i+1]))&&(c=(31&l)<<6|63&a)<=127&&(c=null):3===u?(a=t[i+1],o=t[i+2],128==(192&a)&&128==(192&o)&&((c=(15&l)<<12|(63&a)<<6|63&o)<=2047||c>=55296&&c<=57343)&&(c=null)):4===u&&(a=t[i+1],o=t[i+2],s=t[i+3],128==(192&a)&&128==(192&o)&&128==(192&s)&&((c=(15&l)<<18|(63&a)<<12|(63&o)<<6|63&s)<=65535||c>=1114112)&&(c=null)),null===c?(c=65533,u=1):c>65535&&(c-=65536,n+=String.fromCharCode(c>>>10&1023|55296),c=56320|1023&c),n+=String.fromCharCode(c),i+=u}return n}(this.buf,e,t)},readBytes:function(){var t=this.readVarint()+this.pos,e=this.buf.subarray(this.pos,t);return this.pos=t,e},readPackedVarint:function(t,e){if(this.type!==ol.Bytes)return t.push(this.readVarint(e));var r=ll(this);for(t=t||[];this.pos127;);else if(e===ol.Bytes)this.pos=this.readVarint()+this.pos;else if(e===ol.Fixed32)this.pos+=4;else{if(e!==ol.Fixed64)throw new Error("Unimplemented type: 
"+e);this.pos+=8}},writeTag:function(t,e){this.writeVarint(t<<3|e)},realloc:function(t){for(var e=this.length||16;e268435455||t<0?function(t,e){var r,n;t>=0?(r=t%4294967296|0,n=t/4294967296|0):(n=~(-t/4294967296),4294967295^(r=~(-t%4294967296))?r=r+1|0:(r=0,n=n+1|0));if(t>=0x10000000000000000||t<-0x10000000000000000)throw new Error("Given varint doesn't fit into 10 bytes");e.realloc(10),function(t,e,r){r.buf[r.pos++]=127&t|128,t>>>=7,r.buf[r.pos++]=127&t|128,t>>>=7,r.buf[r.pos++]=127&t|128,t>>>=7,r.buf[r.pos++]=127&t|128,t>>>=7,r.buf[r.pos]=127&t}(r,0,e),function(t,e){var r=(7&t)<<4;if(e.buf[e.pos++]|=r|((t>>>=3)?128:0),!t)return;if(e.buf[e.pos++]=127&t|((t>>>=7)?128:0),!t)return;if(e.buf[e.pos++]=127&t|((t>>>=7)?128:0),!t)return;if(e.buf[e.pos++]=127&t|((t>>>=7)?128:0),!t)return;if(e.buf[e.pos++]=127&t|((t>>>=7)?128:0),!t)return;e.buf[e.pos++]=127&t}(n,e)}(t,this):(this.realloc(4),this.buf[this.pos++]=127&t|(t>127?128:0),t<=127||(this.buf[this.pos++]=127&(t>>>=7)|(t>127?128:0),t<=127||(this.buf[this.pos++]=127&(t>>>=7)|(t>127?128:0),t<=127||(this.buf[this.pos++]=t>>>7&127))))},writeSVarint:function(t){this.writeVarint(t<0?2*-t-1:2*t)},writeBoolean:function(t){this.writeVarint(Boolean(t))},writeString:function(t){t=String(t),this.realloc(4*t.length),this.pos++;var e=this.pos;this.pos=function(t,e,r){for(var n,i,a=0;a55295&&n<57344){if(!i){n>56319||a+1===e.length?(t[r++]=239,t[r++]=191,t[r++]=189):i=n;continue}if(n<56320){t[r++]=239,t[r++]=191,t[r++]=189,i=n;continue}n=i-55296<<10|n-56320|65536,i=null}else i&&(t[r++]=239,t[r++]=191,t[r++]=189,i=null);n<128?t[r++]=n:(n<2048?t[r++]=n>>6|192:(n<65536?t[r++]=n>>12|224:(t[r++]=n>>18|240,t[r++]=n>>12&63|128),t[r++]=n>>6&63|128),t[r++]=63&n|128)}return r}(this.buf,t,this.pos);var 
r=this.pos-e;r>=128&&ul(e,r,this),this.pos=e-1,this.writeVarint(r),this.pos+=r},writeFloat:function(t){this.realloc(4),il(this.buf,t,this.pos,!0,23,4),this.pos+=4},writeDouble:function(t){this.realloc(8),il(this.buf,t,this.pos,!0,52,8),this.pos+=8},writeBytes:function(t){var e=t.length;this.writeVarint(e),this.realloc(e);for(var r=0;r=128&&ul(r,n,this),this.pos=r-1,this.writeVarint(n),this.pos+=n},writeMessage:function(t,e,r){this.writeTag(t,ol.Bytes),this.writeRawMessage(e,r)},writePackedVarint:function(t,e){e.length&&this.writeMessage(t,fl,e)},writePackedSVarint:function(t,e){e.length&&this.writeMessage(t,hl,e)},writePackedBoolean:function(t,e){e.length&&this.writeMessage(t,ml,e)},writePackedFloat:function(t,e){e.length&&this.writeMessage(t,pl,e)},writePackedDouble:function(t,e){e.length&&this.writeMessage(t,dl,e)},writePackedFixed32:function(t,e){e.length&&this.writeMessage(t,gl,e)},writePackedSFixed32:function(t,e){e.length&&this.writeMessage(t,vl,e)},writePackedFixed64:function(t,e){e.length&&this.writeMessage(t,yl,e)},writePackedSFixed64:function(t,e){e.length&&this.writeMessage(t,xl,e)},writeBytesField:function(t,e){this.writeTag(t,ol.Bytes),this.writeBytes(e)},writeFixed32Field:function(t,e){this.writeTag(t,ol.Fixed32),this.writeFixed32(e)},writeSFixed32Field:function(t,e){this.writeTag(t,ol.Fixed32),this.writeSFixed32(e)},writeFixed64Field:function(t,e){this.writeTag(t,ol.Fixed64),this.writeFixed64(e)},writeSFixed64Field:function(t,e){this.writeTag(t,ol.Fixed64),this.writeSFixed64(e)},writeVarintField:function(t,e){this.writeTag(t,ol.Varint),this.writeVarint(e)},writeSVarintField:function(t,e){this.writeTag(t,ol.Varint),this.writeSVarint(e)},writeStringField:function(t,e){this.writeTag(t,ol.Bytes),this.writeString(e)},writeFloatField:function(t,e){this.writeTag(t,ol.Fixed32),this.writeFloat(e)},writeDoubleField:function(t,e){this.writeTag(t,ol.Fixed64),this.writeDouble(e)},writeBooleanField:function(t,e){this.writeVarintField(t,Boolean(e))}};function 
Tl(t,e,r){1===t&&r.readMessage(kl,e)}function kl(t,e,r){if(3===t){var n=r.readMessage(Al,{}),i=n.id,a=n.bitmap,o=n.width,s=n.height,l=n.left,c=n.top,u=n.advance;e.push({id:i,bitmap:new So({width:o+6,height:s+6},a),metrics:{width:o,height:s,left:l,top:c,advance:u}})}}function Al(t,e,r){1===t?e.id=r.readVarint():2===t?e.bitmap=r.readBytes():3===t?e.width=r.readVarint():4===t?e.height=r.readVarint():5===t?e.left=r.readSVarint():6===t?e.top=r.readSVarint():7===t&&(e.advance=r.readVarint())}function Ml(t){for(var e=0,r=0,n=0,i=t;n=0;h--){var p=o[h];if(!(f.w>p.w||f.h>p.h)){if(f.x=p.x,f.y=p.y,l=Math.max(l,f.y+f.h),s=Math.max(s,f.x+f.w),f.w===p.w&&f.h===p.h){var d=o.pop();h0&&N>A&&(A=N)}else{var j=r[S.fontStack],U=j&&j[L];if(U&&U.rect)I=U.rect,P=U.metrics;else{var V=e[S.fontStack],H=V&&V[L];if(!H)continue;P=H.metrics}C=24*(_-S.scale)}D?(t.verticalizable=!0,k.push({glyph:L,imageName:O,x:h,y:p+C,vertical:D,scale:S.scale,fontStack:S.fontStack,sectionIndex:E,metrics:P,rect:I}),h+=z*S.scale+c):(k.push({glyph:L,imageName:O,x:h,y:p+C,vertical:D,scale:S.scale,fontStack:S.fontStack,sectionIndex:E,metrics:P,rect:I}),h+=P.advance*S.scale+c)}if(0!==k.length){var q=h-c;d=Math.max(q,d),Vl(k,0,k.length-1,g,A)}h=0;var G=a*_+A;T.lineOffset=Math.max(A,w),p+=G,m=Math.max(G,m),++v}else p+=a,++v}var Y;var W=p- -17,X=Ul(o),Z=X.horizontalAlign,J=X.verticalAlign;(function(t,e,r,n,i,a,o,s,l){var c=(e-r)*i,u=0;u=a!==o?-s*n- -17:(-n*l+.5)*o;for(var f=0,h=t;f=0&&n>=t&&zl[this.text.charCodeAt(n)];n--)r--;this.text=this.text.substring(t,r),this.sectionIndex=this.sectionIndex.slice(t,r)},Il.prototype.substring=function(t,e){var r=new Il;return r.text=this.text.substring(t,e),r.sectionIndex=this.sectionIndex.slice(t,e),r.sections=this.sections,r},Il.prototype.toString=function(){return this.text},Il.prototype.getMaxScale=function(){var t=this;return this.sectionIndex.reduce((function(e,r){return 
Math.max(e,t.sections[r].scale)}),0)},Il.prototype.addTextSection=function(t,e){this.text+=t.text,this.sections.push(Pl.forText(t.scale,t.fontStack||e));for(var r=this.sections.length-1,n=0;n=63743?null:++this.imageSectionID:(this.imageSectionID=57344,this.imageSectionID)};var zl={9:!0,10:!0,11:!0,12:!0,13:!0,32:!0},Dl={};function Rl(t,e,r,n,i,a){if(e.imageName){var o=n[e.imageName];return o?o.displaySize[0]*e.scale*24/a+i:0}var s=r[e.fontStack],l=s&&s[t];return l?l.metrics.advance*e.scale+i:0}function Fl(t,e,r,n){var i=Math.pow(t-e,2);return n?t=0,f=0,h=0;h-r/2;){if(--o<0)return!1;s-=t[o].dist(a),a=t[o]}s+=t[o].dist(t[o+1]),o++;for(var l=[],c=0;sn;)c-=l.shift().angleDelta;if(c>i)return!1;o++,s+=f.dist(h)}return!0}function Jl(t){for(var e=0,r=0;rc){var d=(c-l)/p,m=qe(f.x,h.x,d),g=qe(f.y,h.y,d),v=new ql(m,g,h.angleTo(f),u);return v._round(),!o||Zl(t,v,s,o,e)?v:void 0}l+=p}}function tc(t,e,r,n,i,a,o,s,l){var c=Kl(n,a,o),u=Ql(n,i),f=u*o,h=0===t[0].x||t[0].x===l||0===t[0].y||t[0].y===l;return e-f=0&&_=0&&w=0&&p+u<=f){var T=new ql(_,w,x,m);T._round(),i&&!Zl(e,T,o,i,a)||d.push(T)}}h+=y}l||d.length||s||(d=t(e,h/2,n,i,a,o,s,!0,c));return d}(t,h?e/2*s%e:(u/2+2*a)*o*s%e,e,c,r,f,h,!1,l)}function ec(t,e,r,n,a){for(var o=[],s=0;s=n&&h.x>=n||(f.x>=n?f=new i(n,f.y+(h.y-f.y)*((n-f.x)/(h.x-f.x)))._round():h.x>=n&&(h=new i(n,f.y+(h.y-f.y)*((n-f.x)/(h.x-f.x)))._round()),f.y>=a&&h.y>=a||(f.y>=a?f=new i(f.x+(h.x-f.x)*((a-f.y)/(h.y-f.y)),a)._round():h.y>=a&&(h=new i(f.x+(h.x-f.x)*((a-f.y)/(h.y-f.y)),a)._round()),c&&f.equals(c[c.length-1])||(c=[f],o.push(c)),c.push(h)))))}return o}function rc(t,e,r,n){var a=[],o=t.image,s=o.pixelRatio,l=o.paddedRect.w-2,c=o.paddedRect.h-2,u=t.right-t.left,f=t.bottom-t.top,h=o.stretchX||[[0,l]],p=o.stretchY||[[0,c]],d=function(t,e){return t+e[1]-e[0]},m=h.reduce(d,0),g=p.reduce(d,0),v=l-m,y=c-g,x=0,b=m,_=0,w=g,T=0,k=v,A=0,M=y;if(o.content&&n){var 
S=o.content;x=nc(h,0,S[0]),_=nc(p,0,S[1]),b=nc(h,S[0],S[2]),w=nc(p,S[1],S[3]),T=S[0]-x,A=S[1]-_,k=S[2]-S[0]-b,M=S[3]-S[1]-w}var E=function(n,a,l,c){var h=ac(n.stretch-x,b,u,t.left),p=oc(n.fixed-T,k,n.stretch,m),d=ac(a.stretch-_,w,f,t.top),v=oc(a.fixed-A,M,a.stretch,g),y=ac(l.stretch-x,b,u,t.left),S=oc(l.fixed-T,k,l.stretch,m),E=ac(c.stretch-_,w,f,t.top),L=oc(c.fixed-A,M,c.stretch,g),C=new i(h,d),P=new i(y,d),I=new i(y,E),O=new i(h,E),z=new i(p/s,v/s),D=new i(S/s,L/s),R=e*Math.PI/180;if(R){var F=Math.sin(R),B=Math.cos(R),N=[B,-F,F,B];C._matMult(N),P._matMult(N),O._matMult(N),I._matMult(N)}var j=n.stretch+n.fixed,U=l.stretch+l.fixed,V=a.stretch+a.fixed,H=c.stretch+c.fixed;return{tl:C,tr:P,bl:O,br:I,tex:{x:o.paddedRect.x+1+j,y:o.paddedRect.y+1+V,w:U-j,h:H-V},writingMode:void 0,glyphOffset:[0,0],sectionIndex:0,pixelOffsetTL:z,pixelOffsetBR:D,minFontScaleX:k/s/u,minFontScaleY:M/s/f,isSDF:r}};if(n&&(o.stretchX||o.stretchY))for(var L=ic(h,v,m),C=ic(p,y,g),P=0;P0&&(d=Math.max(10,d),this.circleDiameter=d)}else{var m=o.top*s-l,g=o.bottom*s+l,v=o.left*s-l,y=o.right*s+l,x=o.collisionPadding;if(x&&(v-=x[0]*s,m-=x[1]*s,y+=x[2]*s,g+=x[3]*s),u){var b=new i(v,m),_=new i(y,m),w=new i(v,g),T=new i(y,g),k=u*Math.PI/180;b._rotate(k),_._rotate(k),w._rotate(k),T._rotate(k),v=Math.min(b.x,_.x,w.x,T.x),y=Math.max(b.x,_.x,w.x,T.x),m=Math.min(b.y,_.y,w.y,T.y),g=Math.max(b.y,_.y,w.y,T.y)}t.emplaceBack(e.x,e.y,v,m,y,g,r,n,a)}this.boxEndIndex=t.length},lc=function(t,e){if(void 0===t&&(t=[]),void 0===e&&(e=cc),this.data=t,this.length=this.data.length,this.compare=e,this.length>0)for(var r=(this.length>>1)-1;r>=0;r--)this._down(r)};function cc(t,e){return te?1:0}function uc(t,e,r){void 0===e&&(e=1),void 0===r&&(r=!1);for(var n=1/0,a=1/0,o=-1/0,s=-1/0,l=t[0],c=0;co)&&(o=u.x),(!c||u.y>s)&&(s=u.y)}var f=o-n,h=s-a,p=Math.min(f,h),d=p/2,m=new lc([],fc);if(0===p)return new i(n,a);for(var g=n;gy.d||!y.d)&&(y=b,r&&console.log("found best %d after %d 
probes",Math.round(1e4*b.d)/1e4,x)),b.max-y.d<=e||(d=b.h/2,m.push(new hc(b.p.x-d,b.p.y-d,d,t)),m.push(new hc(b.p.x+d,b.p.y-d,d,t)),m.push(new hc(b.p.x-d,b.p.y+d,d,t)),m.push(new hc(b.p.x+d,b.p.y+d,d,t)),x+=4)}return r&&(console.log("num probes: "+x),console.log("best distance: "+y.d)),y.p}function fc(t,e){return e.max-t.max}function hc(t,e,r,n){this.p=new i(t,e),this.h=r,this.d=function(t,e){for(var r=!1,n=1/0,i=0;it.y!=u.y>t.y&&t.x<(u.x-c.x)*(t.y-c.y)/(u.y-c.y)+c.x&&(r=!r),n=Math.min(n,ro(t,c,u))}return(r?1:-1)*Math.sqrt(n)}(this.p,n),this.max=this.d+this.h*Math.SQRT2}lc.prototype.push=function(t){this.data.push(t),this.length++,this._up(this.length-1)},lc.prototype.pop=function(){if(0!==this.length){var t=this.data[0],e=this.data.pop();return this.length--,this.length>0&&(this.data[0]=e,this._down(0)),t}},lc.prototype.peek=function(){return this.data[0]},lc.prototype._up=function(t){for(var e=this.data,r=this.compare,n=e[t];t>0;){var i=t-1>>1,a=e[i];if(r(n,a)>=0)break;e[t]=a,t=i}e[t]=n},lc.prototype._down=function(t){for(var e=this.data,r=this.compare,n=this.length>>1,i=e[t];t=0)break;e[t]=o,t=a}e[t]=i};var pc=Number.POSITIVE_INFINITY;function dc(t,e){return e[1]!==pc?function(t,e,r){var n=0,i=0;switch(e=Math.abs(e),r=Math.abs(r),t){case"top-right":case"top-left":case"top":i=r-7;break;case"bottom-right":case"bottom-left":case"bottom":i=7-r}switch(t){case"top-right":case"bottom-right":case"right":n=-e;break;case"top-left":case"bottom-left":case"left":n=e}return[n,i]}(t,e[0],e[1]):function(t,e){var r=0,n=0;e<0&&(e=0);var i=e/Math.sqrt(2);switch(t){case"top-right":case"top-left":n=i-7;break;case"bottom-right":case"bottom-left":n=7-i;break;case"bottom":n=7-e;break;case"top":n=e-7}switch(t){case"top-right":case"bottom-right":r=-i;break;case"top-left":case"bottom-left":r=i;break;case"left":r=e;break;case"right":r=-e}return[r,n]}(t,e[0])}function 
mc(t){switch(t){case"right":case"top-right":case"bottom-right":return"right";case"left":case"top-left":case"bottom-left":return"left"}return"center"}function gc(t,e,r,n,a,o,s,l,c,u,f,h,p,d,m){var g=function(t,e,r,n,a,o,s,l){for(var c=n.layout.get("text-rotate").evaluate(o,{})*Math.PI/180,u=[],f=0,h=e.positionedLines;f32640&&_(t.layerIds[0]+': Value for "text-size" is >= 255. Reduce your "text-size".'):"composite"===v.kind&&((y=[128*d.compositeTextSizes[0].evaluate(s,{},m),128*d.compositeTextSizes[1].evaluate(s,{},m)])[0]>32640||y[1]>32640)&&_(t.layerIds[0]+': Value for "text-size" is >= 255. Reduce your "text-size".'),t.addSymbols(t.text,g,y,l,o,s,u,e,c.lineStartIndex,c.lineLength,p,m);for(var x=0,b=f;x=0;o--)if(n.dist(a[o])0)&&("constant"!==a.value.kind||a.value.value.length>0),c="constant"!==s.value.kind||!!s.value.value||Object.keys(s.parameters).length>0,u=i.get("symbol-sort-key");if(this.features=[],l||c){for(var f=e.iconDependencies,h=e.glyphDependencies,p=e.availableImages,d=new pi(this.zoom),m=0,g=t;m=0;for(var O=0,z=k.sections;O=0;s--)a[s]={x:e[s].x,y:e[s].y,tileUnitDistanceFromAnchor:i},s>0&&(i+=e[s-1].dist(e[s]));for(var l=0;l0},Mc.prototype.hasIconData=function(){return this.icon.segments.get().length>0},Mc.prototype.hasDebugData=function(){return this.textCollisionBox&&this.iconCollisionBox},Mc.prototype.hasTextCollisionBoxData=function(){return this.hasDebugData()&&this.textCollisionBox.segments.get().length>0},Mc.prototype.hasIconCollisionBoxData=function(){return this.hasDebugData()&&this.iconCollisionBox.segments.get().length>0},Mc.prototype.addIndicesForPlacedSymbol=function(t,e){for(var r=t.placedSymbolArray.get(e),n=r.vertexStartIndex+4*r.numGlyphs,i=r.vertexStartIndex;i1||this.icon.segments.get().length>1)){this.symbolInstanceIndexes=this.getSortedSymbolIndexes(t),this.sortedAngle=t,this.text.indexArray.clear(),this.icon.indexArray.clear(),this.featureSortOrder=[];for(var 
r=0,n=this.symbolInstanceIndexes;r=0&&n.indexOf(t)===r&&e.addIndicesForPlacedSymbol(e.text,t)})),a.verticalPlacedTextSymbolIndex>=0&&this.addIndicesForPlacedSymbol(this.text,a.verticalPlacedTextSymbolIndex),a.placedIconSymbolIndex>=0&&this.addIndicesForPlacedSymbol(this.icon,a.placedIconSymbolIndex),a.verticalPlacedIconSymbolIndex>=0&&this.addIndicesForPlacedSymbol(this.icon,a.verticalPlacedIconSymbolIndex)}this.text.indexBuffer&&this.text.indexBuffer.updateData(this.text.indexArray),this.icon.indexBuffer&&this.icon.indexBuffer.updateData(this.icon.indexArray)}},Nn("SymbolBucket",Mc,{omit:["layers","collisionBoxArray","features","compareText"]}),Mc.MAX_GLYPHS=65535,Mc.addDynamicAttributes=wc;var Sc=new Si({"symbol-placement":new wi(Lt.layout_symbol["symbol-placement"]),"symbol-spacing":new wi(Lt.layout_symbol["symbol-spacing"]),"symbol-avoid-edges":new wi(Lt.layout_symbol["symbol-avoid-edges"]),"symbol-sort-key":new Ti(Lt.layout_symbol["symbol-sort-key"]),"symbol-z-order":new wi(Lt.layout_symbol["symbol-z-order"]),"icon-allow-overlap":new wi(Lt.layout_symbol["icon-allow-overlap"]),"icon-ignore-placement":new wi(Lt.layout_symbol["icon-ignore-placement"]),"icon-optional":new wi(Lt.layout_symbol["icon-optional"]),"icon-rotation-alignment":new wi(Lt.layout_symbol["icon-rotation-alignment"]),"icon-size":new Ti(Lt.layout_symbol["icon-size"]),"icon-text-fit":new wi(Lt.layout_symbol["icon-text-fit"]),"icon-text-fit-padding":new wi(Lt.layout_symbol["icon-text-fit-padding"]),"icon-image":new Ti(Lt.layout_symbol["icon-image"]),"icon-rotate":new Ti(Lt.layout_symbol["icon-rotate"]),"icon-padding":new wi(Lt.layout_symbol["icon-padding"]),"icon-keep-upright":new wi(Lt.layout_symbol["icon-keep-upright"]),"icon-offset":new Ti(Lt.layout_symbol["icon-offset"]),"icon-anchor":new Ti(Lt.layout_symbol["icon-anchor"]),"icon-pitch-alignment":new wi(Lt.layout_symbol["icon-pitch-alignment"]),"text-pitch-alignment":new wi(Lt.layout_symbol["text-pitch-alignment"]),"text-rotation-alignment":new 
wi(Lt.layout_symbol["text-rotation-alignment"]),"text-field":new Ti(Lt.layout_symbol["text-field"]),"text-font":new Ti(Lt.layout_symbol["text-font"]),"text-size":new Ti(Lt.layout_symbol["text-size"]),"text-max-width":new Ti(Lt.layout_symbol["text-max-width"]),"text-line-height":new wi(Lt.layout_symbol["text-line-height"]),"text-letter-spacing":new Ti(Lt.layout_symbol["text-letter-spacing"]),"text-justify":new Ti(Lt.layout_symbol["text-justify"]),"text-radial-offset":new Ti(Lt.layout_symbol["text-radial-offset"]),"text-variable-anchor":new wi(Lt.layout_symbol["text-variable-anchor"]),"text-anchor":new Ti(Lt.layout_symbol["text-anchor"]),"text-max-angle":new wi(Lt.layout_symbol["text-max-angle"]),"text-writing-mode":new wi(Lt.layout_symbol["text-writing-mode"]),"text-rotate":new Ti(Lt.layout_symbol["text-rotate"]),"text-padding":new wi(Lt.layout_symbol["text-padding"]),"text-keep-upright":new wi(Lt.layout_symbol["text-keep-upright"]),"text-transform":new Ti(Lt.layout_symbol["text-transform"]),"text-offset":new Ti(Lt.layout_symbol["text-offset"]),"text-allow-overlap":new wi(Lt.layout_symbol["text-allow-overlap"]),"text-ignore-placement":new wi(Lt.layout_symbol["text-ignore-placement"]),"text-optional":new wi(Lt.layout_symbol["text-optional"])}),Ec={paint:new Si({"icon-opacity":new Ti(Lt.paint_symbol["icon-opacity"]),"icon-color":new Ti(Lt.paint_symbol["icon-color"]),"icon-halo-color":new Ti(Lt.paint_symbol["icon-halo-color"]),"icon-halo-width":new Ti(Lt.paint_symbol["icon-halo-width"]),"icon-halo-blur":new Ti(Lt.paint_symbol["icon-halo-blur"]),"icon-translate":new wi(Lt.paint_symbol["icon-translate"]),"icon-translate-anchor":new wi(Lt.paint_symbol["icon-translate-anchor"]),"text-opacity":new Ti(Lt.paint_symbol["text-opacity"]),"text-color":new Ti(Lt.paint_symbol["text-color"],{runtimeType:Ut,getOverride:function(t){return t.textColor},hasOverride:function(t){return!!t.textColor}}),"text-halo-color":new Ti(Lt.paint_symbol["text-halo-color"]),"text-halo-width":new 
Ti(Lt.paint_symbol["text-halo-width"]),"text-halo-blur":new Ti(Lt.paint_symbol["text-halo-blur"]),"text-translate":new wi(Lt.paint_symbol["text-translate"]),"text-translate-anchor":new wi(Lt.paint_symbol["text-translate-anchor"])}),layout:Sc},Lc=function(t){this.type=t.property.overrides?t.property.overrides.runtimeType:Ft,this.defaultValue=t};Lc.prototype.evaluate=function(t){if(t.formattedSection){var e=this.defaultValue.property.overrides;if(e&&e.hasOverride(t.formattedSection))return e.getOverride(t.formattedSection)}return t.feature&&t.featureState?this.defaultValue.evaluate(t.feature,t.featureState):this.defaultValue.property.specification.default},Lc.prototype.eachChild=function(t){this.defaultValue.isConstant()||t(this.defaultValue.value._styleExpression.expression)},Lc.prototype.outputDefined=function(){return!1},Lc.prototype.serialize=function(){return null},Nn("FormatSectionOverride",Lc,{omit:["defaultValue"]});var Cc=function(t){function e(e){t.call(this,e,Ec)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype.recalculate=function(e,r){if(t.prototype.recalculate.call(this,e,r),"auto"===this.layout.get("icon-rotation-alignment")&&("point"!==this.layout.get("symbol-placement")?this.layout._values["icon-rotation-alignment"]="map":this.layout._values["icon-rotation-alignment"]="viewport"),"auto"===this.layout.get("text-rotation-alignment")&&("point"!==this.layout.get("symbol-placement")?this.layout._values["text-rotation-alignment"]="map":this.layout._values["text-rotation-alignment"]="viewport"),"auto"===this.layout.get("text-pitch-alignment")&&(this.layout._values["text-pitch-alignment"]=this.layout.get("text-rotation-alignment")),"auto"===this.layout.get("icon-pitch-alignment")&&(this.layout._values["icon-pitch-alignment"]=this.layout.get("icon-rotation-alignment")),"point"===this.layout.get("symbol-placement")){var n=this.layout.get("text-writing-mode");if(n){for(var 
i=[],a=0,o=n;a",targetMapId:n,sourceMapId:a.mapId})}}},qc.prototype.receive=function(t){var e=t.data,r=e.id;if(r&&(!e.targetMapId||this.mapId===e.targetMapId))if(""===e.type){delete this.tasks[r];var n=this.cancelCallbacks[r];delete this.cancelCallbacks[r],n&&n()}else k()||e.mustQueue?(this.tasks[r]=e,this.taskQueue.push(r),this.invoker.trigger()):this.processTask(r,e)},qc.prototype.process=function(){if(this.taskQueue.length){var t=this.taskQueue.shift(),e=this.tasks[t];delete this.tasks[t],this.taskQueue.length&&this.invoker.trigger(),e&&this.processTask(t,e)}},qc.prototype.processTask=function(t,e){var r=this;if(""===e.type){var n=this.callbacks[t];delete this.callbacks[t],n&&(e.error?n(qn(e.error)):n(null,qn(e.data)))}else{var i=!1,a=S(this.globalScope)?void 0:[],o=e.hasCallback?function(e,n){i=!0,delete r.cancelCallbacks[t],r.target.postMessage({id:t,type:"",sourceMapId:r.mapId,error:e?Hn(e):null,data:Hn(n,a)},a)}:function(t){i=!0},s=null,l=qn(e.data);if(this.parent[e.type])s=this.parent[e.type](e.sourceMapId,l,o);else if(this.parent.getWorkerSource){var c=e.type.split(".");s=this.parent.getWorkerSource(e.sourceMapId,c[0],l.source)[c[1]](l,o)}else o(new Error("Could not find function "+e.type));!i&&s&&s.cancel&&(this.cancelCallbacks[t]=s.cancel)}},qc.prototype.remove=function(){this.invoker.remove(),this.target.removeEventListener("message",this.receive,!1)};var Yc=function(t,e){t&&(e?this.setSouthWest(t).setNorthEast(e):4===t.length?this.setSouthWest([t[0],t[1]]).setNorthEast([t[2],t[3]]):this.setSouthWest(t[0]).setNorthEast(t[1]))};Yc.prototype.setNorthEast=function(t){return this._ne=t instanceof Wc?new Wc(t.lng,t.lat):Wc.convert(t),this},Yc.prototype.setSouthWest=function(t){return this._sw=t instanceof Wc?new Wc(t.lng,t.lat):Wc.convert(t),this},Yc.prototype.extend=function(t){var e,r,n=this._sw,i=this._ne;if(t instanceof Wc)e=t,r=t;else{if(!(t instanceof Yc)){if(Array.isArray(t)){if(4===t.length||t.every(Array.isArray)){var a=t;return 
this.extend(Yc.convert(a))}var o=t;return this.extend(Wc.convert(o))}return this}if(e=t._sw,r=t._ne,!e||!r)return this}return n||i?(n.lng=Math.min(e.lng,n.lng),n.lat=Math.min(e.lat,n.lat),i.lng=Math.max(r.lng,i.lng),i.lat=Math.max(r.lat,i.lat)):(this._sw=new Wc(e.lng,e.lat),this._ne=new Wc(r.lng,r.lat)),this},Yc.prototype.getCenter=function(){return new Wc((this._sw.lng+this._ne.lng)/2,(this._sw.lat+this._ne.lat)/2)},Yc.prototype.getSouthWest=function(){return this._sw},Yc.prototype.getNorthEast=function(){return this._ne},Yc.prototype.getNorthWest=function(){return new Wc(this.getWest(),this.getNorth())},Yc.prototype.getSouthEast=function(){return new Wc(this.getEast(),this.getSouth())},Yc.prototype.getWest=function(){return this._sw.lng},Yc.prototype.getSouth=function(){return this._sw.lat},Yc.prototype.getEast=function(){return this._ne.lng},Yc.prototype.getNorth=function(){return this._ne.lat},Yc.prototype.toArray=function(){return[this._sw.toArray(),this._ne.toArray()]},Yc.prototype.toString=function(){return"LngLatBounds("+this._sw.toString()+", "+this._ne.toString()+")"},Yc.prototype.isEmpty=function(){return!(this._sw&&this._ne)},Yc.prototype.contains=function(t){var e=Wc.convert(t),r=e.lng,n=e.lat,i=this._sw.lat<=n&&n<=this._ne.lat,a=this._sw.lng<=r&&r<=this._ne.lng;return this._sw.lng>this._ne.lng&&(a=this._sw.lng>=r&&r>=this._ne.lng),i&&a},Yc.convert=function(t){return!t||t instanceof Yc?t:new Yc(t)};var Wc=function(t,e){if(isNaN(t)||isNaN(e))throw new Error("Invalid LngLat object: ("+t+", "+e+")");if(this.lng=+t,this.lat=+e,this.lat>90||this.lat<-90)throw new Error("Invalid LngLat latitude value: must be between -90 and 90")};Wc.prototype.wrap=function(){return new Wc(c(this.lng,-180,180),this.lat)},Wc.prototype.toArray=function(){return[this.lng,this.lat]},Wc.prototype.toString=function(){return"LngLat("+this.lng+", "+this.lat+")"},Wc.prototype.distanceTo=function(t){var 
e=Math.PI/180,r=this.lat*e,n=t.lat*e,i=Math.sin(r)*Math.sin(n)+Math.cos(r)*Math.cos(n)*Math.cos((t.lng-this.lng)*e);return 6371008.8*Math.acos(Math.min(i,1))},Wc.prototype.toBounds=function(t){void 0===t&&(t=0);var e=360*t/40075017,r=e/Math.cos(Math.PI/180*this.lat);return new Yc(new Wc(this.lng-r,this.lat-e),new Wc(this.lng+r,this.lat+e))},Wc.convert=function(t){if(t instanceof Wc)return t;if(Array.isArray(t)&&(2===t.length||3===t.length))return new Wc(Number(t[0]),Number(t[1]));if(!Array.isArray(t)&&"object"==typeof t&&null!==t)return new Wc(Number("lng"in t?t.lng:t.lon),Number(t.lat));throw new Error("`LngLatLike` argument must be specified as a LngLat instance, an object {lng: , lat: }, an object {lon: , lat: }, or an array of [, ]")};var Xc=2*Math.PI*6371008.8;function Zc(t){return Xc*Math.cos(t*Math.PI/180)}function Jc(t){return(180+t)/360}function Kc(t){return(180-180/Math.PI*Math.log(Math.tan(Math.PI/4+t*Math.PI/360)))/360}function Qc(t,e){return t/Zc(e)}function $c(t){var e=180-360*t;return 360/Math.PI*Math.atan(Math.exp(e*Math.PI/180))-90}var tu=function(t,e,r){void 0===r&&(r=0),this.x=+t,this.y=+e,this.z=+r};tu.fromLngLat=function(t,e){void 0===e&&(e=0);var r=Wc.convert(t);return new tu(Jc(r.lng),Kc(r.lat),Qc(e,r.lat))},tu.prototype.toLngLat=function(){return new Wc(360*this.x-180,$c(this.y))},tu.prototype.toAltitude=function(){return t=this.z,e=this.y,t*Zc($c(e));var t,e},tu.prototype.meterInMercatorCoordinateUnits=function(){return 1/Xc*(t=$c(this.y),1/Math.cos(t*Math.PI/180));var t};var eu=function(t,e,r){this.z=t,this.x=e,this.y=r,this.key=iu(0,t,t,e,r)};eu.prototype.equals=function(t){return this.z===t.z&&this.x===t.x&&this.y===t.y},eu.prototype.url=function(t,e){var r,n,i,a,o,s=(r=this.x,n=this.y,i=this.z,a=Gc(256*r,256*(n=Math.pow(2,i)-n-1),i),o=Gc(256*(r+1),256*(n+1),i),a[0]+","+a[1]+","+o[0]+","+o[1]),l=function(t,e,r){for(var n,i="",a=t;a>0;a--)i+=(e&(n=1<this.canonical.z?new 
nu(t,this.wrap,this.canonical.z,this.canonical.x,this.canonical.y):new nu(t,this.wrap,t,this.canonical.x>>e,this.canonical.y>>e)},nu.prototype.calculateScaledKey=function(t,e){var r=this.canonical.z-t;return t>this.canonical.z?iu(this.wrap*+e,t,this.canonical.z,this.canonical.x,this.canonical.y):iu(this.wrap*+e,t,t,this.canonical.x>>r,this.canonical.y>>r)},nu.prototype.isChildOf=function(t){if(t.wrap!==this.wrap)return!1;var e=this.canonical.z-t.canonical.z;return 0===t.overscaledZ||t.overscaledZ>e&&t.canonical.y===this.canonical.y>>e},nu.prototype.children=function(t){if(this.overscaledZ>=t)return[new nu(this.overscaledZ+1,this.wrap,this.canonical.z,this.canonical.x,this.canonical.y)];var e=this.canonical.z+1,r=2*this.canonical.x,n=2*this.canonical.y;return[new nu(e,this.wrap,e,r,n),new nu(e,this.wrap,e,r+1,n),new nu(e,this.wrap,e,r,n+1),new nu(e,this.wrap,e,r+1,n+1)]},nu.prototype.isLessThan=function(t){return this.wrapt.wrap)&&(this.overscaledZt.overscaledZ)&&(this.canonical.xt.canonical.x)&&this.canonical.y=this.dim+1||e<-1||e>=this.dim+1)throw new RangeError("out of range source coordinates for DEM data");return(e+1)*this.stride+(t+1)},au.prototype._unpackMapbox=function(t,e,r){return(256*t*256+256*e+r)/10-1e4},au.prototype._unpackTerrarium=function(t,e,r){return 256*t+e+r/256-32768},au.prototype.getPixels=function(){return new Eo({width:this.stride,height:this.stride},new Uint8Array(this.data.buffer))},au.prototype.backfillBorder=function(t,e,r){if(this.dim!==t.dim)throw new Error("dem dimension mismatch");var n=e*this.dim,i=e*this.dim+this.dim,a=r*this.dim,o=r*this.dim+this.dim;switch(e){case-1:n=i-1;break;case 1:i=n+1}switch(r){case-1:a=o-1;break;case 1:o=a+1}for(var s=-e*this.dim,l=-r*this.dim,c=a;c=0&&u[3]>=0&&s.insert(o,u[0],u[1],u[2],u[3])}},uu.prototype.loadVTLayers=function(){return this.vtLayers||(this.vtLayers=new Ls.VectorTile(new al(this.rawTileData)).layers,this.sourceLayerCoder=new 
ou(this.vtLayers?Object.keys(this.vtLayers).sort():["_geojsonTileLayer"])),this.vtLayers},uu.prototype.query=function(t,e,r,n){var a=this;this.loadVTLayers();for(var o=t.params||{},s=8192/t.tileSize/t.scale,l=sn(o.filter),c=t.queryGeometry,u=t.queryPadding*s,f=hu(c),h=this.grid.query(f.minX-u,f.minY-u,f.maxX+u,f.maxY+u),p=hu(t.cameraQueryGeometry),d=this.grid3D.query(p.minX-u,p.minY-u,p.maxX+u,p.maxY+u,(function(e,r,n,a){return function(t,e,r,n,a){for(var o=0,s=t;o=l.x&&a>=l.y)return!0}var c=[new i(e,r),new i(e,a),new i(n,a),new i(n,r)];if(t.length>2)for(var u=0,f=c;u=0)return!0;return!1}(a,f)){var h=this.sourceLayerCoder.decode(r),p=this.vtLayers[h].feature(n);if(i.filter(new pi(this.tileID.overscaledZ),p))for(var d=this.getId(p,h),m=0;mn)i=!1;else if(e)if(this.expirationTimeot&&(t.getActor().send("enforceCacheSizeLimit",at),ht=0)},t.clamp=l,t.clearTileCache=function(t){var e=self.caches.delete("mapbox-tiles");t&&e.catch(t).then((function(){return t()}))},t.clipLine=ec,t.clone=function(t){var e=new fo(16);return e[0]=t[0],e[1]=t[1],e[2]=t[2],e[3]=t[3],e[4]=t[4],e[5]=t[5],e[6]=t[6],e[7]=t[7],e[8]=t[8],e[9]=t[9],e[10]=t[10],e[11]=t[11],e[12]=t[12],e[13]=t[13],e[14]=t[14],e[15]=t[15],e},t.clone$1=x,t.clone$2=function(t){var e=new fo(3);return e[0]=t[0],e[1]=t[1],e[2]=t[2],e},t.collisionCircleLayout=tl,t.config=F,t.create=function(){var t=new fo(16);return fo!=Float32Array&&(t[1]=0,t[2]=0,t[3]=0,t[4]=0,t[6]=0,t[7]=0,t[8]=0,t[9]=0,t[11]=0,t[12]=0,t[13]=0,t[14]=0),t[0]=1,t[5]=1,t[10]=1,t[15]=1,t},t.create$1=function(){var t=new fo(9);return fo!=Float32Array&&(t[1]=0,t[2]=0,t[3]=0,t[5]=0,t[6]=0,t[7]=0),t[0]=1,t[4]=1,t[8]=1,t},t.create$2=function(){var t=new fo(4);return fo!=Float32Array&&(t[1]=0,t[2]=0),t[0]=1,t[3]=1,t},t.createCommonjsModule=e,t.createExpression=Wr,t.createLayout=Ii,t.createStyleLayer=function(t){return"custom"===t.type?new Dc(t):new Rc[t.type](t)},t.cross=function(t,e,r){var n=e[0],i=e[1],a=e[2],o=r[0],s=r[1],l=r[2];return 
t[0]=i*l-a*s,t[1]=a*o-n*l,t[2]=n*s-i*o,t},t.deepEqual=function t(e,r){if(Array.isArray(e)){if(!Array.isArray(r)||e.length!==r.length)return!1;for(var n=0;n0&&(a=1/Math.sqrt(a)),t[0]=e[0]*a,t[1]=e[1]*a,t[2]=e[2]*a,t},t.number=qe,t.offscreenCanvasSupported=pt,t.ortho=function(t,e,r,n,i,a,o){var s=1/(e-r),l=1/(n-i),c=1/(a-o);return t[0]=-2*s,t[1]=0,t[2]=0,t[3]=0,t[4]=0,t[5]=-2*l,t[6]=0,t[7]=0,t[8]=0,t[9]=0,t[10]=2*c,t[11]=0,t[12]=(e+r)*s,t[13]=(i+n)*l,t[14]=(o+a)*c,t[15]=1,t},t.parseGlyphPBF=function(t){return new al(t).readFields(Tl,[])},t.pbf=al,t.performSymbolLayout=function(t,e,r,n,i,a,o){t.createArrays();var s=512*t.overscaling;t.tilePixelRatio=8192/s,t.compareText={},t.iconsNeedLinear=!1;var l=t.layers[0].layout,c=t.layers[0]._unevaluatedLayout._values,u={};if("composite"===t.textSizeData.kind){var f=t.textSizeData,h=f.minZoom,p=f.maxZoom;u.compositeTextSizes=[c["text-size"].possiblyEvaluate(new pi(h),o),c["text-size"].possiblyEvaluate(new pi(p),o)]}if("composite"===t.iconSizeData.kind){var d=t.iconSizeData,m=d.minZoom,g=d.maxZoom;u.compositeIconSizes=[c["icon-size"].possiblyEvaluate(new pi(m),o),c["icon-size"].possiblyEvaluate(new pi(g),o)]}u.layoutTextSize=c["text-size"].possiblyEvaluate(new pi(t.zoom+1),o),u.layoutIconSize=c["icon-size"].possiblyEvaluate(new pi(t.zoom+1),o),u.textMaxSize=c["text-size"].possiblyEvaluate(new pi(18));for(var v=24*l.get("text-line-height"),y="map"===l.get("text-rotation-alignment")&&"point"!==l.get("symbol-placement"),x=l.get("text-keep-upright"),b=l.get("text-size"),w=function(){var a=k[T],s=l.get("text-font").evaluate(a,{},o).join(","),c=b.evaluate(a,{},o),f=u.layoutTextSize.evaluate(a,{},o),h=u.layoutIconSize.evaluate(a,{},o),p={horizontal:{},vertical:void 0},d=a.text,m=[0,0];if(d){var g=d.toString(),w=24*l.get("text-letter-spacing").evaluate(a,{},o),A=function(t){for(var e=0,r=t;e=8192||f.y<0||f.y>=8192||function(t,e,r,n,i,a,o,s,l,c,u,f,h,p,d,m,g,v,y,x,b,w,T,k,A){var 
M,S,E,L,C,P=t.addToLineVertexArray(e,r),I=0,O=0,z=0,D=0,R=-1,F=-1,B={},N=ya(""),j=0,U=0;void 0===s._unevaluatedLayout.getValue("text-radial-offset")?(M=s.layout.get("text-offset").evaluate(b,{},k).map((function(t){return 24*t})),j=M[0],U=M[1]):(j=24*s.layout.get("text-radial-offset").evaluate(b,{},k),U=pc);if(t.allowVerticalPlacement&&n.vertical){var V=s.layout.get("text-rotate").evaluate(b,{},k)+90,H=n.vertical;L=new sc(l,e,c,u,f,H,h,p,d,V),o&&(C=new sc(l,e,c,u,f,o,g,v,d,V))}if(i){var q=s.layout.get("icon-rotate").evaluate(b,{}),G="none"!==s.layout.get("icon-text-fit"),Y=rc(i,q,T,G),W=o?rc(o,q,T,G):void 0;E=new sc(l,e,c,u,f,i,g,v,!1,q),I=4*Y.length;var X=t.iconSizeData,Z=null;"source"===X.kind?(Z=[128*s.layout.get("icon-size").evaluate(b,{})])[0]>32640&&_(t.layerIds[0]+': Value for "icon-size" is >= 255. Reduce your "icon-size".'):"composite"===X.kind&&((Z=[128*w.compositeIconSizes[0].evaluate(b,{},k),128*w.compositeIconSizes[1].evaluate(b,{},k)])[0]>32640||Z[1]>32640)&&_(t.layerIds[0]+': Value for "icon-size" is >= 255. 
Reduce your "icon-size".'),t.addSymbols(t.icon,Y,Z,x,y,b,!1,e,P.lineStartIndex,P.lineLength,-1,k),R=t.icon.placedSymbolArray.length-1,W&&(O=4*W.length,t.addSymbols(t.icon,W,Z,x,y,b,Cl.vertical,e,P.lineStartIndex,P.lineLength,-1,k),F=t.icon.placedSymbolArray.length-1)}for(var J in n.horizontal){var K=n.horizontal[J];if(!S){N=ya(K.text);var Q=s.layout.get("text-rotate").evaluate(b,{},k);S=new sc(l,e,c,u,f,K,h,p,d,Q)}var $=1===K.positionedLines.length;if(z+=gc(t,e,K,a,s,d,b,m,P,n.vertical?Cl.horizontal:Cl.horizontalOnly,$?Object.keys(n.horizontal):[J],B,R,w,k),$)break}n.vertical&&(D+=gc(t,e,n.vertical,a,s,d,b,m,P,Cl.vertical,["vertical"],B,F,w,k));var tt=S?S.boxStartIndex:t.collisionBoxArray.length,et=S?S.boxEndIndex:t.collisionBoxArray.length,rt=L?L.boxStartIndex:t.collisionBoxArray.length,nt=L?L.boxEndIndex:t.collisionBoxArray.length,it=E?E.boxStartIndex:t.collisionBoxArray.length,at=E?E.boxEndIndex:t.collisionBoxArray.length,ot=C?C.boxStartIndex:t.collisionBoxArray.length,st=C?C.boxEndIndex:t.collisionBoxArray.length,lt=-1,ct=function(t,e){return t&&t.circleDiameter?Math.max(t.circleDiameter,e):e};lt=ct(S,lt),lt=ct(L,lt),lt=ct(E,lt);var ut=(lt=ct(C,lt))>-1?1:0;ut&&(lt*=A/24);t.glyphOffsetArray.length>=Mc.MAX_GLYPHS&&_("Too many glyphs being rendered in a tile. 
See https://github.com/mapbox/mapbox-gl-js/issues/2907");void 0!==b.sortKey&&t.addToSortKeyRanges(t.symbolInstances.length,b.sortKey);t.symbolInstances.emplaceBack(e.x,e.y,B.right>=0?B.right:-1,B.center>=0?B.center:-1,B.left>=0?B.left:-1,B.vertical||-1,R,F,N,tt,et,rt,nt,it,at,ot,st,c,z,D,I,O,ut,0,h,j,U,lt)}(t,f,s,r,n,i,h,t.layers[0],t.collisionBoxArray,e.index,e.sourceLayerIndex,t.index,v,w,A,l,x,T,M,d,e,a,c,u,o)};if("line"===S)for(var P=0,I=ec(e.geometry,0,0,8192,8192);P1){var V=$l(U,k,r.vertical||m,n,24,y);V&&C(U,V)}}else if("Polygon"===e.type)for(var H=0,q=hs(e.geometry,0);H=E.maxzoom))if("none"!==E.visibility)o(S,this.zoom,n),(m[E.id]=E.createBucket({index:u.bucketLayerIDs.length,layers:S,zoom:this.zoom,pixelRatio:this.pixelRatio,overscaling:this.overscaling,collisionBoxArray:this.collisionBoxArray,sourceLayerIndex:b,sourceID:this.source})).populate(_,g,this.tileID.canonical),u.bucketLayerIDs.push(S.map((function(t){return t.id})))}}}var L=t.mapObject(g.glyphDependencies,(function(t){return Object.keys(t).map(Number)}));Object.keys(L).length?a.send("getGlyphs",{uid:this.uid,stacks:L},(function(t,e){f||(f=t,h=e,I.call(l))})):h={};var C=Object.keys(g.iconDependencies);C.length?a.send("getImages",{icons:C,source:this.source,tileID:this.tileID,type:"icons"},(function(t,e){f||(f=t,p=e,I.call(l))})):p={};var P=Object.keys(g.patternDependencies);function I(){if(f)return s(f);if(h&&p&&d){var e=new i(h),r=new t.ImageAtlas(p,d);for(var a in m){var l=m[a];l instanceof t.SymbolBucket?(o(l.layers,this.zoom,n),t.performSymbolLayout(l,h,e.positions,p,r.iconPositions,this.showCollisionBoxes,this.tileID.canonical)):l.hasPattern&&(l instanceof t.LineBucket||l instanceof t.FillBucket||l instanceof 
t.FillExtrusionBucket)&&(o(l.layers,this.zoom,n),l.addFeatures(g,this.tileID.canonical,r.patternPositions))}this.status="done",s(null,{buckets:t.values(m).filter((function(t){return!t.isEmpty()})),featureIndex:u,collisionBoxArray:this.collisionBoxArray,glyphAtlasImage:e.image,imageAtlas:r,glyphMap:this.returnDependencies?h:null,iconMap:this.returnDependencies?p:null,glyphPositions:this.returnDependencies?e.positions:null})}}P.length?a.send("getImages",{icons:P,source:this.source,tileID:this.tileID,type:"patterns"},(function(t,e){f||(f=t,d=e,I.call(l))})):d={},I.call(this)};var l=function(t,e,r,n){this.actor=t,this.layerIndex=e,this.availableImages=r,this.loadVectorData=n||s,this.loading={},this.loaded={}};l.prototype.loadTile=function(e,r){var n=this,i=e.uid;this.loading||(this.loading={});var o=!!(e&&e.request&&e.request.collectResourceTiming)&&new t.RequestPerformance(e.request),s=this.loading[i]=new a(e);s.abort=this.loadVectorData(e,(function(e,a){if(delete n.loading[i],e||!a)return s.status="done",n.loaded[i]=s,r(e);var l=a.rawData,c={};a.expires&&(c.expires=a.expires),a.cacheControl&&(c.cacheControl=a.cacheControl);var u={};if(o){var f=o.finish();f&&(u.resourceTiming=JSON.parse(JSON.stringify(f)))}s.vectorTile=a.vectorTile,s.parse(a.vectorTile,n.layerIndex,n.availableImages,n.actor,(function(e,n){if(e||!n)return r(e);r(null,t.extend({rawTileData:l.slice(0)},n,c,u))})),n.loaded=n.loaded||{},n.loaded[i]=s}))},l.prototype.reloadTile=function(t,e){var r=this,n=this.loaded,i=t.uid,a=this;if(n&&n[i]){var o=n[i];o.showCollisionBoxes=t.showCollisionBoxes;var s=function(t,n){var i=o.reloadCallback;i&&(delete o.reloadCallback,o.parse(o.vectorTile,a.layerIndex,r.availableImages,a.actor,i)),e(t,n)};"parsing"===o.status?o.reloadCallback=s:"done"===o.status&&(o.vectorTile?o.parse(o.vectorTile,this.layerIndex,this.availableImages,this.actor,s):s())}},l.prototype.abortTile=function(t,e){var r=this.loading,n=t.uid;r&&r[n]&&r[n].abort&&(r[n].abort(),delete 
r[n]),e()},l.prototype.removeTile=function(t,e){var r=this.loaded,n=t.uid;r&&r[n]&&delete r[n],e()};var c=t.window.ImageBitmap,u=function(){this.loaded={}};u.prototype.loadTile=function(e,r){var n=e.uid,i=e.encoding,a=e.rawImageData,o=c&&a instanceof c?this.getImageData(a):a,s=new t.DEMData(n,o,i);this.loaded=this.loaded||{},this.loaded[n]=s,r(null,s)},u.prototype.getImageData=function(e){this.offscreenCanvas&&this.offscreenCanvasContext||(this.offscreenCanvas=new OffscreenCanvas(e.width,e.height),this.offscreenCanvasContext=this.offscreenCanvas.getContext("2d")),this.offscreenCanvas.width=e.width,this.offscreenCanvas.height=e.height,this.offscreenCanvasContext.drawImage(e,0,0,e.width,e.height);var r=this.offscreenCanvasContext.getImageData(-1,-1,e.width+2,e.height+2);return this.offscreenCanvasContext.clearRect(0,0,this.offscreenCanvas.width,this.offscreenCanvas.height),new t.RGBAImage({width:r.width,height:r.height},r.data)},u.prototype.removeTile=function(t){var e=this.loaded,r=t.uid;e&&e[r]&&delete e[r]};var f=function t(e,r){var n,i=e&&e.type;if("FeatureCollection"===i)for(n=0;n=0!=!!e&&t.reverse()}var d=t.vectorTile.VectorTileFeature.prototype.toGeoJSON,m=function(e){this._feature=e,this.extent=t.EXTENT,this.type=e.type,this.properties=e.tags,"id"in e&&!isNaN(e.id)&&(this.id=parseInt(e.id,10))};m.prototype.loadGeometry=function(){if(1===this._feature.type){for(var e=[],r=0,n=this._feature.geometry;r>31}function P(t,e){for(var r=t.loadGeometry(),n=t.type,i=0,a=0,o=r.length,s=0;s>1;!function t(e,r,n,i,a,o){for(;a>i;){if(a-i>600){var s=a-i+1,l=n-i+1,c=Math.log(s),u=.5*Math.exp(2*c/3),f=.5*Math.sqrt(c*u*(s-u)/s)*(l-s/2<0?-1:1),h=Math.max(i,Math.floor(n-l*u/s+f)),p=Math.min(a,Math.floor(n+(s-l)*u/s+f));t(e,r,n,h,p,o)}var d=r[2*n+o],m=i,g=a;for(z(e,r,i,n),r[2*a+o]>d&&z(e,r,i,a);md;)g--}r[2*i+o]===d?z(e,r,i,g):(g++,z(e,r,g,a)),g<=n&&(i=g+1),n<=g&&(a=g-1)}}(t,e,o,n,i,a%2),O(t,e,r,n,o-1,a+1),O(t,e,r,o+1,i,a+1)}}function 
z(t,e,r,n){D(t,r,n),D(e,2*r,2*n),D(e,2*r+1,2*n+1)}function D(t,e,r){var n=t[e];t[e]=t[r],t[r]=n}function R(t,e,r,n){var i=t-r,a=e-n;return i*i+a*a}_.fromVectorTileJs=w,_.fromGeojsonVt=T,_.GeoJSONWrapper=k;var F=function(t){return t[0]},B=function(t){return t[1]},N=function(t,e,r,n,i){void 0===e&&(e=F),void 0===r&&(r=B),void 0===n&&(n=64),void 0===i&&(i=Float64Array),this.nodeSize=n,this.points=t;for(var a=t.length<65536?Uint16Array:Uint32Array,o=this.ids=new a(t.length),s=this.coords=new i(2*t.length),l=0;l=r&&s<=i&&l>=n&&l<=a&&u.push(t[d]);else{var m=Math.floor((p+h)/2);s=e[2*m],l=e[2*m+1],s>=r&&s<=i&&l>=n&&l<=a&&u.push(t[m]);var g=(f+1)%2;(0===f?r<=s:n<=l)&&(c.push(p),c.push(m-1),c.push(g)),(0===f?i>=s:a>=l)&&(c.push(m+1),c.push(h),c.push(g))}}return u}(this.ids,this.coords,t,e,r,n,this.nodeSize)},N.prototype.within=function(t,e,r){return function(t,e,r,n,i,a){for(var o=[0,t.length-1,0],s=[],l=i*i;o.length;){var c=o.pop(),u=o.pop(),f=o.pop();if(u-f<=a)for(var h=f;h<=u;h++)R(e[2*h],e[2*h+1],r,n)<=l&&s.push(t[h]);else{var p=Math.floor((f+u)/2),d=e[2*p],m=e[2*p+1];R(d,m,r,n)<=l&&s.push(t[p]);var g=(c+1)%2;(0===c?r-i<=d:n-i<=m)&&(o.push(f),o.push(p-1),o.push(g)),(0===c?r+i>=d:n+i>=m)&&(o.push(p+1),o.push(u),o.push(g))}}return s}(this.ids,this.coords,t,e,r,this.nodeSize)};var j={minZoom:0,maxZoom:16,radius:40,extent:512,nodeSize:64,log:!1,generateId:!1,reduce:null,map:function(t){return t}},U=function(t){this.options=X(Object.create(j),t),this.trees=new Array(this.options.maxZoom+1)};function V(t,e,r,n,i){return{x:t,y:e,zoom:1/0,id:r,parentId:-1,numPoints:n,properties:i}}function H(t,e){var r=t.geometry.coordinates,n=r[0],i=r[1];return{x:Y(n),y:W(i),zoom:1/0,index:e,parentId:-1}}function q(t){return{type:"Feature",id:t.id,properties:G(t),geometry:{type:"Point",coordinates:[(n=t.x,360*(n-.5)),(e=t.y,r=(180-360*e)*Math.PI/180,360*Math.atan(Math.exp(r))/Math.PI-90)]}};var e,r,n}function G(t){var 
e=t.numPoints,r=e>=1e4?Math.round(e/1e3)+"k":e>=1e3?Math.round(e/100)/10+"k":e;return X(X({},t.properties),{cluster:!0,cluster_id:t.id,point_count:e,point_count_abbreviated:r})}function Y(t){return t/360+.5}function W(t){var e=Math.sin(t*Math.PI/180),r=.5-.25*Math.log((1+e)/(1-e))/Math.PI;return r<0?0:r>1?1:r}function X(t,e){for(var r in e)t[r]=e[r];return t}function Z(t){return t.x}function J(t){return t.y}function K(t,e,r,n,i,a){var o=i-r,s=a-n;if(0!==o||0!==s){var l=((t-r)*o+(e-n)*s)/(o*o+s*s);l>1?(r=i,n=a):l>0&&(r+=o*l,n+=s*l)}return(o=t-r)*o+(s=e-n)*s}function Q(t,e,r,n){var i={id:void 0===t?null:t,type:e,geometry:r,tags:n,minX:1/0,minY:1/0,maxX:-1/0,maxY:-1/0};return function(t){var e=t.geometry,r=t.type;if("Point"===r||"MultiPoint"===r||"LineString"===r)$(t,e);else if("Polygon"===r||"MultiLineString"===r)for(var n=0;n0&&(o+=n?(i*c-l*a)/2:Math.sqrt(Math.pow(l-i,2)+Math.pow(c-a,2))),i=l,a=c}var u=e.length-3;e[2]=1,function t(e,r,n,i){for(var a,o=i,s=n-r>>1,l=n-r,c=e[r],u=e[r+1],f=e[n],h=e[n+1],p=r+3;po)a=p,o=d;else if(d===o){var m=Math.abs(p-s);mi&&(a-r>3&&t(e,r,a,i),e[a+2]=o,n-a>3&&t(e,a,n,i))}(e,0,u,r),e[u+2]=1,e.size=Math.abs(o),e.start=0,e.end=e.size}function nt(t,e,r,n){for(var i=0;i1?1:r}function ot(t,e,r,n,i,a,o,s){if(n/=e,a>=(r/=e)&&o=n)return null;for(var l=[],c=0;c=r&&d=n)){var m=[];if("Point"===h||"MultiPoint"===h)st(f,m,r,n,i);else if("LineString"===h)lt(f,m,r,n,i,!1,s.lineMetrics);else if("MultiLineString"===h)ut(f,m,r,n,i,!1);else if("Polygon"===h)ut(f,m,r,n,i,!0);else if("MultiPolygon"===h)for(var g=0;g=r&&o<=n&&(e.push(t[a]),e.push(t[a+1]),e.push(t[a+2]))}}function lt(t,e,r,n,i,a,o){for(var s,l,c=ct(t),u=0===i?ht:pt,f=t.start,h=0;hr&&(l=u(c,p,d,g,v,r),o&&(c.start=f+s*l)):y>n?x=r&&(l=u(c,p,d,g,v,r),b=!0),x>n&&y<=n&&(l=u(c,p,d,g,v,n),b=!0),!a&&b&&(o&&(c.end=f+s*l),e.push(c),c=ct(t)),o&&(f+=s)}var 
_=t.length-3;p=t[_],d=t[_+1],m=t[_+2],(y=0===i?p:d)>=r&&y<=n&&ft(c,p,d,m),_=c.length-3,a&&_>=3&&(c[_]!==c[0]||c[_+1]!==c[1])&&ft(c,c[0],c[1],c[2]),c.length&&e.push(c)}function ct(t){var e=[];return e.size=t.size,e.start=t.start,e.end=t.end,e}function ut(t,e,r,n,i,a){for(var o=0;oo.maxX&&(o.maxX=u),f>o.maxY&&(o.maxY=f)}return o}function xt(t,e,r,n){var i=e.geometry,a=e.type,o=[];if("Point"===a||"MultiPoint"===a)for(var s=0;s0&&e.size<(i?o:n))r.numPoints+=e.length/3;else{for(var s=[],l=0;lo)&&(r.numSimplified++,s.push(e[l]),s.push(e[l+1])),r.numPoints++;i&&function(t,e){for(var r=0,n=0,i=t.length,a=i-2;n0===e)for(n=0,i=t.length;n24)throw new Error("maxZoom should be in the 0-24 range");if(e.promoteId&&e.generateId)throw new Error("promoteId and generateId cannot be used together.");var n=function(t,e){var r=[];if("FeatureCollection"===t.type)for(var n=0;n=n;c--){var u=+Date.now();s=this._cluster(s,c),this.trees[c]=new N(s,Z,J,a,Float32Array),r&&console.log("z%d: %d clusters in %dms",c,s.length,+Date.now()-u)}return r&&console.timeEnd("total time"),this},U.prototype.getClusters=function(t,e){var r=((t[0]+180)%360+360)%360-180,n=Math.max(-90,Math.min(90,t[1])),i=180===t[2]?180:((t[2]+180)%360+360)%360-180,a=Math.max(-90,Math.min(90,t[3]));if(t[2]-t[0]>=360)r=-180,i=180;else if(r>i){var o=this.getClusters([r,n,180,a],e),s=this.getClusters([-180,n,i,a],e);return o.concat(s)}for(var l=this.trees[this._limitZoom(e)],c=[],u=0,f=l.range(Y(r),W(a),Y(i),W(n));u1?this._map(c,!0):null,g=(l<<5)+(e+1)+this.points.length,v=0,y=f;v>5},U.prototype._getOriginZoom=function(t){return(t-this.points.length)%32},U.prototype._map=function(t,e){if(t.numPoints)return e?X({},t.properties):t.properties;var r=this.points[t.index].properties,n=this.options.map(r);return e&&n===r?X({},n):n},_t.prototype.options={maxZoom:14,indexMaxZoom:5,indexMaxPoints:1e5,tolerance:3,extent:4096,buffer:64,lineMetrics:!1,promoteId:null,generateId:!1,debug:0},_t.prototype.splitTile=function(t,e,r,n,i,a,o){for(var 
s=[t,e,r,n],l=this.options,c=l.debug;s.length;){n=s.pop(),r=s.pop(),e=s.pop(),t=s.pop();var u=1<1&&console.time("creation"),h=this.tiles[f]=yt(t,e,r,n,l),this.tileCoords.push({z:e,x:r,y:n}),c)){c>1&&(console.log("tile z%d-%d-%d (features: %d, points: %d, simplified: %d)",e,r,n,h.numFeatures,h.numPoints,h.numSimplified),console.timeEnd("creation"));var p="z"+e;this.stats[p]=(this.stats[p]||0)+1,this.total++}if(h.source=t,i){if(e===l.maxZoom||e===i)continue;var d=1<1&&console.time("clipping");var m,g,v,y,x,b,_=.5*l.buffer/l.extent,w=.5-_,T=.5+_,k=1+_;m=g=v=y=null,x=ot(t,u,r-_,r+T,0,h.minX,h.maxX,l),b=ot(t,u,r+w,r+k,0,h.minX,h.maxX,l),t=null,x&&(m=ot(x,u,n-_,n+T,1,h.minY,h.maxY,l),g=ot(x,u,n+w,n+k,1,h.minY,h.maxY,l),x=null),b&&(v=ot(b,u,n-_,n+T,1,h.minY,h.maxY,l),y=ot(b,u,n+w,n+k,1,h.minY,h.maxY,l),b=null),c>1&&console.timeEnd("clipping"),s.push(m||[],e+1,2*r,2*n),s.push(g||[],e+1,2*r,2*n+1),s.push(v||[],e+1,2*r+1,2*n),s.push(y||[],e+1,2*r+1,2*n+1)}}},_t.prototype.getTile=function(t,e,r){var n=this.options,i=n.extent,a=n.debug;if(t<0||t>24)return null;var o=1<1&&console.log("drilling down to z%d-%d-%d",t,e,r);for(var l,c=t,u=e,f=r;!l&&c>0;)c--,u=Math.floor(u/2),f=Math.floor(f/2),l=this.tiles[wt(c,u,f)];return l&&l.source?(a>1&&console.log("found parent tile z%d-%d-%d",c,u,f),a>1&&console.time("drilling down"),this.splitTile(l.source,c,u,f,t,e,r),a>1&&console.timeEnd("drilling down"),this.tiles[s]?gt(this.tiles[s],i):null):null};var kt=function(e){function r(t,r,n,i){e.call(this,t,r,n,Tt),i&&(this.loadGeoJSON=i)}return e&&(r.__proto__=e),r.prototype=Object.create(e&&e.prototype),r.prototype.constructor=r,r.prototype.loadData=function(t,e){this._pendingCallback&&this._pendingCallback(null,{abandoned:!0}),this._pendingCallback=e,this._pendingLoadDataParams=t,this._state&&"Idle"!==this._state?this._state="NeedsLoadData":(this._state="Coalescing",this._loadData())},r.prototype._loadData=function(){var e=this;if(this._pendingCallback&&this._pendingLoadDataParams){var 
r=this._pendingCallback,n=this._pendingLoadDataParams;delete this._pendingCallback,delete this._pendingLoadDataParams;var i=!!(n&&n.request&&n.request.collectResourceTiming)&&new t.RequestPerformance(n.request);this.loadGeoJSON(n,(function(a,o){if(a||!o)return r(a);if("object"!=typeof o)return r(new Error("Input data given to '"+n.source+"' is not a valid GeoJSON object."));f(o,!0);try{e._geoJSONIndex=n.cluster?new U(function(e){var r=e.superclusterOptions,n=e.clusterProperties;if(!n||!r)return r;for(var i={},a={},o={accumulated:null,zoom:0},s={properties:null},l=Object.keys(n),c=0,u=l;c=0?0:e.button},r.remove=function(t){t.parentNode&&t.parentNode.removeChild(t)};var h=function(e){function r(){e.call(this),this.images={},this.updatedImages={},this.callbackDispatchedThisFrame={},this.loaded=!1,this.requestors=[],this.patterns={},this.atlasImage=new t.RGBAImage({width:1,height:1}),this.dirty=!0}return e&&(r.__proto__=e),r.prototype=Object.create(e&&e.prototype),r.prototype.constructor=r,r.prototype.isLoaded=function(){return this.loaded},r.prototype.setLoaded=function(t){if(this.loaded!==t&&(this.loaded=t,t)){for(var e=0,r=this.requestors;e=0?1.2:1))}function v(t,e,r,n,i,a,o){for(var s=0;s65535)e(new Error("glyphs > 65535 not supported"));else if(a.ranges[s])e(null,{stack:r,id:i,glyph:o});else{var l=a.requests[s];l||(l=a.requests[s]=[],x.loadGlyphRange(r,s,n.url,n.requestManager,(function(t,e){if(e){for(var r in e)n._doesCharSupportLocalGlyph(+r)||(a.glyphs[+r]=e[+r]);a.ranges[s]=!0}for(var i=0,o=l;i1&&(l=t[++s]);var u=Math.abs(c-l.left),f=Math.abs(c-l.right),h=Math.min(u,f),p=void 0,d=i/r*(n+1);if(l.isDash){var m=n-Math.abs(d);p=Math.sqrt(h*h+m*m)}else p=n-Math.sqrt(h*h+d*d);this.data[o+c]=Math.max(0,Math.min(255,p+128))}},T.prototype.addRegularDash=function(t){for(var e=t.length-1;e>=0;--e){var r=t[e],n=t[e+1];r.zeroLength?t.splice(e,1):n&&n.isDash===r.isDash&&(n.left=r.left,t.splice(e,1))}var 
i=t[0],a=t[t.length-1];i.isDash===a.isDash&&(i.left=a.left-this.width,a.right=i.right+this.width);for(var o=this.width*this.nextRow,s=0,l=t[s],c=0;c1&&(l=t[++s]);var u=Math.abs(c-l.left),f=Math.abs(c-l.right),h=Math.min(u,f),p=l.isDash?h:-h;this.data[o+c]=Math.max(0,Math.min(255,p+128))}},T.prototype.addDash=function(e,r){var n=r?7:0,i=2*n+1;if(this.nextRow+i>this.height)return t.warnOnce("LineAtlas out of space"),null;for(var a=0,o=0;o=n&&e.x=i&&e.y0&&(l[new t.OverscaledTileID(e.overscaledZ,a,r.z,i,r.y-1).key]={backfilled:!1},l[new t.OverscaledTileID(e.overscaledZ,e.wrap,r.z,r.x,r.y-1).key]={backfilled:!1},l[new t.OverscaledTileID(e.overscaledZ,s,r.z,o,r.y-1).key]={backfilled:!1}),r.y+10&&(n.resourceTiming=e._resourceTiming,e._resourceTiming=[]),e.fire(new t.Event("data",n))}}))},r.prototype.onAdd=function(t){this.map=t,this.load()},r.prototype.setData=function(e){var r=this;return this._data=e,this.fire(new t.Event("dataloading",{dataType:"source"})),this._updateWorkerData((function(e){if(e)r.fire(new t.ErrorEvent(e));else{var n={dataType:"source",sourceDataType:"content"};r._collectResourceTiming&&r._resourceTiming&&r._resourceTiming.length>0&&(n.resourceTiming=r._resourceTiming,r._resourceTiming=[]),r.fire(new t.Event("data",n))}})),this},r.prototype.getClusterExpansionZoom=function(t,e){return this.actor.send("geojson.getClusterExpansionZoom",{clusterId:t,source:this.id},e),this},r.prototype.getClusterChildren=function(t,e){return this.actor.send("geojson.getClusterChildren",{clusterId:t,source:this.id},e),this},r.prototype.getClusterLeaves=function(t,e,r,n){return this.actor.send("geojson.getClusterLeaves",{source:this.id,clusterId:t,limit:e,offset:r},n),this},r.prototype._updateWorkerData=function(e){var r=this;this._loaded=!1;var n=t.extend({},this.workerOptions),i=this._data;"string"==typeof 
i?(n.request=this.map._requestManager.transformRequest(t.browser.resolveURL(i),t.ResourceType.Source),n.request.collectResourceTiming=this._collectResourceTiming):n.data=JSON.stringify(i),this.actor.send(this.type+".loadData",n,(function(t,i){r._removed||i&&i.abandoned||(r._loaded=!0,i&&i.resourceTiming&&i.resourceTiming[r.id]&&(r._resourceTiming=i.resourceTiming[r.id].slice(0)),r.actor.send(r.type+".coalesce",{source:n.source},null),e(t))}))},r.prototype.loaded=function(){return this._loaded},r.prototype.loadTile=function(e,r){var n=this,i=e.actor?"reloadTile":"loadTile";e.actor=this.actor;var a={type:this.type,uid:e.uid,tileID:e.tileID,zoom:e.tileID.overscaledZ,maxZoom:this.maxzoom,tileSize:this.tileSize,source:this.id,pixelRatio:t.browser.devicePixelRatio,showCollisionBoxes:this.map.showCollisionBoxes,promoteId:this.promoteId};e.request=this.actor.send(i,a,(function(t,a){return delete e.request,e.unloadVectorData(),e.aborted?r(null):t?r(t):(e.loadVectorData(a,n.map.painter,"reloadTile"===i),r(null))}))},r.prototype.abortTile=function(t){t.request&&(t.request.cancel(),delete t.request),t.aborted=!0},r.prototype.unloadTile=function(t){t.unloadVectorData(),this.actor.send("removeTile",{uid:t.uid,type:this.type,source:this.id})},r.prototype.onRemove=function(){this._removed=!0,this.actor.send("removeSource",{type:this.type,source:this.id})},r.prototype.serialize=function(){return t.extend({},this._options,{type:this.type,data:this._data})},r.prototype.hasTransition=function(){return!1},r}(t.Evented),P=t.createLayout([{name:"a_pos",type:"Int16",components:2},{name:"a_texture_pos",type:"Int16",components:2}]),I=function(e){function r(t,r,n,i){e.call(this),this.id=t,this.dispatcher=n,this.coordinates=r.coordinates,this.type="image",this.minzoom=0,this.maxzoom=22,this.tileSize=512,this.tiles={},this._loaded=!1,this.setEventedParent(i),this.options=r}return 
e&&(r.__proto__=e),r.prototype=Object.create(e&&e.prototype),r.prototype.constructor=r,r.prototype.load=function(e,r){var n=this;this._loaded=!1,this.fire(new t.Event("dataloading",{dataType:"source"})),this.url=this.options.url,t.getImage(this.map._requestManager.transformRequest(this.url,t.ResourceType.Image),(function(i,a){n._loaded=!0,i?n.fire(new t.ErrorEvent(i)):a&&(n.image=a,e&&(n.coordinates=e),r&&r(),n._finishLoading())}))},r.prototype.loaded=function(){return this._loaded},r.prototype.updateImage=function(t){var e=this;return this.image&&t.url?(this.options.url=t.url,this.load(t.coordinates,(function(){e.texture=null})),this):this},r.prototype._finishLoading=function(){this.map&&(this.setCoordinates(this.coordinates),this.fire(new t.Event("data",{dataType:"source",sourceDataType:"metadata"})))},r.prototype.onAdd=function(t){this.map=t,this.load()},r.prototype.setCoordinates=function(e){var r=this;this.coordinates=e;var n=e.map(t.MercatorCoordinate.fromLngLat);this.tileID=function(e){for(var r=1/0,n=1/0,i=-1/0,a=-1/0,o=0,s=e;or.end(0)?this.fire(new t.ErrorEvent(new t.ValidationError("sources."+this.id,null,"Playback for this video can be set only between the "+r.start(0)+" and "+r.end(0)+"-second mark."))):this.video.currentTime=e}},r.prototype.getVideo=function(){return this.video},r.prototype.onAdd=function(t){this.map||(this.map=t,this.load(),this.video&&(this.video.play(),this.setCoordinates(this.coordinates)))},r.prototype.prepare=function(){if(!(0===Object.keys(this.tiles).length||this.video.readyState<2)){var e=this.map.painter.context,r=e.gl;for(var n in this.boundsBuffer||(this.boundsBuffer=e.createVertexBuffer(this._boundsArray,P.members)),this.boundsSegments||(this.boundsSegments=t.SegmentVector.simpleSegment(0,0,4,2)),this.texture?this.video.paused||(this.texture.bind(r.LINEAR,r.CLAMP_TO_EDGE),r.texSubImage2D(r.TEXTURE_2D,0,0,0,r.RGBA,r.UNSIGNED_BYTE,this.video)):(this.texture=new 
t.Texture(e,this.video,r.RGBA),this.texture.bind(r.LINEAR,r.CLAMP_TO_EDGE)),this.tiles){var i=this.tiles[n];"loaded"!==i.state&&(i.state="loaded",i.texture=this.texture)}}},r.prototype.serialize=function(){return{type:"video",urls:this.urls,coordinates:this.coordinates}},r.prototype.hasTransition=function(){return this.video&&!this.video.paused},r}(I),z=function(e){function r(r,n,i,a){e.call(this,r,n,i,a),n.coordinates?Array.isArray(n.coordinates)&&4===n.coordinates.length&&!n.coordinates.some((function(t){return!Array.isArray(t)||2!==t.length||t.some((function(t){return"number"!=typeof t}))}))||this.fire(new t.ErrorEvent(new t.ValidationError("sources."+r,null,'"coordinates" property must be an array of 4 longitude/latitude array pairs'))):this.fire(new t.ErrorEvent(new t.ValidationError("sources."+r,null,'missing required property "coordinates"'))),n.animate&&"boolean"!=typeof n.animate&&this.fire(new t.ErrorEvent(new t.ValidationError("sources."+r,null,'optional "animate" property must be a boolean value'))),n.canvas?"string"==typeof n.canvas||n.canvas instanceof t.window.HTMLCanvasElement||this.fire(new t.ErrorEvent(new t.ValidationError("sources."+r,null,'"canvas" must be either a string representing the ID of the canvas element from which to read, or an HTMLCanvasElement instance'))):this.fire(new t.ErrorEvent(new t.ValidationError("sources."+r,null,'missing required property "canvas"'))),this.options=n,this.animate=void 0===n.animate||n.animate}return e&&(r.__proto__=e),r.prototype=Object.create(e&&e.prototype),r.prototype.constructor=r,r.prototype.load=function(){this._loaded=!0,this.canvas||(this.canvas=this.options.canvas instanceof t.window.HTMLCanvasElement?this.options.canvas:t.window.document.getElementById(this.options.canvas)),this.width=this.canvas.width,this.height=this.canvas.height,this._hasInvalidDimensions()?this.fire(new t.ErrorEvent(new Error("Canvas dimensions cannot be less than or equal to 
zero."))):(this.play=function(){this._playing=!0,this.map.triggerRepaint()},this.pause=function(){this._playing&&(this.prepare(),this._playing=!1)},this._finishLoading())},r.prototype.getCanvas=function(){return this.canvas},r.prototype.onAdd=function(t){this.map=t,this.load(),this.canvas&&this.animate&&this.play()},r.prototype.onRemove=function(){this.pause()},r.prototype.prepare=function(){var e=!1;if(this.canvas.width!==this.width&&(this.width=this.canvas.width,e=!0),this.canvas.height!==this.height&&(this.height=this.canvas.height,e=!0),!this._hasInvalidDimensions()&&0!==Object.keys(this.tiles).length){var r=this.map.painter.context,n=r.gl;for(var i in this.boundsBuffer||(this.boundsBuffer=r.createVertexBuffer(this._boundsArray,P.members)),this.boundsSegments||(this.boundsSegments=t.SegmentVector.simpleSegment(0,0,4,2)),this.texture?(e||this._playing)&&this.texture.update(this.canvas,{premultiply:!0}):this.texture=new t.Texture(r,this.canvas,n.RGBA,{premultiply:!0}),this.tiles){var a=this.tiles[i];"loaded"!==a.state&&(a.state="loaded",a.texture=this.texture)}}},r.prototype.serialize=function(){return{type:"canvas",coordinates:this.coordinates}},r.prototype.hasTransition=function(){return this._playing},r.prototype._hasInvalidDimensions=function(){for(var t=0,e=[this.canvas.width,this.canvas.height];tthis.max){var o=this._getAndRemoveByKey(this.order[0]);o&&this.onRemove(o)}return this},N.prototype.has=function(t){return t.wrapped().key in this.data},N.prototype.getAndRemove=function(t){return this.has(t)?this._getAndRemoveByKey(t.wrapped().key):null},N.prototype._getAndRemoveByKey=function(t){var e=this.data[t].shift();return e.timeout&&clearTimeout(e.timeout),0===this.data[t].length&&delete this.data[t],this.order.splice(this.order.indexOf(t),1),e.value},N.prototype.getByKey=function(t){var e=this.data[t];return e?e[0].value:null},N.prototype.get=function(t){return 
this.has(t)?this.data[t.wrapped().key][0].value:null},N.prototype.remove=function(t,e){if(!this.has(t))return this;var r=t.wrapped().key,n=void 0===e?0:this.data[r].indexOf(e),i=this.data[r][n];return this.data[r].splice(n,1),i.timeout&&clearTimeout(i.timeout),0===this.data[r].length&&delete this.data[r],this.onRemove(i.value),this.order.splice(this.order.indexOf(r),1),this},N.prototype.setMaxSize=function(t){for(this.max=t;this.order.length>this.max;){var e=this._getAndRemoveByKey(this.order[0]);e&&this.onRemove(e)}return this},N.prototype.filter=function(t){var e=[];for(var r in this.data)for(var n=0,i=this.data[r];n1||(Math.abs(r)>1&&(1===Math.abs(r+i)?r+=i:1===Math.abs(r-i)&&(r-=i)),e.dem&&t.dem&&(t.dem.backfillBorder(e.dem,r,n),t.neighboringTiles&&t.neighboringTiles[a]&&(t.neighboringTiles[a].backfilled=!0)))}},r.prototype.getTile=function(t){return this.getTileByID(t.key)},r.prototype.getTileByID=function(t){return this._tiles[t]},r.prototype._retainLoadedChildren=function(t,e,r,n){for(var i in this._tiles){var a=this._tiles[i];if(!(n[i]||!a.hasData()||a.tileID.overscaledZ<=e||a.tileID.overscaledZ>r)){for(var o=a.tileID;a&&a.tileID.overscaledZ>e+1;){var s=a.tileID.scaledTo(a.tileID.overscaledZ-1);(a=this._tiles[s.key])&&a.hasData()&&(o=s)}for(var l=o;l.overscaledZ>e;)if(t[(l=l.scaledTo(l.overscaledZ-1)).key]){n[o.key]=o;break}}}},r.prototype.findLoadedParent=function(t,e){if(t.key in this._loadedParentTiles){var r=this._loadedParentTiles[t.key];return r&&r.tileID.overscaledZ>=e?r:null}for(var n=t.overscaledZ-1;n>=e;n--){var i=t.scaledTo(n),a=this._getLoadedTile(i);if(a)return a}},r.prototype._getLoadedTile=function(t){var e=this._tiles[t.key];return e&&e.hasData()?e:this._cache.getByKey(t.wrapped().key)},r.prototype.updateCacheSize=function(t){var e=(Math.ceil(t.width/this._source.tileSize)+1)*(Math.ceil(t.height/this._source.tileSize)+1),r=Math.floor(5*e),n="number"==typeof 
this._maxTileCacheSize?Math.min(this._maxTileCacheSize,r):r;this._cache.setMaxSize(n)},r.prototype.handleWrapJump=function(t){var e=(t-(void 0===this._prevLng?t:this._prevLng))/360,r=Math.round(e);if(this._prevLng=t,r){var n={};for(var i in this._tiles){var a=this._tiles[i];a.tileID=a.tileID.unwrapTo(a.tileID.wrap+r),n[a.tileID.key]=a}for(var o in this._tiles=n,this._timers)clearTimeout(this._timers[o]),delete this._timers[o];for(var s in this._tiles){var l=this._tiles[s];this._setTileReloadTimer(s,l)}}},r.prototype.update=function(e){var n=this;if(this.transform=e,this._sourceLoaded&&!this._paused){var i;this.updateCacheSize(e),this.handleWrapJump(this.transform.center.lng),this._coveredTiles={},this.used?this._source.tileID?i=e.getVisibleUnwrappedCoordinates(this._source.tileID).map((function(e){return new t.OverscaledTileID(e.canonical.z,e.wrap,e.canonical.z,e.canonical.x,e.canonical.y)})):(i=e.coveringTiles({tileSize:this._source.tileSize,minzoom:this._source.minzoom,maxzoom:this._source.maxzoom,roundZoom:this._source.roundZoom,reparseOverscaled:this._source.reparseOverscaled}),this._source.hasTile&&(i=i.filter((function(t){return n._source.hasTile(t)})))):i=[];var a=e.coveringZoomLevel(this._source),o=Math.max(a-r.maxOverzooming,this._source.minzoom),s=Math.max(a+r.maxUnderzooming,this._source.minzoom),l=this._updateRetainedTiles(i,a);if(It(this._source.type)){for(var c={},u={},f=0,h=Object.keys(l);fthis._source.maxzoom){var g=d.children(this._source.maxzoom)[0],v=this.getTile(g);if(v&&v.hasData()){n[g.key]=g;continue}}else{var y=d.children(this._source.maxzoom);if(n[y[0].key]&&n[y[1].key]&&n[y[2].key]&&n[y[3].key])continue}for(var x=m.wasRequested(),b=d.overscaledZ-1;b>=a;--b){var _=d.scaledTo(b);if(i[_.key])break;if(i[_.key]=!0,!(m=this.getTile(_))&&x&&(m=this._addTile(_)),m&&(n[_.key]=_,x=m.wasRequested(),m.hasData()))break}}}return n},r.prototype._updateLoadedParentTileCache=function(){for(var t in this._loadedParentTiles={},this._tiles){for(var 
e=[],r=void 0,n=this._tiles[t].tileID;n.overscaledZ>0;){if(n.key in this._loadedParentTiles){r=this._loadedParentTiles[n.key];break}e.push(n.key);var i=n.scaledTo(n.overscaledZ-1);if(r=this._getLoadedTile(i))break;n=i}for(var a=0,o=e;a0||(e.hasData()&&"reloading"!==e.state?this._cache.add(e.tileID,e,e.getExpiryTimeout()):(e.aborted=!0,this._abortTile(e),this._unloadTile(e))))},r.prototype.clearTiles=function(){for(var t in this._shouldReloadOnResume=!1,this._paused=!1,this._tiles)this._removeTile(t);this._cache.reset()},r.prototype.tilesIn=function(e,r,n){var i=this,a=[],o=this.transform;if(!o)return a;for(var s=n?o.getCameraQueryGeometry(e):e,l=e.map((function(t){return o.pointCoordinate(t)})),c=s.map((function(t){return o.pointCoordinate(t)})),u=this.getIds(),f=1/0,h=1/0,p=-1/0,d=-1/0,m=0,g=c;m=0&&v[1].y+g>=0){var y=l.map((function(t){return s.getTilePoint(t)})),x=c.map((function(t){return s.getTilePoint(t)}));a.push({tile:n,tileID:s,queryGeometry:y,cameraQueryGeometry:x,scale:m})}}},x=0;x=t.browser.now())return!0}return!1},r.prototype.setFeatureState=function(t,e,r){t=t||"_geojsonTileLayer",this._state.updateState(t,e,r)},r.prototype.removeFeatureState=function(t,e,r){t=t||"_geojsonTileLayer",this._state.removeFeatureState(t,e,r)},r.prototype.getFeatureState=function(t,e){return t=t||"_geojsonTileLayer",this._state.getState(t,e)},r.prototype.setDependencies=function(t,e,r){var n=this._tiles[t];n&&n.setDependencies(e,r)},r.prototype.reloadTilesForDependencies=function(t,e){for(var r in this._tiles){this._tiles[r].hasDependency(t,e)&&this._reloadTile(r,"reloading")}this._cache.filter((function(r){return!r.hasDependency(t,e)}))},r}(t.Evented);function Pt(t,e){var r=Math.abs(2*t.wrap)-+(t.wrap<0),n=Math.abs(2*e.wrap)-+(e.wrap<0);return t.overscaledZ-e.overscaledZ||n-r||e.canonical.y-t.canonical.y||e.canonical.x-t.canonical.x}function It(t){return"raster"===t||"image"===t||"video"===t}function Ot(){return new 
t.window.Worker(Zi.workerUrl)}Ct.maxOverzooming=10,Ct.maxUnderzooming=3;var zt="mapboxgl_preloaded_worker_pool",Dt=function(){this.active={}};Dt.prototype.acquire=function(t){if(!this.workers)for(this.workers=[];this.workers.length0?(i-o)/s:0;return this.points[a].mult(1-l).add(this.points[r].mult(l))};var Kt=function(t,e,r){var n=this.boxCells=[],i=this.circleCells=[];this.xCellCount=Math.ceil(t/r),this.yCellCount=Math.ceil(e/r);for(var a=0;a=-e[0]&&r<=e[0]&&n>=-e[1]&&n<=e[1]}function ne(e,r,n,i,a,o,s,l){var c=i?e.textSizeData:e.iconSizeData,u=t.evaluateSizeForZoom(c,n.transform.zoom),f=[256/n.width*2+1,256/n.height*2+1],h=i?e.text.dynamicLayoutVertexArray:e.icon.dynamicLayoutVertexArray;h.clear();for(var p=e.lineVertexArray,d=i?e.text.placedSymbolArray:e.icon.placedSymbolArray,m=n.transform.width/n.transform.height,g=!1,v=0;vMath.abs(n.x-r.x)*i)return{useVertical:!0};return(e===t.WritingMode.vertical?r.yn.x)?{needsFlipping:!0}:null}function oe(e,r,n,i,a,o,s,l,c,u,f,h,p,d){var m,g=r/24,v=e.lineOffsetX*g,y=e.lineOffsetY*g;if(e.numGlyphs>1){var x=e.glyphStartIndex+e.numGlyphs,b=e.lineStartIndex,_=e.lineStartIndex+e.lineLength,w=ie(g,l,v,y,n,f,h,e,c,o,p);if(!w)return{notEnoughRoom:!0};var T=te(w.first.point,s).point,k=te(w.last.point,s).point;if(i&&!n){var A=ae(e.writingMode,T,k,d);if(A)return A}m=[w.first];for(var M=e.glyphStartIndex+1;M0?C.point:se(h,L,S,1,a),I=ae(e.writingMode,S,P,d);if(I)return I}var O=le(g*l.getoffsetX(e.glyphStartIndex),v,y,n,f,h,e.segment,e.lineStartIndex,e.lineStartIndex+e.lineLength,c,o,p);if(!O)return{notEnoughRoom:!0};m=[O]}for(var z=0,D=m;z0?1:-1,m=0;i&&(d*=-1,m=Math.PI),d<0&&(m+=Math.PI);for(var g=d>0?l+s:l+s+1,v=a,y=a,x=0,b=0,_=Math.abs(p),w=[];x+b<=_;){if((g+=d)=c)return null;if(y=v,w.push(v),void 0===(v=h[g])){var T=new t.Point(u.getx(g),u.gety(g)),k=te(T,f);if(k.signedDistanceFromCamera>0)v=h[g]=k.point;else{var A=g-d;v=se(0===x?o:new t.Point(u.getx(A),u.gety(A)),T,y,_-x+1,f)}}x+=b,b=y.dist(v)}var 
M=(_-x)/b,S=v.sub(y),E=S.mult(M)._add(y);E._add(S._unit()._perp()._mult(n*d));var L=m+Math.atan2(v.y-y.y,v.x-y.x);return w.push(E),{point:E,angle:L,path:w}}Kt.prototype.keysLength=function(){return this.boxKeys.length+this.circleKeys.length},Kt.prototype.insert=function(t,e,r,n,i){this._forEachCell(e,r,n,i,this._insertBoxCell,this.boxUid++),this.boxKeys.push(t),this.bboxes.push(e),this.bboxes.push(r),this.bboxes.push(n),this.bboxes.push(i)},Kt.prototype.insertCircle=function(t,e,r,n){this._forEachCell(e-n,r-n,e+n,r+n,this._insertCircleCell,this.circleUid++),this.circleKeys.push(t),this.circles.push(e),this.circles.push(r),this.circles.push(n)},Kt.prototype._insertBoxCell=function(t,e,r,n,i,a){this.boxCells[i].push(a)},Kt.prototype._insertCircleCell=function(t,e,r,n,i,a){this.circleCells[i].push(a)},Kt.prototype._query=function(t,e,r,n,i,a){if(r<0||t>this.width||n<0||e>this.height)return!i&&[];var o=[];if(t<=0&&e<=0&&this.width<=r&&this.height<=n){if(i)return!0;for(var s=0;s0:o},Kt.prototype._queryCircle=function(t,e,r,n,i){var a=t-r,o=t+r,s=e-r,l=e+r;if(o<0||a>this.width||l<0||s>this.height)return!n&&[];var c=[],u={hitTest:n,circle:{x:t,y:e,radius:r},seenUids:{box:{},circle:{}}};return this._forEachCell(a,s,o,l,this._queryCellCircle,c,u,i),n?c.length>0:c},Kt.prototype.query=function(t,e,r,n,i){return this._query(t,e,r,n,!1,i)},Kt.prototype.hitTest=function(t,e,r,n,i){return this._query(t,e,r,n,!0,i)},Kt.prototype.hitTestCircle=function(t,e,r,n){return this._queryCircle(t,e,r,!0,n)},Kt.prototype._queryCell=function(t,e,r,n,i,a,o,s){var l=o.seenUids,c=this.boxCells[i];if(null!==c)for(var u=this.bboxes,f=0,h=c;f=u[d+0]&&n>=u[d+1]&&(!s||s(this.boxKeys[p]))){if(o.hitTest)return a.push(!0),!0;a.push({key:this.boxKeys[p],x1:u[d],y1:u[d+1],x2:u[d+2],y2:u[d+3]})}}}var m=this.circleCells[i];if(null!==m)for(var g=this.circles,v=0,y=m;vo*o+s*s},Kt.prototype._circleAndRectCollide=function(t,e,r,n,i,a,o){var s=(a-n)/2,l=Math.abs(t-(n+s));if(l>s+r)return!1;var 
c=(o-i)/2,u=Math.abs(e-(i+c));if(u>c+r)return!1;if(l<=s||u<=c)return!0;var f=l-s,h=u-c;return f*f+h*h<=r*r};var ce=new Float32Array([-1/0,-1/0,0,-1/0,-1/0,0,-1/0,-1/0,0,-1/0,-1/0,0]);function ue(t,e){for(var r=0;r=1;P--)C.push(E.path[P]);for(var I=1;I0){for(var R=C[0].clone(),F=C[0].clone(),B=1;B=A.x&&F.x<=M.x&&R.y>=A.y&&F.y<=M.y?[C]:F.xM.x||F.yM.y?[]:t.clipLine([C],A.x,A.y,M.x,M.y)}for(var N=0,j=D;N=this.screenRightBoundary||n<100||e>this.screenBottomBoundary},he.prototype.isInsideGrid=function(t,e,r,n){return r>=0&&t=0&&e0)return this.prevPlacement&&this.prevPlacement.variableOffsets[f.crossTileID]&&this.prevPlacement.placements[f.crossTileID]&&this.prevPlacement.placements[f.crossTileID].text&&(m=this.prevPlacement.variableOffsets[f.crossTileID].anchor),this.variableOffsets[f.crossTileID]={textOffset:g,width:r,height:n,anchor:t,textBoxScale:i,prevAnchor:m},this.markUsedJustification(h,t,f,p),h.allowVerticalPlacement&&(this.markUsedOrientation(h,p,f),this.placedOrientations[f.crossTileID]=p),{shift:v,placedGlyphBoxes:y}},we.prototype.placeLayerBucketPart=function(e,r,n){var i=this,a=e.parameters,o=a.bucket,s=a.layout,l=a.posMatrix,c=a.textLabelPlaneMatrix,u=a.labelToScreenMatrix,f=a.textPixelRatio,h=a.holdingForFade,p=a.collisionBoxArray,d=a.partiallyEvaluatedTextSize,m=a.collisionGroup,g=s.get("text-optional"),v=s.get("icon-optional"),y=s.get("text-allow-overlap"),x=s.get("icon-allow-overlap"),b="map"===s.get("text-rotation-alignment"),_="map"===s.get("text-pitch-alignment"),w="none"!==s.get("icon-text-fit"),T="viewport-y"===s.get("symbol-z-order"),k=y&&(x||!o.hasIconData()||v),A=x&&(y||!o.hasTextData()||g);!o.collisionArrays&&p&&o.deserializeCollisionBoxes(p);var M=function(e,a){if(!r[e.crossTileID])if(h)i.placements[e.crossTileID]=new ge(!1,!1,!1);else{var 
p,T=!1,M=!1,S=!0,E=null,L={box:null,offscreen:null},C={box:null,offscreen:null},P=null,I=null,O=0,z=0,D=0;a.textFeatureIndex?O=a.textFeatureIndex:e.useRuntimeCollisionCircles&&(O=e.featureIndex),a.verticalTextFeatureIndex&&(z=a.verticalTextFeatureIndex);var R=a.textBox;if(R){var F=function(r){var n=t.WritingMode.horizontal;if(o.allowVerticalPlacement&&!r&&i.prevPlacement){var a=i.prevPlacement.placedOrientations[e.crossTileID];a&&(i.placedOrientations[e.crossTileID]=a,n=a,i.markUsedOrientation(o,n,e))}return n},B=function(r,n){if(o.allowVerticalPlacement&&e.numVerticalGlyphVertices>0&&a.verticalTextBox)for(var i=0,s=o.writingModes;i0&&(N=N.filter((function(t){return t!==j.anchor}))).unshift(j.anchor)}var U=function(t,r,n){for(var a=t.x2-t.x1,s=t.y2-t.y1,c=e.textBoxScale,u=w&&!x?r:null,h={box:[],offscreen:!1},p=y?2*N.length:N.length,d=0;d=N.length,k=i.attemptAnchorPlacement(g,t,a,s,c,b,_,f,l,m,v,e,o,n,u);if(k&&(h=k.placedGlyphBoxes)&&h.box&&h.box.length){T=!0,E=k.shift;break}}return h};B((function(){return U(R,a.iconBox,t.WritingMode.horizontal)}),(function(){var r=a.verticalTextBox,n=L&&L.box&&L.box.length;return o.allowVerticalPlacement&&!n&&e.numVerticalGlyphVertices>0&&r?U(r,a.verticalIconBox,t.WritingMode.vertical):{box:null,offscreen:null}})),L&&(T=L.box,S=L.offscreen);var V=F(L&&L.box);if(!T&&i.prevPlacement){var H=i.prevPlacement.variableOffsets[e.crossTileID];H&&(i.variableOffsets[e.crossTileID]=H,i.markUsedJustification(o,H.anchor,e,V))}}else{var q=function(t,r){var n=i.collisionIndex.placeCollisionBox(t,y,f,l,m.predicate);return n&&n.box&&n.box.length&&(i.markUsedOrientation(o,r,e),i.placedOrientations[e.crossTileID]=r),n};B((function(){return q(R,t.WritingMode.horizontal)}),(function(){var r=a.verticalTextBox;return o.allowVerticalPlacement&&e.numVerticalGlyphVertices>0&&r?q(r,t.WritingMode.vertical):{box:null,offscreen:null}})),F(L&&L.box&&L.box.length)}}if(T=(p=L)&&p.box&&p.box.length>0,S=p&&p.offscreen,e.useRuntimeCollisionCircles){var 
G=o.text.placedSymbolArray.get(e.centerJustifiedTextSymbolIndex),Y=t.evaluateSizeForFeature(o.textSizeData,d,G),W=s.get("text-padding"),X=e.collisionCircleDiameter;P=i.collisionIndex.placeCollisionCircles(y,G,o.lineVertexArray,o.glyphOffsetArray,Y,l,c,u,n,_,m.predicate,X,W),T=y||P.circles.length>0&&!P.collisionDetected,S=S&&P.offscreen}if(a.iconFeatureIndex&&(D=a.iconFeatureIndex),a.iconBox){var Z=function(t){var e=w&&E?_e(t,E.x,E.y,b,_,i.transform.angle):t;return i.collisionIndex.placeCollisionBox(e,x,f,l,m.predicate)};M=C&&C.box&&C.box.length&&a.verticalIconBox?(I=Z(a.verticalIconBox)).box.length>0:(I=Z(a.iconBox)).box.length>0,S=S&&I.offscreen}var J=g||0===e.numHorizontalGlyphVertices&&0===e.numVerticalGlyphVertices,K=v||0===e.numIconVertices;if(J||K?K?J||(M=M&&T):T=M&&T:M=T=M&&T,T&&p&&p.box&&(C&&C.box&&z?i.collisionIndex.insertCollisionBox(p.box,s.get("text-ignore-placement"),o.bucketInstanceId,z,m.ID):i.collisionIndex.insertCollisionBox(p.box,s.get("text-ignore-placement"),o.bucketInstanceId,O,m.ID)),M&&I&&i.collisionIndex.insertCollisionBox(I.box,s.get("icon-ignore-placement"),o.bucketInstanceId,D,m.ID),P&&(T&&i.collisionIndex.insertCollisionCircles(P.circles,s.get("text-ignore-placement"),o.bucketInstanceId,O,m.ID),n)){var Q=o.bucketInstanceId,$=i.collisionCircleArrays[Q];void 0===$&&($=i.collisionCircleArrays[Q]=new ve);for(var tt=0;tt=0;--E){var L=S[E];M(o.symbolInstances.get(L),o.collisionArrays[L])}else for(var C=e.symbolInstanceStart;C=0&&(e.text.placedSymbolArray.get(c).crossTileID=a>=0&&c!==a?0:n.crossTileID)}},we.prototype.markUsedOrientation=function(e,r,n){for(var i=r===t.WritingMode.horizontal||r===t.WritingMode.horizontalOnly?r:0,a=r===t.WritingMode.vertical?r:0,o=0,s=[n.leftJustifiedTextSymbolIndex,n.centerJustifiedTextSymbolIndex,n.rightJustifiedTextSymbolIndex];o0||l>0,x=a.numIconVertices>0,b=i.placedOrientations[a.crossTileID],_=b===t.WritingMode.vertical,w=b===t.WritingMode.horizontal||b===t.WritingMode.horizontalOnly;if(y){var 
T=Pe(v.text),k=_?Ie:T;d(e.text,s,k);var A=w?Ie:T;d(e.text,l,A);var M=v.text.isHidden();[a.rightJustifiedTextSymbolIndex,a.centerJustifiedTextSymbolIndex,a.leftJustifiedTextSymbolIndex].forEach((function(t){t>=0&&(e.text.placedSymbolArray.get(t).hidden=M||_?1:0)})),a.verticalPlacedTextSymbolIndex>=0&&(e.text.placedSymbolArray.get(a.verticalPlacedTextSymbolIndex).hidden=M||w?1:0);var S=i.variableOffsets[a.crossTileID];S&&i.markUsedJustification(e,S.anchor,a,b);var E=i.placedOrientations[a.crossTileID];E&&(i.markUsedJustification(e,"left",a,E),i.markUsedOrientation(e,E,a))}if(x){var L=Pe(v.icon),C=!(h&&a.verticalPlacedIconSymbolIndex&&_);if(a.placedIconSymbolIndex>=0){var P=C?L:Ie;d(e.icon,a.numIconVertices,P),e.icon.placedSymbolArray.get(a.placedIconSymbolIndex).hidden=v.icon.isHidden()}if(a.verticalPlacedIconSymbolIndex>=0){var I=C?Ie:L;d(e.icon,a.numVerticalIconVertices,I),e.icon.placedSymbolArray.get(a.verticalPlacedIconSymbolIndex).hidden=v.icon.isHidden()}}if(e.hasIconCollisionBoxData()||e.hasTextCollisionBoxData()){var O=e.collisionArrays[n];if(O){var z=new t.Point(0,0);if(O.textBox||O.verticalTextBox){var D=!0;if(c){var R=i.variableOffsets[m];R?(z=be(R.anchor,R.width,R.height,R.textOffset,R.textBoxScale),u&&z._rotate(f?i.transform.angle:-i.transform.angle)):D=!1}O.textBox&&Te(e.textCollisionBox.collisionVertexArray,v.text.placed,!D||_,z.x,z.y),O.verticalTextBox&&Te(e.textCollisionBox.collisionVertexArray,v.text.placed,!D||w,z.x,z.y)}var F=Boolean(!w&&O.verticalIconBox);O.iconBox&&Te(e.iconCollisionBox.collisionVertexArray,v.icon.placed,F,h?z.x:0,h?z.y:0),O.verticalIconBox&&Te(e.iconCollisionBox.collisionVertexArray,v.icon.placed,!F,h?z.x:0,h?z.y:0)}}},g=0;gt},we.prototype.setStale=function(){this.stale=!0};var ke=Math.pow(2,25),Ae=Math.pow(2,24),Me=Math.pow(2,17),Se=Math.pow(2,16),Ee=Math.pow(2,9),Le=Math.pow(2,8),Ce=Math.pow(2,1);function Pe(t){if(0===t.opacity&&!t.placed)return 0;if(1===t.opacity&&t.placed)return 4294967295;var 
e=t.placed?1:0,r=Math.floor(127*t.opacity);return r*ke+e*Ae+r*Me+e*Se+r*Ee+e*Le+r*Ce+e}var Ie=0,Oe=function(t){this._sortAcrossTiles="viewport-y"!==t.layout.get("symbol-z-order")&&void 0!==t.layout.get("symbol-sort-key").constantOr(1),this._currentTileIndex=0,this._currentPartIndex=0,this._seenCrossTileIDs={},this._bucketParts=[]};Oe.prototype.continuePlacement=function(t,e,r,n,i){for(var a=this._bucketParts;this._currentTileIndex2};this._currentPlacementIndex>=0;){var s=r[e[this._currentPlacementIndex]],l=this.placement.collisionIndex.transform.zoom;if("symbol"===s.type&&(!s.minzoom||s.minzoom<=l)&&(!s.maxzoom||s.maxzoom>l)){if(this._inProgressLayer||(this._inProgressLayer=new Oe(s)),this._inProgressLayer.continuePlacement(n[s.source],this.placement,this._showCollisionBoxes,s,o))return;delete this._inProgressLayer}this._currentPlacementIndex--}this._done=!0},ze.prototype.commit=function(t){return this.placement.commit(t),this.placement};var De=512/t.EXTENT/2,Re=function(t,e,r){this.tileID=t,this.indexedSymbolInstances={},this.bucketInstanceId=r;for(var n=0;nt.overscaledZ)for(var s in o){var l=o[s];l.tileID.isChildOf(t)&&l.findMatches(e.symbolInstances,t,i)}else{var c=o[t.scaledTo(Number(a)).key];c&&c.findMatches(e.symbolInstances,t,i)}}for(var u=0;u1?"@2x":"",l=t.getJSON(r.transformRequest(r.normalizeSpriteURL(e,s,".json"),t.ResourceType.SpriteJSON),(function(t,e){l=null,o||(o=t,i=e,u())})),c=t.getImage(r.transformRequest(r.normalizeSpriteURL(e,s,".png"),t.ResourceType.SpriteImage),(function(t,e){c=null,o||(o=t,a=e,u())}));function u(){if(o)n(o);else if(i&&a){var e=t.browser.getImageData(a),r={};for(var s in i){var l=i[s],c=l.width,u=l.height,f=l.x,h=l.y,p=l.sdf,d=l.pixelRatio,m=l.stretchX,g=l.stretchY,v=l.content,y=new 
t.RGBAImage({width:c,height:u});t.RGBAImage.copy(e,y,{x:f,y:h},{x:0,y:0},{width:c,height:u}),r[s]={data:y,pixelRatio:d,sdf:p,stretchX:m,stretchY:g,content:v}}n(null,r)}}return{cancel:function(){l&&(l.cancel(),l=null),c&&(c.cancel(),c=null)}}}(e,this.map._requestManager,(function(e,n){if(r._spriteRequest=null,e)r.fire(new t.ErrorEvent(e));else if(n)for(var i in n)r.imageManager.addImage(i,n[i]);r.imageManager.setLoaded(!0),r._availableImages=r.imageManager.listImages(),r.dispatcher.broadcast("setImages",r._availableImages),r.fire(new t.Event("data",{dataType:"style"}))}))},r.prototype._validateLayer=function(e){var r=this.sourceCaches[e.source];if(r){var n=e.sourceLayer;if(n){var i=r.getSource();("geojson"===i.type||i.vectorLayerIds&&-1===i.vectorLayerIds.indexOf(n))&&this.fire(new t.ErrorEvent(new Error('Source layer "'+n+'" does not exist on source "'+i.id+'" as specified by style layer "'+e.id+'"')))}}},r.prototype.loaded=function(){if(!this._loaded)return!1;if(Object.keys(this._updatedSources).length)return!1;for(var t in this.sourceCaches)if(!this.sourceCaches[t].loaded())return!1;return!!this.imageManager.isLoaded()},r.prototype._serializeLayers=function(t){for(var e=[],r=0,n=t;r0)throw new Error("Unimplemented: "+i.map((function(t){return t.command})).join(", ")+".");return n.forEach((function(t){"setTransition"!==t.command&&r[t.command].apply(r,t.args)})),this.stylesheet=e,!0},r.prototype.addImage=function(e,r){if(this.getImage(e))return this.fire(new t.ErrorEvent(new Error("An image with this name already exists.")));this.imageManager.addImage(e,r),this._availableImages=this.imageManager.listImages(),this._changedImages[e]=!0,this._changed=!0,this.fire(new t.Event("data",{dataType:"style"}))},r.prototype.updateImage=function(t,e){this.imageManager.updateImage(t,e)},r.prototype.getImage=function(t){return this.imageManager.getImage(t)},r.prototype.removeImage=function(e){if(!this.getImage(e))return this.fire(new t.ErrorEvent(new Error("No image with this 
name exists.")));this.imageManager.removeImage(e),this._availableImages=this.imageManager.listImages(),this._changedImages[e]=!0,this._changed=!0,this.fire(new t.Event("data",{dataType:"style"}))},r.prototype.listImages=function(){return this._checkLoaded(),this.imageManager.listImages()},r.prototype.addSource=function(e,r,n){var i=this;if(void 0===n&&(n={}),this._checkLoaded(),void 0!==this.sourceCaches[e])throw new Error("There is already a source with this ID");if(!r.type)throw new Error("The type property must be defined, but the only the following properties were given: "+Object.keys(r).join(", ")+".");if(!(["vector","raster","geojson","video","image"].indexOf(r.type)>=0)||!this._validate(t.validateStyle.source,"sources."+e,r,null,n)){this.map&&this.map._collectResourceTiming&&(r.collectResourceTiming=!0);var a=this.sourceCaches[e]=new Ct(e,r,this.dispatcher);a.style=this,a.setEventedParent(this,(function(){return{isSourceLoaded:i.loaded(),source:a.serialize(),sourceId:e}})),a.onAdd(this.map),this._changed=!0}},r.prototype.removeSource=function(e){if(this._checkLoaded(),void 0===this.sourceCaches[e])throw new Error("There is no source with this ID");for(var r in this._layers)if(this._layers[r].source===e)return this.fire(new t.ErrorEvent(new Error('Source "'+e+'" cannot be removed while layer "'+r+'" is using it.')));var n=this.sourceCaches[e];delete this.sourceCaches[e],delete this._updatedSources[e],n.fire(new t.Event("data",{sourceDataType:"metadata",dataType:"source",sourceId:e})),n.setEventedParent(null),n.clearTiles(),n.onRemove&&n.onRemove(this.map),this._changed=!0},r.prototype.setGeoJSONSourceData=function(t,e){this._checkLoaded(),this.sourceCaches[t].getSource().setData(e),this._changed=!0},r.prototype.getSource=function(t){return this.sourceCaches[t]&&this.sourceCaches[t].getSource()},r.prototype.addLayer=function(e,r,n){void 0===n&&(n={}),this._checkLoaded();var i=e.id;if(this.getLayer(i))this.fire(new t.ErrorEvent(new Error('Layer with id "'+i+'" 
already exists on this map')));else{var a;if("custom"===e.type){if(je(this,t.validateCustomStyleLayer(e)))return;a=t.createStyleLayer(e)}else{if("object"==typeof e.source&&(this.addSource(i,e.source),e=t.clone$1(e),e=t.extend(e,{source:i})),this._validate(t.validateStyle.layer,"layers."+i,e,{arrayIndex:-1},n))return;a=t.createStyleLayer(e),this._validateLayer(a),a.setEventedParent(this,{layer:{id:i}}),this._serializedLayers[a.id]=a.serialize()}var o=r?this._order.indexOf(r):this._order.length;if(r&&-1===o)this.fire(new t.ErrorEvent(new Error('Layer with id "'+r+'" does not exist on this map.')));else{if(this._order.splice(o,0,i),this._layerOrderChanged=!0,this._layers[i]=a,this._removedLayers[i]&&a.source&&"custom"!==a.type){var s=this._removedLayers[i];delete this._removedLayers[i],s.type!==a.type?this._updatedSources[a.source]="clear":(this._updatedSources[a.source]="reload",this.sourceCaches[a.source].pause())}this._updateLayer(a),a.onAdd&&a.onAdd(this.map)}}},r.prototype.moveLayer=function(e,r){if(this._checkLoaded(),this._changed=!0,this._layers[e]){if(e!==r){var n=this._order.indexOf(e);this._order.splice(n,1);var i=r?this._order.indexOf(r):this._order.length;r&&-1===i?this.fire(new t.ErrorEvent(new Error('Layer with id "'+r+'" does not exist on this map.'))):(this._order.splice(i,0,e),this._layerOrderChanged=!0)}}else this.fire(new t.ErrorEvent(new Error("The layer '"+e+"' does not exist in the map's style and cannot be moved.")))},r.prototype.removeLayer=function(e){this._checkLoaded();var r=this._layers[e];if(r){r.setEventedParent(null);var n=this._order.indexOf(e);this._order.splice(n,1),this._layerOrderChanged=!0,this._changed=!0,this._removedLayers[e]=r,delete this._layers[e],delete this._serializedLayers[e],delete this._updatedLayers[e],delete this._updatedPaintProps[e],r.onRemove&&r.onRemove(this.map)}else this.fire(new t.ErrorEvent(new Error("The layer '"+e+"' does not exist in the map's style and cannot be 
removed.")))},r.prototype.getLayer=function(t){return this._layers[t]},r.prototype.hasLayer=function(t){return t in this._layers},r.prototype.setLayerZoomRange=function(e,r,n){this._checkLoaded();var i=this.getLayer(e);i?i.minzoom===r&&i.maxzoom===n||(null!=r&&(i.minzoom=r),null!=n&&(i.maxzoom=n),this._updateLayer(i)):this.fire(new t.ErrorEvent(new Error("The layer '"+e+"' does not exist in the map's style and cannot have zoom extent.")))},r.prototype.setFilter=function(e,r,n){void 0===n&&(n={}),this._checkLoaded();var i=this.getLayer(e);if(i){if(!t.deepEqual(i.filter,r))return null==r?(i.filter=void 0,void this._updateLayer(i)):void(this._validate(t.validateStyle.filter,"layers."+i.id+".filter",r,null,n)||(i.filter=t.clone$1(r),this._updateLayer(i)))}else this.fire(new t.ErrorEvent(new Error("The layer '"+e+"' does not exist in the map's style and cannot be filtered.")))},r.prototype.getFilter=function(e){return t.clone$1(this.getLayer(e).filter)},r.prototype.setLayoutProperty=function(e,r,n,i){void 0===i&&(i={}),this._checkLoaded();var a=this.getLayer(e);a?t.deepEqual(a.getLayoutProperty(r),n)||(a.setLayoutProperty(r,n,i),this._updateLayer(a)):this.fire(new t.ErrorEvent(new Error("The layer '"+e+"' does not exist in the map's style and cannot be styled.")))},r.prototype.getLayoutProperty=function(e,r){var n=this.getLayer(e);if(n)return n.getLayoutProperty(r);this.fire(new t.ErrorEvent(new Error("The layer '"+e+"' does not exist in the map's style.")))},r.prototype.setPaintProperty=function(e,r,n,i){void 0===i&&(i={}),this._checkLoaded();var a=this.getLayer(e);a?t.deepEqual(a.getPaintProperty(r),n)||(a.setPaintProperty(r,n,i)&&this._updateLayer(a),this._changed=!0,this._updatedPaintProps[e]=!0):this.fire(new t.ErrorEvent(new Error("The layer '"+e+"' does not exist in the map's style and cannot be styled.")))},r.prototype.getPaintProperty=function(t,e){return this.getLayer(t).getPaintProperty(e)},r.prototype.setFeatureState=function(e,r){this._checkLoaded();var 
n=e.source,i=e.sourceLayer,a=this.sourceCaches[n];if(void 0!==a){var o=a.getSource().type;"geojson"===o&&i?this.fire(new t.ErrorEvent(new Error("GeoJSON sources cannot have a sourceLayer parameter."))):"vector"!==o||i?(void 0===e.id&&this.fire(new t.ErrorEvent(new Error("The feature id parameter must be provided."))),a.setFeatureState(i,e.id,r)):this.fire(new t.ErrorEvent(new Error("The sourceLayer parameter must be provided for vector source types.")))}else this.fire(new t.ErrorEvent(new Error("The source '"+n+"' does not exist in the map's style.")))},r.prototype.removeFeatureState=function(e,r){this._checkLoaded();var n=e.source,i=this.sourceCaches[n];if(void 0!==i){var a=i.getSource().type,o="vector"===a?e.sourceLayer:void 0;"vector"!==a||o?r&&"string"!=typeof e.id&&"number"!=typeof e.id?this.fire(new t.ErrorEvent(new Error("A feature id is requred to remove its specific state property."))):i.removeFeatureState(o,e.id,r):this.fire(new t.ErrorEvent(new Error("The sourceLayer parameter must be provided for vector source types.")))}else this.fire(new t.ErrorEvent(new Error("The source '"+n+"' does not exist in the map's style.")))},r.prototype.getFeatureState=function(e){this._checkLoaded();var r=e.source,n=e.sourceLayer,i=this.sourceCaches[r];if(void 0!==i){if("vector"!==i.getSource().type||n)return void 0===e.id&&this.fire(new t.ErrorEvent(new Error("The feature id parameter must be provided."))),i.getFeatureState(n,e.id);this.fire(new t.ErrorEvent(new Error("The sourceLayer parameter must be provided for vector source types.")))}else this.fire(new t.ErrorEvent(new Error("The source '"+r+"' does not exist in the map's style.")))},r.prototype.getTransition=function(){return t.extend({duration:300,delay:0},this.stylesheet&&this.stylesheet.transition)},r.prototype.serialize=function(){return 
t.filterObject({version:this.stylesheet.version,name:this.stylesheet.name,metadata:this.stylesheet.metadata,light:this.stylesheet.light,center:this.stylesheet.center,zoom:this.stylesheet.zoom,bearing:this.stylesheet.bearing,pitch:this.stylesheet.pitch,sprite:this.stylesheet.sprite,glyphs:this.stylesheet.glyphs,transition:this.stylesheet.transition,sources:t.mapObject(this.sourceCaches,(function(t){return t.serialize()})),layers:this._serializeLayers(this._order)},(function(t){return void 0!==t}))},r.prototype._updateLayer=function(t){this._updatedLayers[t.id]=!0,t.source&&!this._updatedSources[t.source]&&"raster"!==this.sourceCaches[t.source].getSource().type&&(this._updatedSources[t.source]="reload",this.sourceCaches[t.source].pause()),this._changed=!0},r.prototype._flattenAndSortRenderedFeatures=function(t){for(var e=this,r=function(t){return"fill-extrusion"===e._layers[t].type},n={},i=[],a=this._order.length-1;a>=0;a--){var o=this._order[a];if(r(o)){n[o]=a;for(var s=0,l=t;s=0;d--){var m=this._order[d];if(r(m))for(var g=i.length-1;g>=0;g--){var v=i[g].feature;if(n[v.layer.id] 0.5) {gl_FragColor=vec4(0.0,0.0,1.0,0.5)*alpha;}if (v_notUsed > 0.5) {gl_FragColor*=.1;}}","attribute vec2 a_pos;attribute vec2 a_anchor_pos;attribute vec2 a_extrude;attribute vec2 a_placed;attribute vec2 a_shift;uniform mat4 u_matrix;uniform vec2 u_extrude_scale;uniform float u_camera_to_center_distance;varying float v_placed;varying float v_notUsed;void main() {vec4 projectedPoint=u_matrix*vec4(a_anchor_pos,0,1);highp float camera_to_anchor_distance=projectedPoint.w;highp float collision_perspective_ratio=clamp(0.5+0.5*(u_camera_to_center_distance/camera_to_anchor_distance),0.0,4.0);gl_Position=u_matrix*vec4(a_pos,0.0,1.0);gl_Position.xy+=(a_extrude+a_shift)*u_extrude_scale*gl_Position.w*collision_perspective_ratio;v_placed=a_placed.x;v_notUsed=a_placed.y;}"),tr=yr("varying float v_radius;varying vec2 v_extrude;varying float v_perspective_ratio;varying float v_collision;void main() {float 
alpha=0.5*min(v_perspective_ratio,1.0);float stroke_radius=0.9*max(v_perspective_ratio,1.0);float distance_to_center=length(v_extrude);float distance_to_edge=abs(distance_to_center-v_radius);float opacity_t=smoothstep(-stroke_radius,0.0,-distance_to_edge);vec4 color=mix(vec4(0.0,0.0,1.0,0.5),vec4(1.0,0.0,0.0,1.0),v_collision);gl_FragColor=color*alpha*opacity_t;}","attribute vec2 a_pos;attribute float a_radius;attribute vec2 a_flags;uniform mat4 u_matrix;uniform mat4 u_inv_matrix;uniform vec2 u_viewport_size;uniform float u_camera_to_center_distance;varying float v_radius;varying vec2 v_extrude;varying float v_perspective_ratio;varying float v_collision;vec3 toTilePosition(vec2 screenPos) {vec4 rayStart=u_inv_matrix*vec4(screenPos,-1.0,1.0);vec4 rayEnd =u_inv_matrix*vec4(screenPos, 1.0,1.0);rayStart.xyz/=rayStart.w;rayEnd.xyz /=rayEnd.w;highp float t=(0.0-rayStart.z)/(rayEnd.z-rayStart.z);return mix(rayStart.xyz,rayEnd.xyz,t);}void main() {vec2 quadCenterPos=a_pos;float radius=a_radius;float collision=a_flags.x;float vertexIdx=a_flags.y;vec2 quadVertexOffset=vec2(mix(-1.0,1.0,float(vertexIdx >=2.0)),mix(-1.0,1.0,float(vertexIdx >=1.0 && vertexIdx <=2.0)));vec2 quadVertexExtent=quadVertexOffset*radius;vec3 tilePos=toTilePosition(quadCenterPos);vec4 clipPos=u_matrix*vec4(tilePos,1.0);highp float camera_to_anchor_distance=clipPos.w;highp float collision_perspective_ratio=clamp(0.5+0.5*(u_camera_to_center_distance/camera_to_anchor_distance),0.0,4.0);float padding_factor=1.2;v_radius=radius;v_extrude=quadVertexExtent*padding_factor;v_perspective_ratio=collision_perspective_ratio;v_collision=collision;gl_Position=vec4(clipPos.xyz/clipPos.w,1.0)+vec4(quadVertexExtent*padding_factor/u_viewport_size*2.0,0.0,0.0);}"),er=yr("uniform highp vec4 u_color;uniform sampler2D u_overlay;varying vec2 v_uv;void main() {vec4 overlay_color=texture2D(u_overlay,v_uv);gl_FragColor=mix(u_color,overlay_color,overlay_color.a);}","attribute vec2 a_pos;varying vec2 v_uv;uniform mat4 
u_matrix;uniform float u_overlay_scale;void main() {v_uv=a_pos/8192.0;gl_Position=u_matrix*vec4(a_pos*u_overlay_scale,0,1);}"),rr=yr("#pragma mapbox: define highp vec4 color\n#pragma mapbox: define lowp float opacity\nvoid main() {\n#pragma mapbox: initialize highp vec4 color\n#pragma mapbox: initialize lowp float opacity\ngl_FragColor=color*opacity;\n#ifdef OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\n}","attribute vec2 a_pos;uniform mat4 u_matrix;\n#pragma mapbox: define highp vec4 color\n#pragma mapbox: define lowp float opacity\nvoid main() {\n#pragma mapbox: initialize highp vec4 color\n#pragma mapbox: initialize lowp float opacity\ngl_Position=u_matrix*vec4(a_pos,0,1);}"),nr=yr("varying vec2 v_pos;\n#pragma mapbox: define highp vec4 outline_color\n#pragma mapbox: define lowp float opacity\nvoid main() {\n#pragma mapbox: initialize highp vec4 outline_color\n#pragma mapbox: initialize lowp float opacity\nfloat dist=length(v_pos-gl_FragCoord.xy);float alpha=1.0-smoothstep(0.0,1.0,dist);gl_FragColor=outline_color*(alpha*opacity);\n#ifdef OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\n}","attribute vec2 a_pos;uniform mat4 u_matrix;uniform vec2 u_world;varying vec2 v_pos;\n#pragma mapbox: define highp vec4 outline_color\n#pragma mapbox: define lowp float opacity\nvoid main() {\n#pragma mapbox: initialize highp vec4 outline_color\n#pragma mapbox: initialize lowp float opacity\ngl_Position=u_matrix*vec4(a_pos,0,1);v_pos=(gl_Position.xy/gl_Position.w+1.0)/2.0*u_world;}"),ir=yr("uniform vec2 u_texsize;uniform sampler2D u_image;uniform float u_fade;varying vec2 v_pos_a;varying vec2 v_pos_b;varying vec2 v_pos;\n#pragma mapbox: define lowp float opacity\n#pragma mapbox: define lowp vec4 pattern_from\n#pragma mapbox: define lowp vec4 pattern_to\nvoid main() {\n#pragma mapbox: initialize lowp float opacity\n#pragma mapbox: initialize mediump vec4 pattern_from\n#pragma mapbox: initialize mediump vec4 pattern_to\nvec2 pattern_tl_a=pattern_from.xy;vec2 
pattern_br_a=pattern_from.zw;vec2 pattern_tl_b=pattern_to.xy;vec2 pattern_br_b=pattern_to.zw;vec2 imagecoord=mod(v_pos_a,1.0);vec2 pos=mix(pattern_tl_a/u_texsize,pattern_br_a/u_texsize,imagecoord);vec4 color1=texture2D(u_image,pos);vec2 imagecoord_b=mod(v_pos_b,1.0);vec2 pos2=mix(pattern_tl_b/u_texsize,pattern_br_b/u_texsize,imagecoord_b);vec4 color2=texture2D(u_image,pos2);float dist=length(v_pos-gl_FragCoord.xy);float alpha=1.0-smoothstep(0.0,1.0,dist);gl_FragColor=mix(color1,color2,u_fade)*alpha*opacity;\n#ifdef OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\n}","uniform mat4 u_matrix;uniform vec2 u_world;uniform vec2 u_pixel_coord_upper;uniform vec2 u_pixel_coord_lower;uniform vec3 u_scale;attribute vec2 a_pos;varying vec2 v_pos_a;varying vec2 v_pos_b;varying vec2 v_pos;\n#pragma mapbox: define lowp float opacity\n#pragma mapbox: define lowp vec4 pattern_from\n#pragma mapbox: define lowp vec4 pattern_to\n#pragma mapbox: define lowp float pixel_ratio_from\n#pragma mapbox: define lowp float pixel_ratio_to\nvoid main() {\n#pragma mapbox: initialize lowp float opacity\n#pragma mapbox: initialize mediump vec4 pattern_from\n#pragma mapbox: initialize mediump vec4 pattern_to\n#pragma mapbox: initialize lowp float pixel_ratio_from\n#pragma mapbox: initialize lowp float pixel_ratio_to\nvec2 pattern_tl_a=pattern_from.xy;vec2 pattern_br_a=pattern_from.zw;vec2 pattern_tl_b=pattern_to.xy;vec2 pattern_br_b=pattern_to.zw;float tileRatio=u_scale.x;float fromScale=u_scale.y;float toScale=u_scale.z;gl_Position=u_matrix*vec4(a_pos,0,1);vec2 display_size_a=(pattern_br_a-pattern_tl_a)/pixel_ratio_from;vec2 display_size_b=(pattern_br_b-pattern_tl_b)/pixel_ratio_to;v_pos_a=get_pattern_pos(u_pixel_coord_upper,u_pixel_coord_lower,fromScale*display_size_a,tileRatio,a_pos);v_pos_b=get_pattern_pos(u_pixel_coord_upper,u_pixel_coord_lower,toScale*display_size_b,tileRatio,a_pos);v_pos=(gl_Position.xy/gl_Position.w+1.0)/2.0*u_world;}"),ar=yr("uniform vec2 u_texsize;uniform float 
u_fade;uniform sampler2D u_image;varying vec2 v_pos_a;varying vec2 v_pos_b;\n#pragma mapbox: define lowp float opacity\n#pragma mapbox: define lowp vec4 pattern_from\n#pragma mapbox: define lowp vec4 pattern_to\nvoid main() {\n#pragma mapbox: initialize lowp float opacity\n#pragma mapbox: initialize mediump vec4 pattern_from\n#pragma mapbox: initialize mediump vec4 pattern_to\nvec2 pattern_tl_a=pattern_from.xy;vec2 pattern_br_a=pattern_from.zw;vec2 pattern_tl_b=pattern_to.xy;vec2 pattern_br_b=pattern_to.zw;vec2 imagecoord=mod(v_pos_a,1.0);vec2 pos=mix(pattern_tl_a/u_texsize,pattern_br_a/u_texsize,imagecoord);vec4 color1=texture2D(u_image,pos);vec2 imagecoord_b=mod(v_pos_b,1.0);vec2 pos2=mix(pattern_tl_b/u_texsize,pattern_br_b/u_texsize,imagecoord_b);vec4 color2=texture2D(u_image,pos2);gl_FragColor=mix(color1,color2,u_fade)*opacity;\n#ifdef OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\n}","uniform mat4 u_matrix;uniform vec2 u_pixel_coord_upper;uniform vec2 u_pixel_coord_lower;uniform vec3 u_scale;attribute vec2 a_pos;varying vec2 v_pos_a;varying vec2 v_pos_b;\n#pragma mapbox: define lowp float opacity\n#pragma mapbox: define lowp vec4 pattern_from\n#pragma mapbox: define lowp vec4 pattern_to\n#pragma mapbox: define lowp float pixel_ratio_from\n#pragma mapbox: define lowp float pixel_ratio_to\nvoid main() {\n#pragma mapbox: initialize lowp float opacity\n#pragma mapbox: initialize mediump vec4 pattern_from\n#pragma mapbox: initialize mediump vec4 pattern_to\n#pragma mapbox: initialize lowp float pixel_ratio_from\n#pragma mapbox: initialize lowp float pixel_ratio_to\nvec2 pattern_tl_a=pattern_from.xy;vec2 pattern_br_a=pattern_from.zw;vec2 pattern_tl_b=pattern_to.xy;vec2 pattern_br_b=pattern_to.zw;float tileZoomRatio=u_scale.x;float fromScale=u_scale.y;float toScale=u_scale.z;vec2 display_size_a=(pattern_br_a-pattern_tl_a)/pixel_ratio_from;vec2 
display_size_b=(pattern_br_b-pattern_tl_b)/pixel_ratio_to;gl_Position=u_matrix*vec4(a_pos,0,1);v_pos_a=get_pattern_pos(u_pixel_coord_upper,u_pixel_coord_lower,fromScale*display_size_a,tileZoomRatio,a_pos);v_pos_b=get_pattern_pos(u_pixel_coord_upper,u_pixel_coord_lower,toScale*display_size_b,tileZoomRatio,a_pos);}"),or=yr("varying vec4 v_color;void main() {gl_FragColor=v_color;\n#ifdef OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\n}","uniform mat4 u_matrix;uniform vec3 u_lightcolor;uniform lowp vec3 u_lightpos;uniform lowp float u_lightintensity;uniform float u_vertical_gradient;uniform lowp float u_opacity;attribute vec2 a_pos;attribute vec4 a_normal_ed;varying vec4 v_color;\n#pragma mapbox: define highp float base\n#pragma mapbox: define highp float height\n#pragma mapbox: define highp vec4 color\nvoid main() {\n#pragma mapbox: initialize highp float base\n#pragma mapbox: initialize highp float height\n#pragma mapbox: initialize highp vec4 color\nvec3 normal=a_normal_ed.xyz;base=max(0.0,base);height=max(0.0,height);float t=mod(normal.x,2.0);gl_Position=u_matrix*vec4(a_pos,t > 0.0 ? 
height : base,1);float colorvalue=color.r*0.2126+color.g*0.7152+color.b*0.0722;v_color=vec4(0.0,0.0,0.0,1.0);vec4 ambientlight=vec4(0.03,0.03,0.03,1.0);color+=ambientlight;float directional=clamp(dot(normal/16384.0,u_lightpos),0.0,1.0);directional=mix((1.0-u_lightintensity),max((1.0-colorvalue+u_lightintensity),1.0),directional);if (normal.y !=0.0) {directional*=((1.0-u_vertical_gradient)+(u_vertical_gradient*clamp((t+base)*pow(height/150.0,0.5),mix(0.7,0.98,1.0-u_lightintensity),1.0)));}v_color.r+=clamp(color.r*directional*u_lightcolor.r,mix(0.0,0.3,1.0-u_lightcolor.r),1.0);v_color.g+=clamp(color.g*directional*u_lightcolor.g,mix(0.0,0.3,1.0-u_lightcolor.g),1.0);v_color.b+=clamp(color.b*directional*u_lightcolor.b,mix(0.0,0.3,1.0-u_lightcolor.b),1.0);v_color*=u_opacity;}"),sr=yr("uniform vec2 u_texsize;uniform float u_fade;uniform sampler2D u_image;varying vec2 v_pos_a;varying vec2 v_pos_b;varying vec4 v_lighting;\n#pragma mapbox: define lowp float base\n#pragma mapbox: define lowp float height\n#pragma mapbox: define lowp vec4 pattern_from\n#pragma mapbox: define lowp vec4 pattern_to\n#pragma mapbox: define lowp float pixel_ratio_from\n#pragma mapbox: define lowp float pixel_ratio_to\nvoid main() {\n#pragma mapbox: initialize lowp float base\n#pragma mapbox: initialize lowp float height\n#pragma mapbox: initialize mediump vec4 pattern_from\n#pragma mapbox: initialize mediump vec4 pattern_to\n#pragma mapbox: initialize lowp float pixel_ratio_from\n#pragma mapbox: initialize lowp float pixel_ratio_to\nvec2 pattern_tl_a=pattern_from.xy;vec2 pattern_br_a=pattern_from.zw;vec2 pattern_tl_b=pattern_to.xy;vec2 pattern_br_b=pattern_to.zw;vec2 imagecoord=mod(v_pos_a,1.0);vec2 pos=mix(pattern_tl_a/u_texsize,pattern_br_a/u_texsize,imagecoord);vec4 color1=texture2D(u_image,pos);vec2 imagecoord_b=mod(v_pos_b,1.0);vec2 pos2=mix(pattern_tl_b/u_texsize,pattern_br_b/u_texsize,imagecoord_b);vec4 color2=texture2D(u_image,pos2);vec4 
mixedColor=mix(color1,color2,u_fade);gl_FragColor=mixedColor*v_lighting;\n#ifdef OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\n}","uniform mat4 u_matrix;uniform vec2 u_pixel_coord_upper;uniform vec2 u_pixel_coord_lower;uniform float u_height_factor;uniform vec3 u_scale;uniform float u_vertical_gradient;uniform lowp float u_opacity;uniform vec3 u_lightcolor;uniform lowp vec3 u_lightpos;uniform lowp float u_lightintensity;attribute vec2 a_pos;attribute vec4 a_normal_ed;varying vec2 v_pos_a;varying vec2 v_pos_b;varying vec4 v_lighting;\n#pragma mapbox: define lowp float base\n#pragma mapbox: define lowp float height\n#pragma mapbox: define lowp vec4 pattern_from\n#pragma mapbox: define lowp vec4 pattern_to\n#pragma mapbox: define lowp float pixel_ratio_from\n#pragma mapbox: define lowp float pixel_ratio_to\nvoid main() {\n#pragma mapbox: initialize lowp float base\n#pragma mapbox: initialize lowp float height\n#pragma mapbox: initialize mediump vec4 pattern_from\n#pragma mapbox: initialize mediump vec4 pattern_to\n#pragma mapbox: initialize lowp float pixel_ratio_from\n#pragma mapbox: initialize lowp float pixel_ratio_to\nvec2 pattern_tl_a=pattern_from.xy;vec2 pattern_br_a=pattern_from.zw;vec2 pattern_tl_b=pattern_to.xy;vec2 pattern_br_b=pattern_to.zw;float tileRatio=u_scale.x;float fromScale=u_scale.y;float toScale=u_scale.z;vec3 normal=a_normal_ed.xyz;float edgedistance=a_normal_ed.w;vec2 display_size_a=(pattern_br_a-pattern_tl_a)/pixel_ratio_from;vec2 display_size_b=(pattern_br_b-pattern_tl_b)/pixel_ratio_to;base=max(0.0,base);height=max(0.0,height);float t=mod(normal.x,2.0);float z=t > 0.0 ? height : base;gl_Position=u_matrix*vec4(a_pos,z,1);vec2 pos=normal.x==1.0 && normal.y==0.0 && normal.z==16384.0\n? 
a_pos\n: vec2(edgedistance,z*u_height_factor);v_pos_a=get_pattern_pos(u_pixel_coord_upper,u_pixel_coord_lower,fromScale*display_size_a,tileRatio,pos);v_pos_b=get_pattern_pos(u_pixel_coord_upper,u_pixel_coord_lower,toScale*display_size_b,tileRatio,pos);v_lighting=vec4(0.0,0.0,0.0,1.0);float directional=clamp(dot(normal/16383.0,u_lightpos),0.0,1.0);directional=mix((1.0-u_lightintensity),max((0.5+u_lightintensity),1.0),directional);if (normal.y !=0.0) {directional*=((1.0-u_vertical_gradient)+(u_vertical_gradient*clamp((t+base)*pow(height/150.0,0.5),mix(0.7,0.98,1.0-u_lightintensity),1.0)));}v_lighting.rgb+=clamp(directional*u_lightcolor,mix(vec3(0.0),vec3(0.3),1.0-u_lightcolor),vec3(1.0));v_lighting*=u_opacity;}"),lr=yr("#ifdef GL_ES\nprecision highp float;\n#endif\nuniform sampler2D u_image;varying vec2 v_pos;uniform vec2 u_dimension;uniform float u_zoom;uniform float u_maxzoom;uniform vec4 u_unpack;float getElevation(vec2 coord,float bias) {vec4 data=texture2D(u_image,coord)*255.0;data.a=-1.0;return dot(data,u_unpack)/4.0;}void main() {vec2 epsilon=1.0/u_dimension;float a=getElevation(v_pos+vec2(-epsilon.x,-epsilon.y),0.0);float b=getElevation(v_pos+vec2(0,-epsilon.y),0.0);float c=getElevation(v_pos+vec2(epsilon.x,-epsilon.y),0.0);float d=getElevation(v_pos+vec2(-epsilon.x,0),0.0);float e=getElevation(v_pos,0.0);float f=getElevation(v_pos+vec2(epsilon.x,0),0.0);float g=getElevation(v_pos+vec2(-epsilon.x,epsilon.y),0.0);float h=getElevation(v_pos+vec2(0,epsilon.y),0.0);float i=getElevation(v_pos+vec2(epsilon.x,epsilon.y),0.0);float exaggeration=u_zoom < 2.0 ? 0.4 : u_zoom < 4.5 ? 
0.35 : 0.3;vec2 deriv=vec2((c+f+f+i)-(a+d+d+g),(g+h+h+i)-(a+b+b+c))/ pow(2.0,(u_zoom-u_maxzoom)*exaggeration+19.2562-u_zoom);gl_FragColor=clamp(vec4(deriv.x/2.0+0.5,deriv.y/2.0+0.5,1.0,1.0),0.0,1.0);\n#ifdef OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\n}","uniform mat4 u_matrix;uniform vec2 u_dimension;attribute vec2 a_pos;attribute vec2 a_texture_pos;varying vec2 v_pos;void main() {gl_Position=u_matrix*vec4(a_pos,0,1);highp vec2 epsilon=1.0/u_dimension;float scale=(u_dimension.x-2.0)/u_dimension.x;v_pos=(a_texture_pos/8192.0)*scale+epsilon;}"),cr=yr("uniform sampler2D u_image;varying vec2 v_pos;uniform vec2 u_latrange;uniform vec2 u_light;uniform vec4 u_shadow;uniform vec4 u_highlight;uniform vec4 u_accent;\n#define PI 3.141592653589793\nvoid main() {vec4 pixel=texture2D(u_image,v_pos);vec2 deriv=((pixel.rg*2.0)-1.0);float scaleFactor=cos(radians((u_latrange[0]-u_latrange[1])*(1.0-v_pos.y)+u_latrange[1]));float slope=atan(1.25*length(deriv)/scaleFactor);float aspect=deriv.x !=0.0 ? atan(deriv.y,-deriv.x) : PI/2.0*(deriv.y > 0.0 ? 1.0 :-1.0);float intensity=u_light.x;float azimuth=u_light.y+PI;float base=1.875-intensity*1.75;float maxValue=0.5*PI;float scaledSlope=intensity !=0.5 ? 
((pow(base,slope)-1.0)/(pow(base,maxValue)-1.0))*maxValue : slope;float accent=cos(scaledSlope);vec4 accent_color=(1.0-accent)*u_accent*clamp(intensity*2.0,0.0,1.0);float shade=abs(mod((aspect+azimuth)/PI+0.5,2.0)-1.0);vec4 shade_color=mix(u_shadow,u_highlight,shade)*sin(scaledSlope)*clamp(intensity*2.0,0.0,1.0);gl_FragColor=accent_color*(1.0-shade_color.a)+shade_color;\n#ifdef OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\n}","uniform mat4 u_matrix;attribute vec2 a_pos;attribute vec2 a_texture_pos;varying vec2 v_pos;void main() {gl_Position=u_matrix*vec4(a_pos,0,1);v_pos=a_texture_pos/8192.0;}"),ur=yr("uniform lowp float u_device_pixel_ratio;varying vec2 v_width2;varying vec2 v_normal;varying float v_gamma_scale;\n#pragma mapbox: define highp vec4 color\n#pragma mapbox: define lowp float blur\n#pragma mapbox: define lowp float opacity\nvoid main() {\n#pragma mapbox: initialize highp vec4 color\n#pragma mapbox: initialize lowp float blur\n#pragma mapbox: initialize lowp float opacity\nfloat dist=length(v_normal)*v_width2.s;float blur2=(blur+1.0/u_device_pixel_ratio)*v_gamma_scale;float alpha=clamp(min(dist-(v_width2.t-blur2),v_width2.s-dist)/blur2,0.0,1.0);gl_FragColor=color*(alpha*opacity);\n#ifdef OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\n}","\n#define scale 0.015873016\nattribute vec2 a_pos_normal;attribute vec4 a_data;uniform mat4 u_matrix;uniform mediump float u_ratio;uniform vec2 u_units_to_pixels;uniform lowp float u_device_pixel_ratio;varying vec2 v_normal;varying vec2 v_width2;varying float v_gamma_scale;varying highp float v_linesofar;\n#pragma mapbox: define highp vec4 color\n#pragma mapbox: define lowp float blur\n#pragma mapbox: define lowp float opacity\n#pragma mapbox: define mediump float gapwidth\n#pragma mapbox: define lowp float offset\n#pragma mapbox: define mediump float width\nvoid main() {\n#pragma mapbox: initialize highp vec4 color\n#pragma mapbox: initialize lowp float blur\n#pragma mapbox: initialize lowp float 
opacity\n#pragma mapbox: initialize mediump float gapwidth\n#pragma mapbox: initialize lowp float offset\n#pragma mapbox: initialize mediump float width\nfloat ANTIALIASING=1.0/u_device_pixel_ratio/2.0;vec2 a_extrude=a_data.xy-128.0;float a_direction=mod(a_data.z,4.0)-1.0;v_linesofar=(floor(a_data.z/4.0)+a_data.w*64.0)*2.0;vec2 pos=floor(a_pos_normal*0.5);mediump vec2 normal=a_pos_normal-2.0*pos;normal.y=normal.y*2.0-1.0;v_normal=normal;gapwidth=gapwidth/2.0;float halfwidth=width/2.0;offset=-1.0*offset;float inset=gapwidth+(gapwidth > 0.0 ? ANTIALIASING : 0.0);float outset=gapwidth+halfwidth*(gapwidth > 0.0 ? 2.0 : 1.0)+(halfwidth==0.0 ? 0.0 : ANTIALIASING);mediump vec2 dist=outset*a_extrude*scale;mediump float u=0.5*a_direction;mediump float t=1.0-abs(u);mediump vec2 offset2=offset*a_extrude*scale*normal.y*mat2(t,-u,u,t);vec4 projected_extrude=u_matrix*vec4(dist/u_ratio,0.0,0.0);gl_Position=u_matrix*vec4(pos+offset2/u_ratio,0.0,1.0)+projected_extrude;float extrude_length_without_perspective=length(dist);float extrude_length_with_perspective=length(projected_extrude.xy/gl_Position.w*u_units_to_pixels);v_gamma_scale=extrude_length_without_perspective/extrude_length_with_perspective;v_width2=vec2(outset,inset);}"),fr=yr("uniform lowp float u_device_pixel_ratio;uniform sampler2D u_image;varying vec2 v_width2;varying vec2 v_normal;varying float v_gamma_scale;varying highp float v_lineprogress;\n#pragma mapbox: define lowp float blur\n#pragma mapbox: define lowp float opacity\nvoid main() {\n#pragma mapbox: initialize lowp float blur\n#pragma mapbox: initialize lowp float opacity\nfloat dist=length(v_normal)*v_width2.s;float blur2=(blur+1.0/u_device_pixel_ratio)*v_gamma_scale;float alpha=clamp(min(dist-(v_width2.t-blur2),v_width2.s-dist)/blur2,0.0,1.0);vec4 color=texture2D(u_image,vec2(v_lineprogress,0.5));gl_FragColor=color*(alpha*opacity);\n#ifdef OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\n}","\n#define MAX_LINE_DISTANCE 32767.0\n#define scale 
0.015873016\nattribute vec2 a_pos_normal;attribute vec4 a_data;uniform mat4 u_matrix;uniform mediump float u_ratio;uniform lowp float u_device_pixel_ratio;uniform vec2 u_units_to_pixels;varying vec2 v_normal;varying vec2 v_width2;varying float v_gamma_scale;varying highp float v_lineprogress;\n#pragma mapbox: define lowp float blur\n#pragma mapbox: define lowp float opacity\n#pragma mapbox: define mediump float gapwidth\n#pragma mapbox: define lowp float offset\n#pragma mapbox: define mediump float width\nvoid main() {\n#pragma mapbox: initialize lowp float blur\n#pragma mapbox: initialize lowp float opacity\n#pragma mapbox: initialize mediump float gapwidth\n#pragma mapbox: initialize lowp float offset\n#pragma mapbox: initialize mediump float width\nfloat ANTIALIASING=1.0/u_device_pixel_ratio/2.0;vec2 a_extrude=a_data.xy-128.0;float a_direction=mod(a_data.z,4.0)-1.0;v_lineprogress=(floor(a_data.z/4.0)+a_data.w*64.0)*2.0/MAX_LINE_DISTANCE;vec2 pos=floor(a_pos_normal*0.5);mediump vec2 normal=a_pos_normal-2.0*pos;normal.y=normal.y*2.0-1.0;v_normal=normal;gapwidth=gapwidth/2.0;float halfwidth=width/2.0;offset=-1.0*offset;float inset=gapwidth+(gapwidth > 0.0 ? ANTIALIASING : 0.0);float outset=gapwidth+halfwidth*(gapwidth > 0.0 ? 2.0 : 1.0)+(halfwidth==0.0 ? 
0.0 : ANTIALIASING);mediump vec2 dist=outset*a_extrude*scale;mediump float u=0.5*a_direction;mediump float t=1.0-abs(u);mediump vec2 offset2=offset*a_extrude*scale*normal.y*mat2(t,-u,u,t);vec4 projected_extrude=u_matrix*vec4(dist/u_ratio,0.0,0.0);gl_Position=u_matrix*vec4(pos+offset2/u_ratio,0.0,1.0)+projected_extrude;float extrude_length_without_perspective=length(dist);float extrude_length_with_perspective=length(projected_extrude.xy/gl_Position.w*u_units_to_pixels);v_gamma_scale=extrude_length_without_perspective/extrude_length_with_perspective;v_width2=vec2(outset,inset);}"),hr=yr("uniform lowp float u_device_pixel_ratio;uniform vec2 u_texsize;uniform float u_fade;uniform mediump vec3 u_scale;uniform sampler2D u_image;varying vec2 v_normal;varying vec2 v_width2;varying float v_linesofar;varying float v_gamma_scale;varying float v_width;\n#pragma mapbox: define lowp vec4 pattern_from\n#pragma mapbox: define lowp vec4 pattern_to\n#pragma mapbox: define lowp float pixel_ratio_from\n#pragma mapbox: define lowp float pixel_ratio_to\n#pragma mapbox: define lowp float blur\n#pragma mapbox: define lowp float opacity\nvoid main() {\n#pragma mapbox: initialize mediump vec4 pattern_from\n#pragma mapbox: initialize mediump vec4 pattern_to\n#pragma mapbox: initialize lowp float pixel_ratio_from\n#pragma mapbox: initialize lowp float pixel_ratio_to\n#pragma mapbox: initialize lowp float blur\n#pragma mapbox: initialize lowp float opacity\nvec2 pattern_tl_a=pattern_from.xy;vec2 pattern_br_a=pattern_from.zw;vec2 pattern_tl_b=pattern_to.xy;vec2 pattern_br_b=pattern_to.zw;float tileZoomRatio=u_scale.x;float fromScale=u_scale.y;float toScale=u_scale.z;vec2 display_size_a=(pattern_br_a-pattern_tl_a)/pixel_ratio_from;vec2 display_size_b=(pattern_br_b-pattern_tl_b)/pixel_ratio_to;vec2 pattern_size_a=vec2(display_size_a.x*fromScale/tileZoomRatio,display_size_a.y);vec2 pattern_size_b=vec2(display_size_b.x*toScale/tileZoomRatio,display_size_b.y);float 
aspect_a=display_size_a.y/v_width;float aspect_b=display_size_b.y/v_width;float dist=length(v_normal)*v_width2.s;float blur2=(blur+1.0/u_device_pixel_ratio)*v_gamma_scale;float alpha=clamp(min(dist-(v_width2.t-blur2),v_width2.s-dist)/blur2,0.0,1.0);float x_a=mod(v_linesofar/pattern_size_a.x*aspect_a,1.0);float x_b=mod(v_linesofar/pattern_size_b.x*aspect_b,1.0);float y=0.5*v_normal.y+0.5;vec2 texel_size=1.0/u_texsize;vec2 pos_a=mix(pattern_tl_a*texel_size-texel_size,pattern_br_a*texel_size+texel_size,vec2(x_a,y));vec2 pos_b=mix(pattern_tl_b*texel_size-texel_size,pattern_br_b*texel_size+texel_size,vec2(x_b,y));vec4 color=mix(texture2D(u_image,pos_a),texture2D(u_image,pos_b),u_fade);gl_FragColor=color*alpha*opacity;\n#ifdef OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\n}","\n#define scale 0.015873016\n#define LINE_DISTANCE_SCALE 2.0\nattribute vec2 a_pos_normal;attribute vec4 a_data;uniform mat4 u_matrix;uniform vec2 u_units_to_pixels;uniform mediump float u_ratio;uniform lowp float u_device_pixel_ratio;varying vec2 v_normal;varying vec2 v_width2;varying float v_linesofar;varying float v_gamma_scale;varying float v_width;\n#pragma mapbox: define lowp float blur\n#pragma mapbox: define lowp float opacity\n#pragma mapbox: define lowp float offset\n#pragma mapbox: define mediump float gapwidth\n#pragma mapbox: define mediump float width\n#pragma mapbox: define lowp float floorwidth\n#pragma mapbox: define lowp vec4 pattern_from\n#pragma mapbox: define lowp vec4 pattern_to\n#pragma mapbox: define lowp float pixel_ratio_from\n#pragma mapbox: define lowp float pixel_ratio_to\nvoid main() {\n#pragma mapbox: initialize lowp float blur\n#pragma mapbox: initialize lowp float opacity\n#pragma mapbox: initialize lowp float offset\n#pragma mapbox: initialize mediump float gapwidth\n#pragma mapbox: initialize mediump float width\n#pragma mapbox: initialize lowp float floorwidth\n#pragma mapbox: initialize mediump vec4 pattern_from\n#pragma mapbox: initialize mediump vec4 
pattern_to\n#pragma mapbox: initialize lowp float pixel_ratio_from\n#pragma mapbox: initialize lowp float pixel_ratio_to\nfloat ANTIALIASING=1.0/u_device_pixel_ratio/2.0;vec2 a_extrude=a_data.xy-128.0;float a_direction=mod(a_data.z,4.0)-1.0;float a_linesofar=(floor(a_data.z/4.0)+a_data.w*64.0)*LINE_DISTANCE_SCALE;vec2 pos=floor(a_pos_normal*0.5);mediump vec2 normal=a_pos_normal-2.0*pos;normal.y=normal.y*2.0-1.0;v_normal=normal;gapwidth=gapwidth/2.0;float halfwidth=width/2.0;offset=-1.0*offset;float inset=gapwidth+(gapwidth > 0.0 ? ANTIALIASING : 0.0);float outset=gapwidth+halfwidth*(gapwidth > 0.0 ? 2.0 : 1.0)+(halfwidth==0.0 ? 0.0 : ANTIALIASING);mediump vec2 dist=outset*a_extrude*scale;mediump float u=0.5*a_direction;mediump float t=1.0-abs(u);mediump vec2 offset2=offset*a_extrude*scale*normal.y*mat2(t,-u,u,t);vec4 projected_extrude=u_matrix*vec4(dist/u_ratio,0.0,0.0);gl_Position=u_matrix*vec4(pos+offset2/u_ratio,0.0,1.0)+projected_extrude;float extrude_length_without_perspective=length(dist);float extrude_length_with_perspective=length(projected_extrude.xy/gl_Position.w*u_units_to_pixels);v_gamma_scale=extrude_length_without_perspective/extrude_length_with_perspective;v_linesofar=a_linesofar;v_width2=vec2(outset,inset);v_width=floorwidth;}"),pr=yr("uniform lowp float u_device_pixel_ratio;uniform sampler2D u_image;uniform float u_sdfgamma;uniform float u_mix;varying vec2 v_normal;varying vec2 v_width2;varying vec2 v_tex_a;varying vec2 v_tex_b;varying float v_gamma_scale;\n#pragma mapbox: define highp vec4 color\n#pragma mapbox: define lowp float blur\n#pragma mapbox: define lowp float opacity\n#pragma mapbox: define mediump float width\n#pragma mapbox: define lowp float floorwidth\nvoid main() {\n#pragma mapbox: initialize highp vec4 color\n#pragma mapbox: initialize lowp float blur\n#pragma mapbox: initialize lowp float opacity\n#pragma mapbox: initialize mediump float width\n#pragma mapbox: initialize lowp float floorwidth\nfloat 
dist=length(v_normal)*v_width2.s;float blur2=(blur+1.0/u_device_pixel_ratio)*v_gamma_scale;float alpha=clamp(min(dist-(v_width2.t-blur2),v_width2.s-dist)/blur2,0.0,1.0);float sdfdist_a=texture2D(u_image,v_tex_a).a;float sdfdist_b=texture2D(u_image,v_tex_b).a;float sdfdist=mix(sdfdist_a,sdfdist_b,u_mix);alpha*=smoothstep(0.5-u_sdfgamma/floorwidth,0.5+u_sdfgamma/floorwidth,sdfdist);gl_FragColor=color*(alpha*opacity);\n#ifdef OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\n}","\n#define scale 0.015873016\n#define LINE_DISTANCE_SCALE 2.0\nattribute vec2 a_pos_normal;attribute vec4 a_data;uniform mat4 u_matrix;uniform mediump float u_ratio;uniform lowp float u_device_pixel_ratio;uniform vec2 u_patternscale_a;uniform float u_tex_y_a;uniform vec2 u_patternscale_b;uniform float u_tex_y_b;uniform vec2 u_units_to_pixels;varying vec2 v_normal;varying vec2 v_width2;varying vec2 v_tex_a;varying vec2 v_tex_b;varying float v_gamma_scale;\n#pragma mapbox: define highp vec4 color\n#pragma mapbox: define lowp float blur\n#pragma mapbox: define lowp float opacity\n#pragma mapbox: define mediump float gapwidth\n#pragma mapbox: define lowp float offset\n#pragma mapbox: define mediump float width\n#pragma mapbox: define lowp float floorwidth\nvoid main() {\n#pragma mapbox: initialize highp vec4 color\n#pragma mapbox: initialize lowp float blur\n#pragma mapbox: initialize lowp float opacity\n#pragma mapbox: initialize mediump float gapwidth\n#pragma mapbox: initialize lowp float offset\n#pragma mapbox: initialize mediump float width\n#pragma mapbox: initialize lowp float floorwidth\nfloat ANTIALIASING=1.0/u_device_pixel_ratio/2.0;vec2 a_extrude=a_data.xy-128.0;float a_direction=mod(a_data.z,4.0)-1.0;float a_linesofar=(floor(a_data.z/4.0)+a_data.w*64.0)*LINE_DISTANCE_SCALE;vec2 pos=floor(a_pos_normal*0.5);mediump vec2 normal=a_pos_normal-2.0*pos;normal.y=normal.y*2.0-1.0;v_normal=normal;gapwidth=gapwidth/2.0;float halfwidth=width/2.0;offset=-1.0*offset;float inset=gapwidth+(gapwidth 
> 0.0 ? ANTIALIASING : 0.0);float outset=gapwidth+halfwidth*(gapwidth > 0.0 ? 2.0 : 1.0)+(halfwidth==0.0 ? 0.0 : ANTIALIASING);mediump vec2 dist=outset*a_extrude*scale;mediump float u=0.5*a_direction;mediump float t=1.0-abs(u);mediump vec2 offset2=offset*a_extrude*scale*normal.y*mat2(t,-u,u,t);vec4 projected_extrude=u_matrix*vec4(dist/u_ratio,0.0,0.0);gl_Position=u_matrix*vec4(pos+offset2/u_ratio,0.0,1.0)+projected_extrude;float extrude_length_without_perspective=length(dist);float extrude_length_with_perspective=length(projected_extrude.xy/gl_Position.w*u_units_to_pixels);v_gamma_scale=extrude_length_without_perspective/extrude_length_with_perspective;v_tex_a=vec2(a_linesofar*u_patternscale_a.x/floorwidth,normal.y*u_patternscale_a.y+u_tex_y_a);v_tex_b=vec2(a_linesofar*u_patternscale_b.x/floorwidth,normal.y*u_patternscale_b.y+u_tex_y_b);v_width2=vec2(outset,inset);}"),dr=yr("uniform float u_fade_t;uniform float u_opacity;uniform sampler2D u_image0;uniform sampler2D u_image1;varying vec2 v_pos0;varying vec2 v_pos1;uniform float u_brightness_low;uniform float u_brightness_high;uniform float u_saturation_factor;uniform float u_contrast_factor;uniform vec3 u_spin_weights;void main() {vec4 color0=texture2D(u_image0,v_pos0);vec4 color1=texture2D(u_image1,v_pos1);if (color0.a > 0.0) {color0.rgb=color0.rgb/color0.a;}if (color1.a > 0.0) {color1.rgb=color1.rgb/color1.a;}vec4 color=mix(color0,color1,u_fade_t);color.a*=u_opacity;vec3 rgb=color.rgb;rgb=vec3(dot(rgb,u_spin_weights.xyz),dot(rgb,u_spin_weights.zxy),dot(rgb,u_spin_weights.yzx));float average=(color.r+color.g+color.b)/3.0;rgb+=(average-rgb)*u_saturation_factor;rgb=(rgb-0.5)*u_contrast_factor+0.5;vec3 u_high_vec=vec3(u_brightness_low,u_brightness_low,u_brightness_low);vec3 u_low_vec=vec3(u_brightness_high,u_brightness_high,u_brightness_high);gl_FragColor=vec4(mix(u_high_vec,u_low_vec,rgb)*color.a,color.a);\n#ifdef OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\n}","uniform mat4 u_matrix;uniform vec2 
u_tl_parent;uniform float u_scale_parent;uniform float u_buffer_scale;attribute vec2 a_pos;attribute vec2 a_texture_pos;varying vec2 v_pos0;varying vec2 v_pos1;void main() {gl_Position=u_matrix*vec4(a_pos,0,1);v_pos0=(((a_texture_pos/8192.0)-0.5)/u_buffer_scale )+0.5;v_pos1=(v_pos0*u_scale_parent)+u_tl_parent;}"),mr=yr("uniform sampler2D u_texture;varying vec2 v_tex;varying float v_fade_opacity;\n#pragma mapbox: define lowp float opacity\nvoid main() {\n#pragma mapbox: initialize lowp float opacity\nlowp float alpha=opacity*v_fade_opacity;gl_FragColor=texture2D(u_texture,v_tex)*alpha;\n#ifdef OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\n}","const float PI=3.141592653589793;attribute vec4 a_pos_offset;attribute vec4 a_data;attribute vec4 a_pixeloffset;attribute vec3 a_projected_pos;attribute float a_fade_opacity;uniform bool u_is_size_zoom_constant;uniform bool u_is_size_feature_constant;uniform highp float u_size_t;uniform highp float u_size;uniform highp float u_camera_to_center_distance;uniform highp float u_pitch;uniform bool u_rotate_symbol;uniform highp float u_aspect_ratio;uniform float u_fade_change;uniform mat4 u_matrix;uniform mat4 u_label_plane_matrix;uniform mat4 u_coord_matrix;uniform bool u_is_text;uniform bool u_pitch_with_map;uniform vec2 u_texsize;varying vec2 v_tex;varying float v_fade_opacity;\n#pragma mapbox: define lowp float opacity\nvoid main() {\n#pragma mapbox: initialize lowp float opacity\nvec2 a_pos=a_pos_offset.xy;vec2 a_offset=a_pos_offset.zw;vec2 a_tex=a_data.xy;vec2 a_size=a_data.zw;float a_size_min=floor(a_size[0]*0.5);vec2 a_pxoffset=a_pixeloffset.xy;vec2 a_minFontScale=a_pixeloffset.zw/256.0;highp float segment_angle=-a_projected_pos[2];float size;if (!u_is_size_zoom_constant && !u_is_size_feature_constant) {size=mix(a_size_min,a_size[1],u_size_t)/128.0;} else if (u_is_size_zoom_constant && !u_is_size_feature_constant) {size=a_size_min/128.0;} else {size=u_size;}vec4 projectedPoint=u_matrix*vec4(a_pos,0,1);highp float 
camera_to_anchor_distance=projectedPoint.w;highp float distance_ratio=u_pitch_with_map ?\ncamera_to_anchor_distance/u_camera_to_center_distance :\nu_camera_to_center_distance/camera_to_anchor_distance;highp float perspective_ratio=clamp(0.5+0.5*distance_ratio,0.0,4.0);size*=perspective_ratio;float fontScale=u_is_text ? size/24.0 : size;highp float symbol_rotation=0.0;if (u_rotate_symbol) {vec4 offsetProjectedPoint=u_matrix*vec4(a_pos+vec2(1,0),0,1);vec2 a=projectedPoint.xy/projectedPoint.w;vec2 b=offsetProjectedPoint.xy/offsetProjectedPoint.w;symbol_rotation=atan((b.y-a.y)/u_aspect_ratio,b.x-a.x);}highp float angle_sin=sin(segment_angle+symbol_rotation);highp float angle_cos=cos(segment_angle+symbol_rotation);mat2 rotation_matrix=mat2(angle_cos,-1.0*angle_sin,angle_sin,angle_cos);vec4 projected_pos=u_label_plane_matrix*vec4(a_projected_pos.xy,0.0,1.0);gl_Position=u_coord_matrix*vec4(projected_pos.xy/projected_pos.w+rotation_matrix*(a_offset/32.0*max(a_minFontScale,fontScale)+a_pxoffset/16.0),0.0,1.0);v_tex=a_tex/u_texsize;vec2 fade_opacity=unpack_opacity(a_fade_opacity);float fade_change=fade_opacity[1] > 0.5 ? 
u_fade_change :-u_fade_change;v_fade_opacity=max(0.0,min(1.0,fade_opacity[0]+fade_change));}"),gr=yr("#define SDF_PX 8.0\nuniform bool u_is_halo;uniform sampler2D u_texture;uniform highp float u_gamma_scale;uniform lowp float u_device_pixel_ratio;uniform bool u_is_text;varying vec2 v_data0;varying vec3 v_data1;\n#pragma mapbox: define highp vec4 fill_color\n#pragma mapbox: define highp vec4 halo_color\n#pragma mapbox: define lowp float opacity\n#pragma mapbox: define lowp float halo_width\n#pragma mapbox: define lowp float halo_blur\nvoid main() {\n#pragma mapbox: initialize highp vec4 fill_color\n#pragma mapbox: initialize highp vec4 halo_color\n#pragma mapbox: initialize lowp float opacity\n#pragma mapbox: initialize lowp float halo_width\n#pragma mapbox: initialize lowp float halo_blur\nfloat EDGE_GAMMA=0.105/u_device_pixel_ratio;vec2 tex=v_data0.xy;float gamma_scale=v_data1.x;float size=v_data1.y;float fade_opacity=v_data1[2];float fontScale=u_is_text ? size/24.0 : size;lowp vec4 color=fill_color;highp float gamma=EDGE_GAMMA/(fontScale*u_gamma_scale);lowp float buff=(256.0-64.0)/256.0;if (u_is_halo) {color=halo_color;gamma=(halo_blur*1.19/SDF_PX+EDGE_GAMMA)/(fontScale*u_gamma_scale);buff=(6.0-halo_width/fontScale)/SDF_PX;}lowp float dist=texture2D(u_texture,tex).a;highp float gamma_scaled=gamma*gamma_scale;highp float alpha=smoothstep(buff-gamma_scaled,buff+gamma_scaled,dist);gl_FragColor=color*(alpha*opacity*fade_opacity);\n#ifdef OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\n}","const float PI=3.141592653589793;attribute vec4 a_pos_offset;attribute vec4 a_data;attribute vec4 a_pixeloffset;attribute vec3 a_projected_pos;attribute float a_fade_opacity;uniform bool u_is_size_zoom_constant;uniform bool u_is_size_feature_constant;uniform highp float u_size_t;uniform highp float u_size;uniform mat4 u_matrix;uniform mat4 u_label_plane_matrix;uniform mat4 u_coord_matrix;uniform bool u_is_text;uniform bool u_pitch_with_map;uniform highp float u_pitch;uniform 
bool u_rotate_symbol;uniform highp float u_aspect_ratio;uniform highp float u_camera_to_center_distance;uniform float u_fade_change;uniform vec2 u_texsize;varying vec2 v_data0;varying vec3 v_data1;\n#pragma mapbox: define highp vec4 fill_color\n#pragma mapbox: define highp vec4 halo_color\n#pragma mapbox: define lowp float opacity\n#pragma mapbox: define lowp float halo_width\n#pragma mapbox: define lowp float halo_blur\nvoid main() {\n#pragma mapbox: initialize highp vec4 fill_color\n#pragma mapbox: initialize highp vec4 halo_color\n#pragma mapbox: initialize lowp float opacity\n#pragma mapbox: initialize lowp float halo_width\n#pragma mapbox: initialize lowp float halo_blur\nvec2 a_pos=a_pos_offset.xy;vec2 a_offset=a_pos_offset.zw;vec2 a_tex=a_data.xy;vec2 a_size=a_data.zw;float a_size_min=floor(a_size[0]*0.5);vec2 a_pxoffset=a_pixeloffset.xy;highp float segment_angle=-a_projected_pos[2];float size;if (!u_is_size_zoom_constant && !u_is_size_feature_constant) {size=mix(a_size_min,a_size[1],u_size_t)/128.0;} else if (u_is_size_zoom_constant && !u_is_size_feature_constant) {size=a_size_min/128.0;} else {size=u_size;}vec4 projectedPoint=u_matrix*vec4(a_pos,0,1);highp float camera_to_anchor_distance=projectedPoint.w;highp float distance_ratio=u_pitch_with_map ?\ncamera_to_anchor_distance/u_camera_to_center_distance :\nu_camera_to_center_distance/camera_to_anchor_distance;highp float perspective_ratio=clamp(0.5+0.5*distance_ratio,0.0,4.0);size*=perspective_ratio;float fontScale=u_is_text ? 
size/24.0 : size;highp float symbol_rotation=0.0;if (u_rotate_symbol) {vec4 offsetProjectedPoint=u_matrix*vec4(a_pos+vec2(1,0),0,1);vec2 a=projectedPoint.xy/projectedPoint.w;vec2 b=offsetProjectedPoint.xy/offsetProjectedPoint.w;symbol_rotation=atan((b.y-a.y)/u_aspect_ratio,b.x-a.x);}highp float angle_sin=sin(segment_angle+symbol_rotation);highp float angle_cos=cos(segment_angle+symbol_rotation);mat2 rotation_matrix=mat2(angle_cos,-1.0*angle_sin,angle_sin,angle_cos);vec4 projected_pos=u_label_plane_matrix*vec4(a_projected_pos.xy,0.0,1.0);gl_Position=u_coord_matrix*vec4(projected_pos.xy/projected_pos.w+rotation_matrix*(a_offset/32.0*fontScale+a_pxoffset),0.0,1.0);float gamma_scale=gl_Position.w;vec2 fade_opacity=unpack_opacity(a_fade_opacity);float fade_change=fade_opacity[1] > 0.5 ? u_fade_change :-u_fade_change;float interpolated_fade_opacity=max(0.0,min(1.0,fade_opacity[0]+fade_change));v_data0=a_tex/u_texsize;v_data1=vec3(gamma_scale,size,interpolated_fade_opacity);}"),vr=yr("#define SDF_PX 8.0\n#define SDF 1.0\n#define ICON 0.0\nuniform bool u_is_halo;uniform sampler2D u_texture;uniform sampler2D u_texture_icon;uniform highp float u_gamma_scale;uniform lowp float u_device_pixel_ratio;varying vec4 v_data0;varying vec4 v_data1;\n#pragma mapbox: define highp vec4 fill_color\n#pragma mapbox: define highp vec4 halo_color\n#pragma mapbox: define lowp float opacity\n#pragma mapbox: define lowp float halo_width\n#pragma mapbox: define lowp float halo_blur\nvoid main() {\n#pragma mapbox: initialize highp vec4 fill_color\n#pragma mapbox: initialize highp vec4 halo_color\n#pragma mapbox: initialize lowp float opacity\n#pragma mapbox: initialize lowp float halo_width\n#pragma mapbox: initialize lowp float halo_blur\nfloat fade_opacity=v_data1[2];if (v_data1.w==ICON) {vec2 tex_icon=v_data0.zw;lowp float alpha=opacity*fade_opacity;gl_FragColor=texture2D(u_texture_icon,tex_icon)*alpha;\n#ifdef OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\nreturn;}vec2 
tex=v_data0.xy;float EDGE_GAMMA=0.105/u_device_pixel_ratio;float gamma_scale=v_data1.x;float size=v_data1.y;float fontScale=size/24.0;lowp vec4 color=fill_color;highp float gamma=EDGE_GAMMA/(fontScale*u_gamma_scale);lowp float buff=(256.0-64.0)/256.0;if (u_is_halo) {color=halo_color;gamma=(halo_blur*1.19/SDF_PX+EDGE_GAMMA)/(fontScale*u_gamma_scale);buff=(6.0-halo_width/fontScale)/SDF_PX;}lowp float dist=texture2D(u_texture,tex).a;highp float gamma_scaled=gamma*gamma_scale;highp float alpha=smoothstep(buff-gamma_scaled,buff+gamma_scaled,dist);gl_FragColor=color*(alpha*opacity*fade_opacity);\n#ifdef OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\n}","const float PI=3.141592653589793;attribute vec4 a_pos_offset;attribute vec4 a_data;attribute vec3 a_projected_pos;attribute float a_fade_opacity;uniform bool u_is_size_zoom_constant;uniform bool u_is_size_feature_constant;uniform highp float u_size_t;uniform highp float u_size;uniform mat4 u_matrix;uniform mat4 u_label_plane_matrix;uniform mat4 u_coord_matrix;uniform bool u_is_text;uniform bool u_pitch_with_map;uniform highp float u_pitch;uniform bool u_rotate_symbol;uniform highp float u_aspect_ratio;uniform highp float u_camera_to_center_distance;uniform float u_fade_change;uniform vec2 u_texsize;uniform vec2 u_texsize_icon;varying vec4 v_data0;varying vec4 v_data1;\n#pragma mapbox: define highp vec4 fill_color\n#pragma mapbox: define highp vec4 halo_color\n#pragma mapbox: define lowp float opacity\n#pragma mapbox: define lowp float halo_width\n#pragma mapbox: define lowp float halo_blur\nvoid main() {\n#pragma mapbox: initialize highp vec4 fill_color\n#pragma mapbox: initialize highp vec4 halo_color\n#pragma mapbox: initialize lowp float opacity\n#pragma mapbox: initialize lowp float halo_width\n#pragma mapbox: initialize lowp float halo_blur\nvec2 a_pos=a_pos_offset.xy;vec2 a_offset=a_pos_offset.zw;vec2 a_tex=a_data.xy;vec2 a_size=a_data.zw;float a_size_min=floor(a_size[0]*0.5);float 
is_sdf=a_size[0]-2.0*a_size_min;highp float segment_angle=-a_projected_pos[2];float size;if (!u_is_size_zoom_constant && !u_is_size_feature_constant) {size=mix(a_size_min,a_size[1],u_size_t)/128.0;} else if (u_is_size_zoom_constant && !u_is_size_feature_constant) {size=a_size_min/128.0;} else {size=u_size;}vec4 projectedPoint=u_matrix*vec4(a_pos,0,1);highp float camera_to_anchor_distance=projectedPoint.w;highp float distance_ratio=u_pitch_with_map ?\ncamera_to_anchor_distance/u_camera_to_center_distance :\nu_camera_to_center_distance/camera_to_anchor_distance;highp float perspective_ratio=clamp(0.5+0.5*distance_ratio,0.0,4.0);size*=perspective_ratio;float fontScale=size/24.0;highp float symbol_rotation=0.0;if (u_rotate_symbol) {vec4 offsetProjectedPoint=u_matrix*vec4(a_pos+vec2(1,0),0,1);vec2 a=projectedPoint.xy/projectedPoint.w;vec2 b=offsetProjectedPoint.xy/offsetProjectedPoint.w;symbol_rotation=atan((b.y-a.y)/u_aspect_ratio,b.x-a.x);}highp float angle_sin=sin(segment_angle+symbol_rotation);highp float angle_cos=cos(segment_angle+symbol_rotation);mat2 rotation_matrix=mat2(angle_cos,-1.0*angle_sin,angle_sin,angle_cos);vec4 projected_pos=u_label_plane_matrix*vec4(a_projected_pos.xy,0.0,1.0);gl_Position=u_coord_matrix*vec4(projected_pos.xy/projected_pos.w+rotation_matrix*(a_offset/32.0*fontScale),0.0,1.0);float gamma_scale=gl_Position.w;vec2 fade_opacity=unpack_opacity(a_fade_opacity);float fade_change=fade_opacity[1] > 0.5 ? 
u_fade_change :-u_fade_change;float interpolated_fade_opacity=max(0.0,min(1.0,fade_opacity[0]+fade_change));v_data0.xy=a_tex/u_texsize;v_data0.zw=a_tex/u_texsize_icon;v_data1=vec4(gamma_scale,size,interpolated_fade_opacity,is_sdf);}");/* yr: shader preprocessor. Scans a fragment source `t` and vertex source `e` for
 * `#pragma mapbox: (define|initialize) <precision> <type> <name>` markers and expands each into
 * GLSL guarded by HAS_UNIFORM_u_<name>: when the paint property is data-driven the value arrives
 * as a vertex attribute (a_<name>) and is passed through (or interpolated via unpack_mix_* with
 * u_<name>_t — unpack_mix_* is assumed to come from the shared prelude; not visible here); when it
 * is constant it is read from a plain uniform u_<name>. `n` records which names the fragment
 * shader declared, so the vertex expansion only emits a varying for those. Returns
 * {fragmentSource, vertexSource}. */function yr(t,e){var r=/#pragma mapbox: ([\w]+) ([\w]+) ([\w]+) ([\w]+)/g,n={};return{/* fragment: define -> varying (data-driven) or uniform (constant); initialize -> read uniform */fragmentSource:t=t.replace(r,(function(t,e,r,i,a){return n[a]=!0,"define"===e?"\n#ifndef HAS_UNIFORM_u_"+a+"\nvarying "+r+" "+i+" "+a+";\n#else\nuniform "+r+" "+i+" u_"+a+";\n#endif\n":"\n#ifdef HAS_UNIFORM_u_"+a+"\n "+r+" "+i+" "+a+" = u_"+a+";\n#endif\n"})),/* vertex: attributes are packed — a float property rides in a vec2 (value pair for zoom
 * interpolation), anything color-like in a vec4; `s` picks the matching unpack helper. */vertexSource:e=e.replace(r,(function(t,e,r,i,a){var o="float"===i?"vec2":"vec4",s=a.match(/color/)?"color":o;return n[a]?"define"===e?"\n#ifndef HAS_UNIFORM_u_"+a+"\nuniform lowp float u_"+a+"_t;\nattribute "+r+" "+o+" a_"+a+";\nvarying "+r+" "+i+" "+a+";\n#else\nuniform "+r+" "+i+" u_"+a+";\n#endif\n":"vec4"===s?"\n#ifndef HAS_UNIFORM_u_"+a+"\n "+a+" = a_"+a+";\n#else\n "+r+" "+i+" "+a+" = u_"+a+";\n#endif\n":"\n#ifndef HAS_UNIFORM_u_"+a+"\n "+a+" = unpack_mix_"+s+"(a_"+a+", u_"+a+"_t);\n#else\n "+r+" "+i+" "+a+" = u_"+a+";\n#endif\n":"define"===e?"\n#ifndef HAS_UNIFORM_u_"+a+"\nuniform lowp float u_"+a+"_t;\nattribute "+r+" "+o+" a_"+a+";\n#else\nuniform "+r+" "+i+" u_"+a+";\n#endif\n":"vec4"===s?"\n#ifndef HAS_UNIFORM_u_"+a+"\n "+r+" "+i+" "+a+" = a_"+a+";\n#else\n "+r+" "+i+" "+a+" = u_"+a+";\n#endif\n":"\n#ifndef HAS_UNIFORM_u_"+a+"\n "+r+" "+i+" "+a+" = unpack_mix_"+s+"(a_"+a+", u_"+a+"_t);\n#else\n "+r+" "+i+" "+a+" = u_"+a+";\n#endif\n"}))}}var 
xr=Object.freeze({__proto__:null,prelude:Ye,background:We,backgroundPattern:Xe,circle:Ze,clippingMask:Je,heatmap:Ke,heatmapTexture:Qe,collisionBox:$e,collisionCircle:tr,debug:er,fill:rr,fillOutline:nr,fillOutlinePattern:ir,fillPattern:ar,fillExtrusion:or,fillExtrusionPattern:sr,hillshadePrepare:lr,hillshade:cr,line:ur,lineGradient:fr,linePattern:hr,lineSDF:pr,raster:dr,symbolIcon:mr,symbolSDF:gr,symbolTextAndIcon:vr}),br=function(){this.boundProgram=null,this.boundLayoutVertexBuffer=null,this.boundPaintVertexBuffers=[],this.boundIndexBuffer=null,this.boundVertexOffset=null,this.boundDynamicVertexBuffer=null,this.vao=null};br.prototype.bind=function(t,e,r,n,i,a,o,s){this.context=t;for(var l=this.boundPaintVertexBuffers.length!==n.length,c=0;!l&&c>16,s>>16],u_pixel_coord_lower:[65535&o,65535&s]}}_r.prototype.draw=function(t,e,r,n,i,a,o,s,l,c,u,f,h,p,d,m){var g,v=t.gl;if(!this.failedToCreate){for(var y in t.program.set(this.program),t.setDepthMode(r),t.setStencilMode(n),t.setColorMode(i),t.setCullFace(a),this.fixedUniforms)this.fixedUniforms[y].set(o[y]);p&&p.setUniforms(t,this.binderUniforms,f,{zoom:h});for(var x=(g={},g[v.LINES]=2,g[v.TRIANGLES]=3,g[v.LINE_STRIP]=1,g)[e],b=0,_=u.get();b<_.length;b+=1){var w=_[b],T=w.vaos||(w.vaos={});(T[s]||(T[s]=new br)).bind(t,this,l,p?p.getPaintVertexBuffers():[],c,w.vertexOffset,d,m),v.drawElements(e,w.primitiveLength*x,v.UNSIGNED_SHORT,w.primitiveOffset*x*2)}}};var Tr=function(e,r,n,i){var a=r.style.light,o=a.properties.get("position"),s=[o.x,o.y,o.z],l=t.create$1();"viewport"===a.properties.get("anchor")&&t.fromRotation(l,-r.transform.angle),t.transformMat3(s,s,l);var c=a.properties.get("color");return{u_matrix:e,u_lightpos:s,u_lightintensity:a.properties.get("intensity"),u_lightcolor:[c.r,c.g,c.b],u_vertical_gradient:+n,u_opacity:i}},kr=function(e,r,n,i,a,o,s){return t.extend(Tr(e,r,n,i),wr(o,r,s),{u_height_factor:-Math.pow(2,a.overscaledZ)/s.tileSize/8})},Ar=function(t){return{u_matrix:t}},Mr=function(e,r,n,i){return 
t.extend(Ar(e),wr(n,r,i))},/* Sr: fill-outline uniforms — matrix plus viewport size in `u_world`. */Sr=function(t,e){return{u_matrix:t,u_world:e}},/* Er: fill-outline-pattern uniforms — pattern uniforms (Mr) plus `u_world`. */Er=function(e,r,n,i,a){return t.extend(Mr(e,r,n,i),{u_world:a})},/* Lr: circle uniforms. With map-pitch-alignment the extrude scale is in tile units
 * (via pe(), a tile-units-to-pixels helper defined elsewhere — verify); otherwise it is
 * the viewport pixels-to-GL-units pair. Booleans are coerced to 0/1 with unary +. */Lr=function(e,r,n,i){var a,o,s=e.transform;if("map"===i.paint.get("circle-pitch-alignment")){var l=pe(n,1,s.zoom);a=!0,o=[l,l]}else a=!1,o=s.pixelsToGLUnits;return{u_camera_to_center_distance:s.cameraToCenterDistance,u_scale_with_map:+("map"===i.paint.get("circle-pitch-scale")),u_matrix:e.translatePosMatrix(r.posMatrix,n,i.paint.get("circle-translate"),i.paint.get("circle-translate-anchor")),u_pitch_with_map:+a,u_device_pixel_ratio:t.browser.devicePixelRatio,u_extrude_scale:o}},/* Cr: collision-box debug uniforms; extrude scale is corrected for overscaled tiles. */Cr=function(t,e,r){var n=pe(r,1,e.zoom),i=Math.pow(2,e.zoom-r.tileID.overscaledZ),a=r.tileID.overscaleFactor();return{u_matrix:t,u_camera_to_center_distance:e.cameraToCenterDistance,u_pixels_to_tile_units:n,u_extrude_scale:[e.pixelsToGLUnits[0]/(n*i),e.pixelsToGLUnits[1]/(n*i)],u_overscale_factor:a}},/* Pr: collision-circle debug uniforms (matrix, inverse matrix, viewport size). */Pr=function(t,e,r){return{u_matrix:t,u_inv_matrix:e,u_camera_to_center_distance:r.cameraToCenterDistance,u_viewport_size:[r.width,r.height]}},/* Ir: debug-overlay uniforms; overlay scale defaults to 1 when omitted. */Ir=function(t,e,r){return void 0===r&&(r=1),{u_matrix:t,u_color:e,u_overlay:0,u_overlay_scale:r}},/* Or: clipping-mask uniforms — matrix only. */Or=function(t){return{u_matrix:t}},/* zr: heatmap uniforms. */zr=function(t,e,r,n){return{u_matrix:t,u_extrude_scale:pe(e,1,r),u_intensity:n}};/* Dr: latitude range [top, bottom] covered by a tile, from its canonical z/y. */function Dr(e,r){var n=Math.pow(2,r.canonical.z),i=r.canonical.y;return[new t.MercatorCoordinate(0,i/n).toLngLat().lat,new t.MercatorCoordinate(0,(i+1)/n).toLngLat().lat]}/* Rr: base line uniforms shared by all line program variants. */var Rr=function(e,r,n){var i=e.transform;return{u_matrix:Ur(e,r,n),u_ratio:1/pe(r,1,i.zoom),u_device_pixel_ratio:t.browser.devicePixelRatio,u_units_to_pixels:[1/i.pixelsToGLUnits[0],1/i.pixelsToGLUnits[1]]}},/* Fr: line-gradient uniforms — base line uniforms plus the gradient texture unit. */Fr=function(e,r,n){return t.extend(Rr(e,r,n),{u_image:0})},Br=function(e,r,n,i){var 
a=e.transform,o=jr(r,a);return{u_matrix:Ur(e,r,n),u_texsize:r.imageAtlasTexture.size,u_ratio:1/pe(r,1,a.zoom),u_device_pixel_ratio:t.browser.devicePixelRatio,u_image:0,u_scale:[o,i.fromScale,i.toScale],u_fade:i.t,u_units_to_pixels:[1/a.pixelsToGLUnits[0],1/a.pixelsToGLUnits[1]]}},/* Nr: line-SDF (dasharray) uniforms — dash textures for the crossfade `from`/`to`
 * patterns, their scales, and an SDF gamma derived from the dash atlas width. */Nr=function(e,r,n,i,a){var o=e.transform,s=e.lineAtlas,l=jr(r,o),c="round"===n.layout.get("line-cap"),u=s.getDash(i.from,c),f=s.getDash(i.to,c),h=u.width*a.fromScale,p=f.width*a.toScale;return t.extend(Rr(e,r,n),{u_patternscale_a:[l/h,-u.height/2],u_patternscale_b:[l/p,-f.height/2],u_sdfgamma:s.width/(256*Math.min(h,p)*t.browser.devicePixelRatio)/2,u_image:0,u_tex_y_a:u.y,u_tex_y_b:f.y,u_mix:a.t})};/* jr: reciprocal of pe() at the transform's tileZoom — pe is assumed to be a
 * tile-units-to-pixels conversion defined elsewhere in the bundle (verify). */function jr(t,e){return 1/pe(t,1,e.tileZoom)}/* Ur: tile pos matrix shifted by the layer's line-translate paint properties. */function Ur(t,e,r){return t.translatePosMatrix(e.tileID.posMatrix,e,r.paint.get("line-translate"),r.paint.get("line-translate-anchor"))}/* Vr: raster uniforms — parent-fade blend, opacity, and color adjustments
 * (saturation/contrast remapped to shader-friendly factors; hue via Hr).
 * Note: `var a,o` after the return is minifier-hoisted declarations for the
 * comma expressions used inside the object literal. */var Vr=function(t,e,r,n,i){return{u_matrix:t,u_tl_parent:e,u_scale_parent:r,u_buffer_scale:1,u_fade_t:n.mix,u_opacity:n.opacity*i.paint.get("raster-opacity"),u_image0:0,u_image1:1,u_brightness_low:i.paint.get("raster-brightness-min"),u_brightness_high:i.paint.get("raster-brightness-max"),u_saturation_factor:(o=i.paint.get("raster-saturation"),o>0?1-1/(1.001-o):-o),u_contrast_factor:(a=i.paint.get("raster-contrast"),a>0?1/(1-a):1+a),u_spin_weights:Hr(i.paint.get("raster-hue-rotate"))};var a,o};/* Hr: converts a hue-rotation angle in degrees into the three spin weights consumed
 * as u_spin_weights by the raster shader: [(2cos+1)/3, (-sqrt3*sin-cos+1)/3, (sqrt3*sin-cos+1)/3]. */function Hr(t){t*=Math.PI/180;var e=Math.sin(t),r=Math.cos(t);return[(2*r+1)/3,(-Math.sqrt(3)*e-r+1)/3,(Math.sqrt(3)*e-r+1)/3]}/* Gr: symbol-icon uniform values (body continues past this line). */var qr,Gr=function(t,e,r,n,i,a,o,s,l,c){var 
u=i.transform;return{u_is_size_zoom_constant:+("constant"===t||"source"===t),u_is_size_feature_constant:+("constant"===t||"camera"===t),u_size_t:e?e.uSizeT:0,u_size:e?e.uSize:0,u_camera_to_center_distance:u.cameraToCenterDistance,u_pitch:u.pitch/360*2*Math.PI,u_rotate_symbol:+r,u_aspect_ratio:u.width/u.height,u_fade_change:i.options.fadeDuration?i.symbolFadeChange:1,u_matrix:a,u_label_plane_matrix:o,u_coord_matrix:s,u_is_text:+l,u_pitch_with_map:+n,u_texsize:c,u_texture:0}},Yr=function(e,r,n,i,a,o,s,l,c,u,f){var h=a.transform;return t.extend(Gr(e,r,n,i,a,o,s,l,c,u),{u_gamma_scale:i?Math.cos(h._pitch)*h.cameraToCenterDistance:1,u_device_pixel_ratio:t.browser.devicePixelRatio,u_is_halo:+f})},Wr=function(e,r,n,i,a,o,s,l,c,u){return t.extend(Yr(e,r,n,i,a,o,s,l,!0,c,!0),{u_texsize_icon:u,u_texture_icon:1})},Xr=function(t,e,r){return{u_matrix:t,u_opacity:e,u_color:r}},Zr=function(e,r,n,i,a,o){return t.extend(function(t,e,r,n){var i=r.imageManager.getPattern(t.from.toString()),a=r.imageManager.getPattern(t.to.toString()),o=r.imageManager.getPixelSize(),s=o.width,l=o.height,c=Math.pow(2,n.tileID.overscaledZ),u=n.tileSize*Math.pow(2,r.transform.tileZoom)/c,f=u*(n.tileID.canonical.x+n.tileID.wrap*c),h=u*n.tileID.canonical.y;return{u_image:0,u_pattern_tl_a:i.tl,u_pattern_br_a:i.br,u_pattern_tl_b:a.tl,u_pattern_br_b:a.br,u_texsize:[s,l],u_mix:e.t,u_pattern_size_a:i.displaySize,u_pattern_size_b:a.displaySize,u_scale_a:e.fromScale,u_scale_b:e.toScale,u_tile_units_to_pixels:1/pe(n,1,r.transform.tileZoom),u_pixel_coord_upper:[f>>16,h>>16],u_pixel_coord_lower:[65535&f,65535&h]}}(i,o,n,a),{u_matrix:e,u_opacity:r})},Jr={fillExtrusion:function(e,r){return{u_matrix:new t.UniformMatrix4f(e,r.u_matrix),u_lightpos:new t.Uniform3f(e,r.u_lightpos),u_lightintensity:new t.Uniform1f(e,r.u_lightintensity),u_lightcolor:new t.Uniform3f(e,r.u_lightcolor),u_vertical_gradient:new t.Uniform1f(e,r.u_vertical_gradient),u_opacity:new 
t.Uniform1f(e,r.u_opacity)}},fillExtrusionPattern:function(e,r){return{u_matrix:new t.UniformMatrix4f(e,r.u_matrix),u_lightpos:new t.Uniform3f(e,r.u_lightpos),u_lightintensity:new t.Uniform1f(e,r.u_lightintensity),u_lightcolor:new t.Uniform3f(e,r.u_lightcolor),u_vertical_gradient:new t.Uniform1f(e,r.u_vertical_gradient),u_height_factor:new t.Uniform1f(e,r.u_height_factor),u_image:new t.Uniform1i(e,r.u_image),u_texsize:new t.Uniform2f(e,r.u_texsize),u_pixel_coord_upper:new t.Uniform2f(e,r.u_pixel_coord_upper),u_pixel_coord_lower:new t.Uniform2f(e,r.u_pixel_coord_lower),u_scale:new t.Uniform3f(e,r.u_scale),u_fade:new t.Uniform1f(e,r.u_fade),u_opacity:new t.Uniform1f(e,r.u_opacity)}},fill:function(e,r){return{u_matrix:new t.UniformMatrix4f(e,r.u_matrix)}},fillPattern:function(e,r){return{u_matrix:new t.UniformMatrix4f(e,r.u_matrix),u_image:new t.Uniform1i(e,r.u_image),u_texsize:new t.Uniform2f(e,r.u_texsize),u_pixel_coord_upper:new t.Uniform2f(e,r.u_pixel_coord_upper),u_pixel_coord_lower:new t.Uniform2f(e,r.u_pixel_coord_lower),u_scale:new t.Uniform3f(e,r.u_scale),u_fade:new t.Uniform1f(e,r.u_fade)}},fillOutline:function(e,r){return{u_matrix:new t.UniformMatrix4f(e,r.u_matrix),u_world:new t.Uniform2f(e,r.u_world)}},fillOutlinePattern:function(e,r){return{u_matrix:new t.UniformMatrix4f(e,r.u_matrix),u_world:new t.Uniform2f(e,r.u_world),u_image:new t.Uniform1i(e,r.u_image),u_texsize:new t.Uniform2f(e,r.u_texsize),u_pixel_coord_upper:new t.Uniform2f(e,r.u_pixel_coord_upper),u_pixel_coord_lower:new t.Uniform2f(e,r.u_pixel_coord_lower),u_scale:new t.Uniform3f(e,r.u_scale),u_fade:new t.Uniform1f(e,r.u_fade)}},circle:function(e,r){return{u_camera_to_center_distance:new t.Uniform1f(e,r.u_camera_to_center_distance),u_scale_with_map:new t.Uniform1i(e,r.u_scale_with_map),u_pitch_with_map:new t.Uniform1i(e,r.u_pitch_with_map),u_extrude_scale:new t.Uniform2f(e,r.u_extrude_scale),u_device_pixel_ratio:new t.Uniform1f(e,r.u_device_pixel_ratio),u_matrix:new 
t.UniformMatrix4f(e,r.u_matrix)}},collisionBox:function(e,r){return{u_matrix:new t.UniformMatrix4f(e,r.u_matrix),u_camera_to_center_distance:new t.Uniform1f(e,r.u_camera_to_center_distance),u_pixels_to_tile_units:new t.Uniform1f(e,r.u_pixels_to_tile_units),u_extrude_scale:new t.Uniform2f(e,r.u_extrude_scale),u_overscale_factor:new t.Uniform1f(e,r.u_overscale_factor)}},collisionCircle:function(e,r){return{u_matrix:new t.UniformMatrix4f(e,r.u_matrix),u_inv_matrix:new t.UniformMatrix4f(e,r.u_inv_matrix),u_camera_to_center_distance:new t.Uniform1f(e,r.u_camera_to_center_distance),u_viewport_size:new t.Uniform2f(e,r.u_viewport_size)}},debug:function(e,r){return{u_color:new t.UniformColor(e,r.u_color),u_matrix:new t.UniformMatrix4f(e,r.u_matrix),u_overlay:new t.Uniform1i(e,r.u_overlay),u_overlay_scale:new t.Uniform1f(e,r.u_overlay_scale)}},clippingMask:function(e,r){return{u_matrix:new t.UniformMatrix4f(e,r.u_matrix)}},heatmap:function(e,r){return{u_extrude_scale:new t.Uniform1f(e,r.u_extrude_scale),u_intensity:new t.Uniform1f(e,r.u_intensity),u_matrix:new t.UniformMatrix4f(e,r.u_matrix)}},heatmapTexture:function(e,r){return{u_matrix:new t.UniformMatrix4f(e,r.u_matrix),u_world:new t.Uniform2f(e,r.u_world),u_image:new t.Uniform1i(e,r.u_image),u_color_ramp:new t.Uniform1i(e,r.u_color_ramp),u_opacity:new t.Uniform1f(e,r.u_opacity)}},hillshade:function(e,r){return{u_matrix:new t.UniformMatrix4f(e,r.u_matrix),u_image:new t.Uniform1i(e,r.u_image),u_latrange:new t.Uniform2f(e,r.u_latrange),u_light:new t.Uniform2f(e,r.u_light),u_shadow:new t.UniformColor(e,r.u_shadow),u_highlight:new t.UniformColor(e,r.u_highlight),u_accent:new t.UniformColor(e,r.u_accent)}},hillshadePrepare:function(e,r){return{u_matrix:new t.UniformMatrix4f(e,r.u_matrix),u_image:new t.Uniform1i(e,r.u_image),u_dimension:new t.Uniform2f(e,r.u_dimension),u_zoom:new t.Uniform1f(e,r.u_zoom),u_maxzoom:new t.Uniform1f(e,r.u_maxzoom),u_unpack:new t.Uniform4f(e,r.u_unpack)}},line:function(e,r){return{u_matrix:new 
t.UniformMatrix4f(e,r.u_matrix),u_ratio:new t.Uniform1f(e,r.u_ratio),u_device_pixel_ratio:new t.Uniform1f(e,r.u_device_pixel_ratio),u_units_to_pixels:new t.Uniform2f(e,r.u_units_to_pixels)}},lineGradient:function(e,r){return{u_matrix:new t.UniformMatrix4f(e,r.u_matrix),u_ratio:new t.Uniform1f(e,r.u_ratio),u_device_pixel_ratio:new t.Uniform1f(e,r.u_device_pixel_ratio),u_units_to_pixels:new t.Uniform2f(e,r.u_units_to_pixels),u_image:new t.Uniform1i(e,r.u_image)}},linePattern:function(e,r){return{u_matrix:new t.UniformMatrix4f(e,r.u_matrix),u_texsize:new t.Uniform2f(e,r.u_texsize),u_ratio:new t.Uniform1f(e,r.u_ratio),u_device_pixel_ratio:new t.Uniform1f(e,r.u_device_pixel_ratio),u_image:new t.Uniform1i(e,r.u_image),u_units_to_pixels:new t.Uniform2f(e,r.u_units_to_pixels),u_scale:new t.Uniform3f(e,r.u_scale),u_fade:new t.Uniform1f(e,r.u_fade)}},lineSDF:function(e,r){return{u_matrix:new t.UniformMatrix4f(e,r.u_matrix),u_ratio:new t.Uniform1f(e,r.u_ratio),u_device_pixel_ratio:new t.Uniform1f(e,r.u_device_pixel_ratio),u_units_to_pixels:new t.Uniform2f(e,r.u_units_to_pixels),u_patternscale_a:new t.Uniform2f(e,r.u_patternscale_a),u_patternscale_b:new t.Uniform2f(e,r.u_patternscale_b),u_sdfgamma:new t.Uniform1f(e,r.u_sdfgamma),u_image:new t.Uniform1i(e,r.u_image),u_tex_y_a:new t.Uniform1f(e,r.u_tex_y_a),u_tex_y_b:new t.Uniform1f(e,r.u_tex_y_b),u_mix:new t.Uniform1f(e,r.u_mix)}},raster:function(e,r){return{u_matrix:new t.UniformMatrix4f(e,r.u_matrix),u_tl_parent:new t.Uniform2f(e,r.u_tl_parent),u_scale_parent:new t.Uniform1f(e,r.u_scale_parent),u_buffer_scale:new t.Uniform1f(e,r.u_buffer_scale),u_fade_t:new t.Uniform1f(e,r.u_fade_t),u_opacity:new t.Uniform1f(e,r.u_opacity),u_image0:new t.Uniform1i(e,r.u_image0),u_image1:new t.Uniform1i(e,r.u_image1),u_brightness_low:new t.Uniform1f(e,r.u_brightness_low),u_brightness_high:new t.Uniform1f(e,r.u_brightness_high),u_saturation_factor:new t.Uniform1f(e,r.u_saturation_factor),u_contrast_factor:new 
t.Uniform1f(e,r.u_contrast_factor),u_spin_weights:new t.Uniform3f(e,r.u_spin_weights)}},symbolIcon:function(e,r){return{u_is_size_zoom_constant:new t.Uniform1i(e,r.u_is_size_zoom_constant),u_is_size_feature_constant:new t.Uniform1i(e,r.u_is_size_feature_constant),u_size_t:new t.Uniform1f(e,r.u_size_t),u_size:new t.Uniform1f(e,r.u_size),u_camera_to_center_distance:new t.Uniform1f(e,r.u_camera_to_center_distance),u_pitch:new t.Uniform1f(e,r.u_pitch),u_rotate_symbol:new t.Uniform1i(e,r.u_rotate_symbol),u_aspect_ratio:new t.Uniform1f(e,r.u_aspect_ratio),u_fade_change:new t.Uniform1f(e,r.u_fade_change),u_matrix:new t.UniformMatrix4f(e,r.u_matrix),u_label_plane_matrix:new t.UniformMatrix4f(e,r.u_label_plane_matrix),u_coord_matrix:new t.UniformMatrix4f(e,r.u_coord_matrix),u_is_text:new t.Uniform1i(e,r.u_is_text),u_pitch_with_map:new t.Uniform1i(e,r.u_pitch_with_map),u_texsize:new t.Uniform2f(e,r.u_texsize),u_texture:new t.Uniform1i(e,r.u_texture)}},symbolSDF:function(e,r){return{u_is_size_zoom_constant:new t.Uniform1i(e,r.u_is_size_zoom_constant),u_is_size_feature_constant:new t.Uniform1i(e,r.u_is_size_feature_constant),u_size_t:new t.Uniform1f(e,r.u_size_t),u_size:new t.Uniform1f(e,r.u_size),u_camera_to_center_distance:new t.Uniform1f(e,r.u_camera_to_center_distance),u_pitch:new t.Uniform1f(e,r.u_pitch),u_rotate_symbol:new t.Uniform1i(e,r.u_rotate_symbol),u_aspect_ratio:new t.Uniform1f(e,r.u_aspect_ratio),u_fade_change:new t.Uniform1f(e,r.u_fade_change),u_matrix:new t.UniformMatrix4f(e,r.u_matrix),u_label_plane_matrix:new t.UniformMatrix4f(e,r.u_label_plane_matrix),u_coord_matrix:new t.UniformMatrix4f(e,r.u_coord_matrix),u_is_text:new t.Uniform1i(e,r.u_is_text),u_pitch_with_map:new t.Uniform1i(e,r.u_pitch_with_map),u_texsize:new t.Uniform2f(e,r.u_texsize),u_texture:new t.Uniform1i(e,r.u_texture),u_gamma_scale:new t.Uniform1f(e,r.u_gamma_scale),u_device_pixel_ratio:new t.Uniform1f(e,r.u_device_pixel_ratio),u_is_halo:new 
t.Uniform1i(e,r.u_is_halo)}},symbolTextAndIcon:function(e,r){return{u_is_size_zoom_constant:new t.Uniform1i(e,r.u_is_size_zoom_constant),u_is_size_feature_constant:new t.Uniform1i(e,r.u_is_size_feature_constant),u_size_t:new t.Uniform1f(e,r.u_size_t),u_size:new t.Uniform1f(e,r.u_size),u_camera_to_center_distance:new t.Uniform1f(e,r.u_camera_to_center_distance),u_pitch:new t.Uniform1f(e,r.u_pitch),u_rotate_symbol:new t.Uniform1i(e,r.u_rotate_symbol),u_aspect_ratio:new t.Uniform1f(e,r.u_aspect_ratio),u_fade_change:new t.Uniform1f(e,r.u_fade_change),u_matrix:new t.UniformMatrix4f(e,r.u_matrix),u_label_plane_matrix:new t.UniformMatrix4f(e,r.u_label_plane_matrix),u_coord_matrix:new t.UniformMatrix4f(e,r.u_coord_matrix),u_is_text:new t.Uniform1i(e,r.u_is_text),u_pitch_with_map:new t.Uniform1i(e,r.u_pitch_with_map),u_texsize:new t.Uniform2f(e,r.u_texsize),u_texsize_icon:new t.Uniform2f(e,r.u_texsize_icon),u_texture:new t.Uniform1i(e,r.u_texture),u_texture_icon:new t.Uniform1i(e,r.u_texture_icon),u_gamma_scale:new t.Uniform1f(e,r.u_gamma_scale),u_device_pixel_ratio:new t.Uniform1f(e,r.u_device_pixel_ratio),u_is_halo:new t.Uniform1i(e,r.u_is_halo)}},background:function(e,r){return{u_matrix:new t.UniformMatrix4f(e,r.u_matrix),u_opacity:new t.Uniform1f(e,r.u_opacity),u_color:new t.UniformColor(e,r.u_color)}},backgroundPattern:function(e,r){return{u_matrix:new t.UniformMatrix4f(e,r.u_matrix),u_opacity:new t.Uniform1f(e,r.u_opacity),u_image:new t.Uniform1i(e,r.u_image),u_pattern_tl_a:new t.Uniform2f(e,r.u_pattern_tl_a),u_pattern_br_a:new t.Uniform2f(e,r.u_pattern_br_a),u_pattern_tl_b:new t.Uniform2f(e,r.u_pattern_tl_b),u_pattern_br_b:new t.Uniform2f(e,r.u_pattern_br_b),u_texsize:new t.Uniform2f(e,r.u_texsize),u_mix:new t.Uniform1f(e,r.u_mix),u_pattern_size_a:new t.Uniform2f(e,r.u_pattern_size_a),u_pattern_size_b:new t.Uniform2f(e,r.u_pattern_size_b),u_scale_a:new t.Uniform1f(e,r.u_scale_a),u_scale_b:new t.Uniform1f(e,r.u_scale_b),u_pixel_coord_upper:new 
t.Uniform2f(e,r.u_pixel_coord_upper),u_pixel_coord_lower:new t.Uniform2f(e,r.u_pixel_coord_lower),u_tile_units_to_pixels:new t.Uniform1f(e,r.u_tile_units_to_pixels)}}};function Kr(e,r,n,i,a,o,s){for(var l=e.context,c=l.gl,u=e.useProgram("collisionBox"),f=[],h=0,p=0,d=0;d0){var _=t.create(),w=y;t.mul(_,v.placementInvProjMatrix,e.transform.glCoordMatrix),t.mul(_,_,v.placementViewportMatrix),f.push({circleArray:b,circleOffset:p,transform:w,invTransform:_}),p=h+=b.length/4}x&&u.draw(l,c.LINES,At.disabled,Mt.disabled,e.colorModeForRenderPass(),Et.disabled,Cr(y,e.transform,g),n.id,x.layoutVertexBuffer,x.indexBuffer,x.segments,null,e.transform.zoom,null,null,x.collisionVertexBuffer)}}if(s&&f.length){var T=e.useProgram("collisionCircle"),k=new t.StructArrayLayout2f1f2i16;k.resize(4*h),k._trim();for(var A=0,M=0,S=f;M=0&&(m[v.associatedIconIndex]={shiftedAnchor:S,angle:E})}else ue(v.numGlyphs,p)}if(f){d.clear();for(var C=e.icon.placedSymbolArray,P=0;P0){var s=t.browser.now(),l=(s-e.timeAdded)/o,c=r?(s-r.timeAdded)/o:-1,u=n.getSource(),f=a.coveringZoomLevel({tileSize:u.tileSize,roundZoom:u.roundZoom}),h=!r||Math.abs(r.tileID.overscaledZ-f)>Math.abs(e.tileID.overscaledZ-f),p=h&&e.refreshedUponExpiration?1:t.clamp(h?l:1-c,0,1);return e.refreshedUponExpiration&&l>=1&&(e.refreshedUponExpiration=!1),r?{opacity:1,mix:1-p}:{opacity:p,mix:0}}return{opacity:1,mix:0}}var un=new t.Color(1,0,0,1),fn=new t.Color(0,1,0,1),hn=new t.Color(0,0,1,1),pn=new t.Color(1,0,1,1),dn=new t.Color(0,1,1,1);function mn(t){var e=t.transform.padding;gn(t,t.transform.height-(e.top||0),3,un),gn(t,e.bottom||0,3,fn),vn(t,e.left||0,3,hn),vn(t,t.transform.width-(e.right||0),3,pn);var r=t.transform.centerPoint;!function(t,e,r,n){yn(t,e-1,r-10,2,20,n),yn(t,e-10,r-1,20,2,n)}(t,r.x,t.transform.height-r.y,dn)}function gn(t,e,r,n){yn(t,0,e+r/2,t.transform.width,r,n)}function vn(t,e,r,n){yn(t,e-r/2,0,r,t.transform.height,n)}function yn(e,r,n,i,a,o){var 
s=e.context,l=s.gl;l.enable(l.SCISSOR_TEST),l.scissor(r*t.browser.devicePixelRatio,n*t.browser.devicePixelRatio,i*t.browser.devicePixelRatio,a*t.browser.devicePixelRatio),s.clear({color:o}),l.disable(l.SCISSOR_TEST)}function xn(e,r,n){var i=e.context,a=i.gl,o=n.posMatrix,s=e.useProgram("debug"),l=At.disabled,c=Mt.disabled,u=e.colorModeForRenderPass();i.activeTexture.set(a.TEXTURE0),e.emptyTexture.bind(a.LINEAR,a.CLAMP_TO_EDGE),s.draw(i,a.LINE_STRIP,l,c,u,Et.disabled,Ir(o,t.Color.red),"$debug",e.debugBuffer,e.tileBorderIndexBuffer,e.debugSegments);var f=r.getTileByID(n.key).latestRawTileData,h=f&&f.byteLength||0,p=Math.floor(h/1024),d=r.getTile(n).tileSize,m=512/Math.min(d,512)*(n.overscaledZ/e.transform.zoom)*.5,g=n.canonical.toString();n.overscaledZ!==n.canonical.z&&(g+=" => "+n.overscaledZ),function(t,e){t.initDebugOverlayCanvas();var r=t.debugOverlayCanvas,n=t.context.gl,i=t.debugOverlayCanvas.getContext("2d");i.clearRect(0,0,r.width,r.height),i.shadowColor="white",i.shadowBlur=2,i.lineWidth=1.5,i.strokeStyle="white",i.textBaseline="top",i.font="bold 36px Open Sans, sans-serif",i.fillText(e,5,5),i.strokeText(e,5,5),t.debugOverlayTexture.update(r),t.debugOverlayTexture.bind(n.LINEAR,n.CLAMP_TO_EDGE)}(e,g+" "+p+"kb"),s.draw(i,a.TRIANGLES,l,c,St.alphaBlended,Et.disabled,Ir(o,t.Color.transparent,m),"$debug",e.debugBuffer,e.quadTriangleIndexBuffer,e.debugSegments)}var bn={symbol:function(e,r,n,i,a){if("translucent"===e.renderPass){var o=Mt.disabled,s=e.colorModeForRenderPass();n.layout.get("text-variable-anchor")&&function(e,r,n,i,a,o,s){for(var l=r.transform,c="map"===a,u="map"===o,f=0,h=e;f256&&this.clearStencil(),r.setColorMode(St.disabled),r.setDepthMode(At.disabled);var i=this.useProgram("clippingMask");this._tileClippingMaskIDs={};for(var a=0,o=e;a256&&this.clearStencil();var t=this.nextStencilID++,e=this.context.gl;return new Mt({func:e.NOTEQUAL,mask:255},t,255,e.KEEP,e.KEEP,e.REPLACE)},_n.prototype.stencilModeForClipping=function(t){var 
e=this.context.gl;return new Mt({func:e.EQUAL,mask:255},this._tileClippingMaskIDs[t.key],0,e.KEEP,e.KEEP,e.REPLACE)},_n.prototype.stencilConfigForOverlap=function(t){var e,r=this.context.gl,n=t.sort((function(t,e){return e.overscaledZ-t.overscaledZ})),i=n[n.length-1].overscaledZ,a=n[0].overscaledZ-i+1;if(a>1){this.currentStencilSource=void 0,this.nextStencilID+a>256&&this.clearStencil();for(var o={},s=0;s=0;this.currentLayer--){var w=this.style._layers[i[this.currentLayer]],T=a[w.source],k=u[w.source];this._renderTileClippingMasks(w,k),this.renderLayer(this,T,w,k)}for(this.renderPass="translucent",this.currentLayer=0;this.currentLayer0?e.pop():null},_n.prototype.isPatternMissing=function(t){if(!t)return!1;if(!t.from||!t.to)return!0;var e=this.imageManager.getPattern(t.from.toString()),r=this.imageManager.getPattern(t.to.toString());return!e||!r},_n.prototype.useProgram=function(t,e){this.cache=this.cache||{};var r=""+t+(e?e.cacheKey:"")+(this._showOverdrawInspector?"/overdraw":"");return this.cache[r]||(this.cache[r]=new _r(this.context,xr[t],e,Jr[t],this._showOverdrawInspector)),this.cache[r]},_n.prototype.setCustomLayerDefaults=function(){this.context.unbindVAO(),this.context.cullFace.setDefault(),this.context.activeTexture.setDefault(),this.context.pixelStoreUnpack.setDefault(),this.context.pixelStoreUnpackPremultiplyAlpha.setDefault(),this.context.pixelStoreUnpackFlipY.setDefault()},_n.prototype.setBaseState=function(){var t=this.context.gl;this.context.cullFace.set(!1),this.context.viewport.set([0,0,this.width,this.height]),this.context.blendEquation.set(t.FUNC_ADD)},_n.prototype.initDebugOverlayCanvas=function(){if(null==this.debugOverlayCanvas){this.debugOverlayCanvas=t.window.document.createElement("canvas"),this.debugOverlayCanvas.width=512,this.debugOverlayCanvas.height=512;var e=this.context.gl;this.debugOverlayTexture=new 
t.Texture(this.context,this.debugOverlayCanvas,e.RGBA)}},_n.prototype.destroy=function(){this.emptyTexture.destroy(),this.debugOverlayTexture&&this.debugOverlayTexture.destroy()};var wn=function(t,e){this.points=t,this.planes=e};wn.fromInvProjectionMatrix=function(e,r,n){var i=Math.pow(2,n),a=[[-1,1,-1,1],[1,1,-1,1],[1,-1,-1,1],[-1,-1,-1,1],[-1,1,1,1],[1,1,1,1],[1,-1,1,1],[-1,-1,1,1]].map((function(r){return t.transformMat4([],r,e)})).map((function(e){return t.scale$1([],e,1/e[3]/r*i)})),o=[[0,1,2],[6,5,4],[0,3,7],[2,1,5],[3,2,6],[0,4,5]].map((function(e){var r=t.sub([],a[e[0]],a[e[1]]),n=t.sub([],a[e[2]],a[e[1]]),i=t.normalize([],t.cross([],r,n)),o=-t.dot(i,a[e[1]]);return i.concat(o)}));return new wn(a,o)};var Tn=function(e,r){this.min=e,this.max=r,this.center=t.scale$2([],t.add([],this.min,this.max),.5)};Tn.prototype.quadrant=function(e){for(var r=[e%2==0,e<2],n=t.clone$2(this.min),i=t.clone$2(this.max),a=0;a=0;if(0===o)return 0;o!==r.length&&(n=!1)}if(n)return 2;for(var l=0;l<3;l++){for(var c=Number.MAX_VALUE,u=-Number.MAX_VALUE,f=0;fthis.max[l]-this.min[l])return 0}return 1};var kn=function(t,e,r,n){if(void 0===t&&(t=0),void 0===e&&(e=0),void 0===r&&(r=0),void 0===n&&(n=0),isNaN(t)||t<0||isNaN(e)||e<0||isNaN(r)||r<0||isNaN(n)||n<0)throw new Error("Invalid value for edge-insets, top, bottom, left and right must all be numbers");this.top=t,this.bottom=e,this.left=r,this.right=n};kn.prototype.interpolate=function(e,r,n){return null!=r.top&&null!=e.top&&(this.top=t.number(e.top,r.top,n)),null!=r.bottom&&null!=e.bottom&&(this.bottom=t.number(e.bottom,r.bottom,n)),null!=r.left&&null!=e.left&&(this.left=t.number(e.left,r.left,n)),null!=r.right&&null!=e.right&&(this.right=t.number(e.right,r.right,n)),this},kn.prototype.getCenter=function(e,r){var n=t.clamp((this.left+e-this.right)/2,0,e),i=t.clamp((this.top+r-this.bottom)/2,0,r);return new t.Point(n,i)},kn.prototype.equals=function(t){return 
this.top===t.top&&this.bottom===t.bottom&&this.left===t.left&&this.right===t.right},kn.prototype.clone=function(){return new kn(this.top,this.bottom,this.left,this.right)},kn.prototype.toJSON=function(){return{top:this.top,bottom:this.bottom,left:this.left,right:this.right}};var An=function(e,r,n,i,a){this.tileSize=512,this.maxValidLatitude=85.051129,this._renderWorldCopies=void 0===a||a,this._minZoom=e||0,this._maxZoom=r||22,this._minPitch=null==n?0:n,this._maxPitch=null==i?60:i,this.setMaxBounds(),this.width=0,this.height=0,this._center=new t.LngLat(0,0),this.zoom=0,this.angle=0,this._fov=.6435011087932844,this._pitch=0,this._unmodified=!0,this._edgeInsets=new kn,this._posMatrixCache={},this._alignedPosMatrixCache={}},Mn={minZoom:{configurable:!0},maxZoom:{configurable:!0},minPitch:{configurable:!0},maxPitch:{configurable:!0},renderWorldCopies:{configurable:!0},worldSize:{configurable:!0},centerOffset:{configurable:!0},size:{configurable:!0},bearing:{configurable:!0},pitch:{configurable:!0},fov:{configurable:!0},zoom:{configurable:!0},center:{configurable:!0},padding:{configurable:!0},centerPoint:{configurable:!0},unmodified:{configurable:!0},point:{configurable:!0}};An.prototype.clone=function(){var t=new An(this._minZoom,this._maxZoom,this._minPitch,this.maxPitch,this._renderWorldCopies);return t.tileSize=this.tileSize,t.latRange=this.latRange,t.width=this.width,t.height=this.height,t._center=this._center,t.zoom=this.zoom,t.angle=this.angle,t._fov=this._fov,t._pitch=this._pitch,t._unmodified=this._unmodified,t._edgeInsets=this._edgeInsets.clone(),t._calcMatrices(),t},Mn.minZoom.get=function(){return this._minZoom},Mn.minZoom.set=function(t){this._minZoom!==t&&(this._minZoom=t,this.zoom=Math.max(this.zoom,t))},Mn.maxZoom.get=function(){return this._maxZoom},Mn.maxZoom.set=function(t){this._maxZoom!==t&&(this._maxZoom=t,this.zoom=Math.min(this.zoom,t))},Mn.minPitch.get=function(){return 
this._minPitch},Mn.minPitch.set=function(t){this._minPitch!==t&&(this._minPitch=t,this.pitch=Math.max(this.pitch,t))},Mn.maxPitch.get=function(){return this._maxPitch},Mn.maxPitch.set=function(t){this._maxPitch!==t&&(this._maxPitch=t,this.pitch=Math.min(this.pitch,t))},Mn.renderWorldCopies.get=function(){return this._renderWorldCopies},Mn.renderWorldCopies.set=function(t){void 0===t?t=!0:null===t&&(t=!1),this._renderWorldCopies=t},Mn.worldSize.get=function(){return this.tileSize*this.scale},Mn.centerOffset.get=function(){return this.centerPoint._sub(this.size._div(2))},Mn.size.get=function(){return new t.Point(this.width,this.height)},Mn.bearing.get=function(){return-this.angle/Math.PI*180},Mn.bearing.set=function(e){var r=-t.wrap(e,-180,180)*Math.PI/180;this.angle!==r&&(this._unmodified=!1,this.angle=r,this._calcMatrices(),this.rotationMatrix=t.create$2(),t.rotate(this.rotationMatrix,this.rotationMatrix,this.angle))},Mn.pitch.get=function(){return this._pitch/Math.PI*180},Mn.pitch.set=function(e){var r=t.clamp(e,this.minPitch,this.maxPitch)/180*Math.PI;this._pitch!==r&&(this._unmodified=!1,this._pitch=r,this._calcMatrices())},Mn.fov.get=function(){return this._fov/Math.PI*180},Mn.fov.set=function(t){t=Math.max(.01,Math.min(60,t)),this._fov!==t&&(this._unmodified=!1,this._fov=t/180*Math.PI,this._calcMatrices())},Mn.zoom.get=function(){return this._zoom},Mn.zoom.set=function(t){var e=Math.min(Math.max(t,this.minZoom),this.maxZoom);this._zoom!==e&&(this._unmodified=!1,this._zoom=e,this.scale=this.zoomScale(e),this.tileZoom=Math.floor(e),this.zoomFraction=e-this.tileZoom,this._constrain(),this._calcMatrices())},Mn.center.get=function(){return this._center},Mn.center.set=function(t){t.lat===this._center.lat&&t.lng===this._center.lng||(this._unmodified=!1,this._center=t,this._constrain(),this._calcMatrices())},Mn.padding.get=function(){return 
this._edgeInsets.toJSON()},Mn.padding.set=function(t){this._edgeInsets.equals(t)||(this._unmodified=!1,this._edgeInsets.interpolate(this._edgeInsets,t,1),this._calcMatrices())},Mn.centerPoint.get=function(){return this._edgeInsets.getCenter(this.width,this.height)},An.prototype.isPaddingEqual=function(t){return this._edgeInsets.equals(t)},An.prototype.interpolatePadding=function(t,e,r){this._unmodified=!1,this._edgeInsets.interpolate(t,e,r),this._constrain(),this._calcMatrices()},An.prototype.coveringZoomLevel=function(t){var e=(t.roundZoom?Math.round:Math.floor)(this.zoom+this.scaleZoom(this.tileSize/t.tileSize));return Math.max(0,e)},An.prototype.getVisibleUnwrappedCoordinates=function(e){var r=[new t.UnwrappedTileID(0,e)];if(this._renderWorldCopies)for(var n=this.pointCoordinate(new t.Point(0,0)),i=this.pointCoordinate(new t.Point(this.width,0)),a=this.pointCoordinate(new t.Point(this.width,this.height)),o=this.pointCoordinate(new t.Point(0,this.height)),s=Math.floor(Math.min(n.x,i.x,a.x,o.x)),l=Math.floor(Math.max(n.x,i.x,a.x,o.x)),c=s-1;c<=l+1;c++)0!==c&&r.push(new t.UnwrappedTileID(c,e));return r},An.prototype.coveringTiles=function(e){var r=this.coveringZoomLevel(e),n=r;if(void 0!==e.minzoom&&re.maxzoom&&(r=e.maxzoom);var i=t.MercatorCoordinate.fromLngLat(this.center),a=Math.pow(2,r),o=[a*i.x,a*i.y,0],s=wn.fromInvProjectionMatrix(this.invProjMatrix,this.worldSize,r),l=e.minzoom||0;this.pitch<=60&&this._edgeInsets.top<.1&&(l=r);var c=function(t){return{aabb:new Tn([t*a,0,0],[(t+1)*a,a,0]),zoom:0,x:0,y:0,wrap:t,fullyVisible:!1}},u=[],f=[],h=r,p=e.reparseOverscaled?n:r;if(this._renderWorldCopies)for(var d=1;d<=3;d++)u.push(c(-d)),u.push(c(d));for(u.push(c(0));u.length>0;){var m=u.pop(),g=m.x,v=m.y,y=m.fullyVisible;if(!y){var x=m.aabb.intersects(s);if(0===x)continue;y=2===x}var b=m.aabb.distanceX(o),_=m.aabb.distanceY(o),w=Math.max(Math.abs(b),Math.abs(_)),T=3+(1<T&&m.zoom>=l)f.push({tileID:new 
t.OverscaledTileID(m.zoom===h?p:m.zoom,m.wrap,m.zoom,g,v),distanceSq:t.sqrLen([o[0]-.5-g,o[1]-.5-v])});else for(var k=0;k<4;k++){var A=(g<<1)+k%2,M=(v<<1)+(k>>1);u.push({aabb:m.aabb.quadrant(k),zoom:m.zoom+1,x:A,y:M,wrap:m.wrap,fullyVisible:y})}}return f.sort((function(t,e){return t.distanceSq-e.distanceSq})).map((function(t){return t.tileID}))},An.prototype.resize=function(t,e){this.width=t,this.height=e,this.pixelsToGLUnits=[2/t,-2/e],this._constrain(),this._calcMatrices()},Mn.unmodified.get=function(){return this._unmodified},An.prototype.zoomScale=function(t){return Math.pow(2,t)},An.prototype.scaleZoom=function(t){return Math.log(t)/Math.LN2},An.prototype.project=function(e){var r=t.clamp(e.lat,-this.maxValidLatitude,this.maxValidLatitude);return new t.Point(t.mercatorXfromLng(e.lng)*this.worldSize,t.mercatorYfromLat(r)*this.worldSize)},An.prototype.unproject=function(e){return new t.MercatorCoordinate(e.x/this.worldSize,e.y/this.worldSize).toLngLat()},Mn.point.get=function(){return this.project(this.center)},An.prototype.setLocationAtPoint=function(e,r){var n=this.pointCoordinate(r),i=this.pointCoordinate(this.centerPoint),a=this.locationCoordinate(e),o=new t.MercatorCoordinate(a.x-(n.x-i.x),a.y-(n.y-i.y));this.center=this.coordinateLocation(o),this._renderWorldCopies&&(this.center=this.center.wrap())},An.prototype.locationPoint=function(t){return this.coordinatePoint(this.locationCoordinate(t))},An.prototype.pointLocation=function(t){return this.coordinateLocation(this.pointCoordinate(t))},An.prototype.locationCoordinate=function(e){return t.MercatorCoordinate.fromLngLat(e)},An.prototype.coordinateLocation=function(t){return t.toLngLat()},An.prototype.pointCoordinate=function(e){var r=[e.x,e.y,0,1],n=[e.x,e.y,1,1];t.transformMat4(r,r,this.pixelMatrixInverse),t.transformMat4(n,n,this.pixelMatrixInverse);var i=r[3],a=n[3],o=r[0]/i,s=n[0]/a,l=r[1]/i,c=n[1]/a,u=r[2]/i,f=n[2]/a,h=u===f?0:(0-u)/(f-u);return new 
t.MercatorCoordinate(t.number(o,s,h)/this.worldSize,t.number(l,c,h)/this.worldSize)},An.prototype.coordinatePoint=function(e){var r=[e.x*this.worldSize,e.y*this.worldSize,0,1];return t.transformMat4(r,r,this.pixelMatrix),new t.Point(r[0]/r[3],r[1]/r[3])},An.prototype.getBounds=function(){return(new t.LngLatBounds).extend(this.pointLocation(new t.Point(0,0))).extend(this.pointLocation(new t.Point(this.width,0))).extend(this.pointLocation(new t.Point(this.width,this.height))).extend(this.pointLocation(new t.Point(0,this.height)))},An.prototype.getMaxBounds=function(){return this.latRange&&2===this.latRange.length&&this.lngRange&&2===this.lngRange.length?new t.LngLatBounds([this.lngRange[0],this.latRange[0]],[this.lngRange[1],this.latRange[1]]):null},An.prototype.setMaxBounds=function(t){t?(this.lngRange=[t.getWest(),t.getEast()],this.latRange=[t.getSouth(),t.getNorth()],this._constrain()):(this.lngRange=null,this.latRange=[-this.maxValidLatitude,this.maxValidLatitude])},An.prototype.calculatePosMatrix=function(e,r){void 0===r&&(r=!1);var n=e.key,i=r?this._alignedPosMatrixCache:this._posMatrixCache;if(i[n])return i[n];var a=e.canonical,o=this.worldSize/this.zoomScale(a.z),s=a.x+Math.pow(2,a.z)*e.wrap,l=t.identity(new Float64Array(16));return t.translate(l,l,[s*o,a.y*o,0]),t.scale(l,l,[o/t.EXTENT,o/t.EXTENT,1]),t.multiply(l,r?this.alignedProjMatrix:this.projMatrix,l),i[n]=new Float32Array(l),i[n]},An.prototype.customLayerMatrix=function(){return this.mercatorMatrix.slice()},An.prototype._constrain=function(){if(this.center&&this.width&&this.height&&!this._constraining){this._constraining=!0;var e,r,n,i,a=-90,o=90,s=-180,l=180,c=this.size,u=this._unmodified;if(this.latRange){var f=this.latRange;a=t.mercatorYfromLat(f[1])*this.worldSize,e=(o=t.mercatorYfromLat(f[0])*this.worldSize)-ao&&(i=o-g)}if(this.lngRange){var v=p.x,y=c.x/2;v-yl&&(n=l-y)}void 0===n&&void 0===i||(this.center=this.unproject(new t.Point(void 0!==n?n:p.x,void 
0!==i?i:p.y))),this._unmodified=u,this._constraining=!1}},An.prototype._calcMatrices=function(){if(this.height){var e=this._fov/2,r=this.centerOffset;this.cameraToCenterDistance=.5/Math.tan(e)*this.height;var n=Math.PI/2+this._pitch,i=this._fov*(.5+r.y/this.height),a=Math.sin(i)*this.cameraToCenterDistance/Math.sin(t.clamp(Math.PI-n-i,.01,Math.PI-.01)),o=this.point,s=o.x,l=o.y,c=1.01*(Math.cos(Math.PI/2-this._pitch)*a+this.cameraToCenterDistance),u=this.height/50,f=new Float64Array(16);t.perspective(f,this._fov,this.width/this.height,u,c),f[8]=2*-r.x/this.width,f[9]=2*r.y/this.height,t.scale(f,f,[1,-1,1]),t.translate(f,f,[0,0,-this.cameraToCenterDistance]),t.rotateX(f,f,this._pitch),t.rotateZ(f,f,this.angle),t.translate(f,f,[-s,-l,0]),this.mercatorMatrix=t.scale([],f,[this.worldSize,this.worldSize,this.worldSize]),t.scale(f,f,[1,1,t.mercatorZfromAltitude(1,this.center.lat)*this.worldSize,1]),this.projMatrix=f,this.invProjMatrix=t.invert([],this.projMatrix);var h=this.width%2/2,p=this.height%2/2,d=Math.cos(this.angle),m=Math.sin(this.angle),g=s-Math.round(s)+d*h+m*p,v=l-Math.round(l)+d*p+m*h,y=new Float64Array(f);if(t.translate(y,y,[g>.5?g-1:g,v>.5?v-1:v,0]),this.alignedProjMatrix=y,f=t.create(),t.scale(f,f,[this.width/2,-this.height/2,1]),t.translate(f,f,[1,-1,0]),this.labelPlaneMatrix=f,f=t.create(),t.scale(f,f,[1,-1,1]),t.translate(f,f,[-1,-1,0]),t.scale(f,f,[2/this.width,2/this.height,1]),this.glCoordMatrix=f,this.pixelMatrix=t.multiply(new Float64Array(16),this.labelPlaneMatrix,this.projMatrix),!(f=t.invert(new Float64Array(16),this.pixelMatrix)))throw new Error("failed to invert matrix");this.pixelMatrixInverse=f,this._posMatrixCache={},this._alignedPosMatrixCache={}}},An.prototype.maxPitchScaleFactor=function(){if(!this.pixelMatrixInverse)return 1;var e=this.pointCoordinate(new t.Point(0,0)),r=[e.x*this.worldSize,e.y*this.worldSize,0,1];return t.transformMat4(r,r,this.pixelMatrix)[3]/this.cameraToCenterDistance},An.prototype.getCameraPoint=function(){var 
e=this._pitch,r=Math.tan(e)*(this.cameraToCenterDistance||1);return this.centerPoint.add(new t.Point(0,r))},An.prototype.getCameraQueryGeometry=function(e){var r=this.getCameraPoint();if(1===e.length)return[e[0],r];for(var n=r.x,i=r.y,a=r.x,o=r.y,s=0,l=e;s=3&&!t.some((function(t){return isNaN(t)}))){var e=this._map.dragRotate.isEnabled()&&this._map.touchZoomRotate.isEnabled()?+(t[3]||0):this._map.getBearing();return this._map.jumpTo({center:[+t[2],+t[1]],zoom:+t[0],bearing:e,pitch:+(t[4]||0)}),!0}return!1},Sn.prototype._updateHashUnthrottled=function(){var e=this.getHashString();try{t.window.history.replaceState(t.window.history.state,"",e)}catch(t){}};var En={linearity:.3,easing:t.bezier(0,0,.3,1)},Ln=t.extend({deceleration:2500,maxSpeed:1400},En),Cn=t.extend({deceleration:20,maxSpeed:1400},En),Pn=t.extend({deceleration:1e3,maxSpeed:360},En),In=t.extend({deceleration:1e3,maxSpeed:90},En),On=function(t){this._map=t,this.clear()};function zn(t,e){(!t.duration||t.duration0&&r-e[0].time>160;)e.shift()},On.prototype._onMoveEnd=function(e){if(this._drainInertiaBuffer(),!(this._inertiaBuffer.length<2)){for(var r={zoom:0,bearing:0,pitch:0,pan:new t.Point(0,0),pinchAround:void 0,around:void 0},n=0,i=this._inertiaBuffer;n=this._clickTolerance||this._map.fire(new Rn(t.type,this._map,t))},Nn.prototype.dblclick=function(t){return this._firePreventable(new Rn(t.type,this._map,t))},Nn.prototype.mouseover=function(t){this._map.fire(new Rn(t.type,this._map,t))},Nn.prototype.mouseout=function(t){this._map.fire(new Rn(t.type,this._map,t))},Nn.prototype.touchstart=function(t){return this._firePreventable(new Fn(t.type,this._map,t))},Nn.prototype.touchmove=function(t){this._map.fire(new Fn(t.type,this._map,t))},Nn.prototype.touchend=function(t){this._map.fire(new Fn(t.type,this._map,t))},Nn.prototype.touchcancel=function(t){this._map.fire(new 
Fn(t.type,this._map,t))},Nn.prototype._firePreventable=function(t){if(this._map.fire(t),t.defaultPrevented)return{}},Nn.prototype.isEnabled=function(){return!0},Nn.prototype.isActive=function(){return!1},Nn.prototype.enable=function(){},Nn.prototype.disable=function(){};var jn=function(t){this._map=t};jn.prototype.reset=function(){this._delayContextMenu=!1,delete this._contextMenuEvent},jn.prototype.mousemove=function(t){this._map.fire(new Rn(t.type,this._map,t))},jn.prototype.mousedown=function(){this._delayContextMenu=!0},jn.prototype.mouseup=function(){this._delayContextMenu=!1,this._contextMenuEvent&&(this._map.fire(new Rn("contextmenu",this._map,this._contextMenuEvent)),delete this._contextMenuEvent)},jn.prototype.contextmenu=function(t){this._delayContextMenu?this._contextMenuEvent=t:this._map.fire(new Rn(t.type,this._map,t)),this._map.listens("contextmenu")&&t.preventDefault()},jn.prototype.isEnabled=function(){return!0},jn.prototype.isActive=function(){return!1},jn.prototype.enable=function(){},jn.prototype.disable=function(){};var Un=function(t,e){this._map=t,this._el=t.getCanvasContainer(),this._container=t.getContainer(),this._clickTolerance=e.clickTolerance||1};function Vn(t,e){for(var r={},n=0;nthis.numTouches)&&(this.aborted=!0),this.aborted||(void 0===this.startTime&&(this.startTime=e.timeStamp),n.length===this.numTouches&&(this.centroid=function(e){for(var r=new t.Point(0,0),n=0,i=e;n30)&&(this.aborted=!0)}}},Hn.prototype.touchend=function(t,e,r){if((!this.centroid||t.timeStamp-this.startTime>500)&&(this.aborted=!0),0===r.length){var n=!this.aborted&&this.centroid;if(this.reset(),n)return n}};var qn=function(t){this.singleTap=new Hn(t),this.numTaps=t.numTaps,this.reset()};qn.prototype.reset=function(){this.lastTime=1/0,delete 
this.lastTap,this.count=0,this.singleTap.reset()},qn.prototype.touchstart=function(t,e,r){this.singleTap.touchstart(t,e,r)},qn.prototype.touchmove=function(t,e,r){this.singleTap.touchmove(t,e,r)},qn.prototype.touchend=function(t,e,r){var n=this.singleTap.touchend(t,e,r);if(n){var i=t.timeStamp-this.lastTime<500,a=!this.lastTap||this.lastTap.dist(n)<30;if(i&&a||this.reset(),this.count++,this.lastTime=t.timeStamp,this.lastTap=n,this.count===this.numTaps)return this.reset(),n}};var Gn=function(){this._zoomIn=new qn({numTouches:1,numTaps:2}),this._zoomOut=new qn({numTouches:2,numTaps:1}),this.reset()};Gn.prototype.reset=function(){this._active=!1,this._zoomIn.reset(),this._zoomOut.reset()},Gn.prototype.touchstart=function(t,e,r){this._zoomIn.touchstart(t,e,r),this._zoomOut.touchstart(t,e,r)},Gn.prototype.touchmove=function(t,e,r){this._zoomIn.touchmove(t,e,r),this._zoomOut.touchmove(t,e,r)},Gn.prototype.touchend=function(t,e,r){var n=this,i=this._zoomIn.touchend(t,e,r),a=this._zoomOut.touchend(t,e,r);return i?(this._active=!0,t.preventDefault(),setTimeout((function(){return n.reset()}),0),{cameraAnimation:function(e){return e.easeTo({duration:300,zoom:e.getZoom()+1,around:e.unproject(i)},{originalEvent:t})}}):a?(this._active=!0,t.preventDefault(),setTimeout((function(){return n.reset()}),0),{cameraAnimation:function(e){return e.easeTo({duration:300,zoom:e.getZoom()-1,around:e.unproject(a)},{originalEvent:t})}}):void 0},Gn.prototype.touchcancel=function(){this.reset()},Gn.prototype.enable=function(){this._enabled=!0},Gn.prototype.disable=function(){this._enabled=!1,this.reset()},Gn.prototype.isEnabled=function(){return this._enabled},Gn.prototype.isActive=function(){return this._active};var Yn=function(t){this.reset(),this._clickTolerance=t.clickTolerance||1};Yn.prototype.reset=function(){this._active=!1,this._moved=!1,delete this._lastPoint,delete 
this._eventButton},Yn.prototype._correctButton=function(t,e){return!1},Yn.prototype._move=function(t,e){return{}},Yn.prototype.mousedown=function(t,e){if(!this._lastPoint){var n=r.mouseButton(t);this._correctButton(t,n)&&(this._lastPoint=e,this._eventButton=n)}},Yn.prototype.mousemoveWindow=function(t,e){var r=this._lastPoint;if(r&&(t.preventDefault(),this._moved||!(e.dist(r)0&&(this._active=!0);var i=Vn(n,r),a=new t.Point(0,0),o=new t.Point(0,0),s=0;for(var l in i){var c=i[l],u=this._touches[l];u&&(a._add(c),o._add(c.sub(u)),s++,i[l]=c)}if(this._touches=i,!(sMath.abs(t.x)}var ii=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype.reset=function(){t.prototype.reset.call(this),this._valid=void 0,delete this._firstMove,delete this._lastPoints},e.prototype._start=function(t){this._lastPoints=t,ni(t[0].sub(t[1]))&&(this._valid=!1)},e.prototype._move=function(t,e,r){var n=t[0].sub(this._lastPoints[0]),i=t[1].sub(this._lastPoints[1]);if(this._valid=this.gestureBeginsVertically(n,i,r.timeStamp),this._valid){this._lastPoints=t,this._active=!0;return{pitchDelta:-.5*((n.y+i.y)/2)}}},e.prototype.gestureBeginsVertically=function(t,e,r){if(void 0!==this._valid)return this._valid;var n=t.mag()>=2,i=e.mag()>=2;if(n||i){if(!n||!i)return void 0===this._firstMove&&(this._firstMove=r),r-this._firstMove<100&&void 0;var a=t.y>0==e.y>0;return ni(t)&&ni(e)&&a}},e}(Kn),ai={panStep:100,bearingStep:15,pitchStep:10},oi=function(){var t=ai;this._panStep=t.panStep,this._bearingStep=t.bearingStep,this._pitchStep=t.pitchStep};function si(t){return t*(2-t)}oi.prototype.reset=function(){this._active=!1},oi.prototype.keydown=function(t){var e=this;if(!(t.altKey||t.ctrlKey||t.metaKey)){var r=0,n=0,i=0,a=0,o=0;switch(t.keyCode){case 61:case 107:case 171:case 187:r=1;break;case 189:case 109:case 173:r=-1;break;case 37:t.shiftKey?n=-1:(t.preventDefault(),a=-1);break;case 
39:t.shiftKey?n=1:(t.preventDefault(),a=1);break;case 38:t.shiftKey?i=1:(t.preventDefault(),o=-1);break;case 40:t.shiftKey?i=-1:(t.preventDefault(),o=1);break;default:return}return{cameraAnimation:function(s){var l=s.getZoom();s.easeTo({duration:300,easeId:"keyboardHandler",easing:si,zoom:r?Math.round(l)+r*(t.shiftKey?2:1):l,bearing:s.getBearing()+n*e._bearingStep,pitch:s.getPitch()+i*e._pitchStep,offset:[-a*e._panStep,-o*e._panStep],center:s.getCenter()},{originalEvent:t})}}}},oi.prototype.enable=function(){this._enabled=!0},oi.prototype.disable=function(){this._enabled=!1,this.reset()},oi.prototype.isEnabled=function(){return this._enabled},oi.prototype.isActive=function(){return this._active};var li=function(e,r){this._map=e,this._el=e.getCanvasContainer(),this._handler=r,this._delta=0,this._defaultZoomRate=.01,this._wheelZoomRate=1/450,t.bindAll(["_onWheel","_onTimeout","_onScrollFrame","_onScrollFinished"],this)};li.prototype.setZoomRate=function(t){this._defaultZoomRate=t},li.prototype.setWheelZoomRate=function(t){this._wheelZoomRate=t},li.prototype.isEnabled=function(){return!!this._enabled},li.prototype.isActive=function(){return!!this._active||void 0!==this._finishTimeout},li.prototype.isZooming=function(){return!!this._zooming},li.prototype.enable=function(t){this.isEnabled()||(this._enabled=!0,this._aroundCenter=t&&"center"===t.around)},li.prototype.disable=function(){this.isEnabled()&&(this._enabled=!1)},li.prototype.wheel=function(e){if(this.isEnabled()){var 
r=e.deltaMode===t.window.WheelEvent.DOM_DELTA_LINE?40*e.deltaY:e.deltaY,n=t.browser.now(),i=n-(this._lastWheelEventTime||0);this._lastWheelEventTime=n,0!==r&&r%4.000244140625==0?this._type="wheel":0!==r&&Math.abs(r)<4?this._type="trackpad":i>400?(this._type=null,this._lastValue=r,this._timeout=setTimeout(this._onTimeout,40,e)):this._type||(this._type=Math.abs(i*r)<200?"trackpad":"wheel",this._timeout&&(clearTimeout(this._timeout),this._timeout=null,r+=this._lastValue)),e.shiftKey&&r&&(r/=4),this._type&&(this._lastWheelEvent=e,this._delta-=r,this._active||this._start(e)),e.preventDefault()}},li.prototype._onTimeout=function(t){this._type="wheel",this._delta-=this._lastValue,this._active||this._start(t)},li.prototype._start=function(e){if(this._delta){this._frameId&&(this._frameId=null),this._active=!0,this.isZooming()||(this._zooming=!0),this._finishTimeout&&(clearTimeout(this._finishTimeout),delete this._finishTimeout);var n=r.mousePos(this._el,e);this._around=t.LngLat.convert(this._aroundCenter?this._map.getCenter():this._map.unproject(n)),this._aroundPoint=this._map.transform.locationPoint(this._around),this._frameId||(this._frameId=!0,this._handler._triggerRenderFrame())}},li.prototype.renderFrame=function(){return this._onScrollFrame()},li.prototype._onScrollFrame=function(){var e=this;if(this._frameId&&(this._frameId=null,this.isActive())){var r=this._map.transform;if(0!==this._delta){var n="wheel"===this._type&&Math.abs(this._delta)>4.000244140625?this._wheelZoomRate:this._defaultZoomRate,i=2/(1+Math.exp(-Math.abs(this._delta*n)));this._delta<0&&0!==i&&(i=1/i);var a="number"==typeof this._targetZoom?r.zoomScale(this._targetZoom):r.scale;this._targetZoom=Math.min(r.maxZoom,Math.max(r.minZoom,r.scaleZoom(a*i))),"wheel"===this._type&&(this._startZoom=r.zoom,this._easing=this._smoothOutEasing(200)),this._delta=0}var o,s="number"==typeof this._targetZoom?this._targetZoom:r.zoom,l=this._startZoom,c=this._easing,u=!1;if("wheel"===this._type&&l&&c){var 
f=Math.min((t.browser.now()-this._lastWheelEventTime)/200,1),h=c(f);o=t.number(l,s,h),f<1?this._frameId||(this._frameId=!0):u=!0}else o=s,u=!0;return this._active=!0,u&&(this._active=!1,this._finishTimeout=setTimeout((function(){e._zooming=!1,e._handler._triggerRenderFrame(),delete e._targetZoom,delete e._finishTimeout}),200)),{noInertia:!0,needsRenderFrame:!u,zoomDelta:o-r.zoom,around:this._aroundPoint,originalEvent:this._lastWheelEvent}}},li.prototype._smoothOutEasing=function(e){var r=t.ease;if(this._prevEase){var n=this._prevEase,i=(t.browser.now()-n.start)/n.duration,a=n.easing(i+.01)-n.easing(i),o=.27/Math.sqrt(a*a+1e-4)*.01,s=Math.sqrt(.0729-o*o);r=t.bezier(o,s,.25,1)}return this._prevEase={start:t.browser.now(),duration:e,easing:r},r},li.prototype.reset=function(){this._active=!1};var ci=function(t,e){this._clickZoom=t,this._tapZoom=e};ci.prototype.enable=function(){this._clickZoom.enable(),this._tapZoom.enable()},ci.prototype.disable=function(){this._clickZoom.disable(),this._tapZoom.disable()},ci.prototype.isEnabled=function(){return this._clickZoom.isEnabled()&&this._tapZoom.isEnabled()},ci.prototype.isActive=function(){return this._clickZoom.isActive()||this._tapZoom.isActive()};var ui=function(){this.reset()};ui.prototype.reset=function(){this._active=!1},ui.prototype.dblclick=function(t,e){return t.preventDefault(),{cameraAnimation:function(r){r.easeTo({duration:300,zoom:r.getZoom()+(t.shiftKey?-1:1),around:r.unproject(e)},{originalEvent:t})}}},ui.prototype.enable=function(){this._enabled=!0},ui.prototype.disable=function(){this._enabled=!1,this.reset()},ui.prototype.isEnabled=function(){return this._enabled},ui.prototype.isActive=function(){return this._active};var fi=function(){this._tap=new qn({numTouches:1,numTaps:1}),this.reset()};fi.prototype.reset=function(){this._active=!1,delete this._swipePoint,delete this._swipeTouch,delete 
this._tapTime,this._tap.reset()},fi.prototype.touchstart=function(t,e,r){this._swipePoint||(this._tapTime&&t.timeStamp-this._tapTime>500&&this.reset(),this._tapTime?r.length>0&&(this._swipePoint=e[0],this._swipeTouch=r[0].identifier):this._tap.touchstart(t,e,r))},fi.prototype.touchmove=function(t,e,r){if(this._tapTime){if(this._swipePoint){if(r[0].identifier!==this._swipeTouch)return;var n=e[0],i=n.y-this._swipePoint.y;return this._swipePoint=n,t.preventDefault(),this._active=!0,{zoomDelta:i/128}}}else this._tap.touchmove(t,e,r)},fi.prototype.touchend=function(t,e,r){this._tapTime?this._swipePoint&&0===r.length&&this.reset():this._tap.touchend(t,e,r)&&(this._tapTime=t.timeStamp)},fi.prototype.touchcancel=function(){this.reset()},fi.prototype.enable=function(){this._enabled=!0},fi.prototype.disable=function(){this._enabled=!1,this.reset()},fi.prototype.isEnabled=function(){return this._enabled},fi.prototype.isActive=function(){return this._active};var hi=function(t,e,r){this._el=t,this._mousePan=e,this._touchPan=r};hi.prototype.enable=function(t){this._inertiaOptions=t||{},this._mousePan.enable(),this._touchPan.enable(),this._el.classList.add("mapboxgl-touch-drag-pan")},hi.prototype.disable=function(){this._mousePan.disable(),this._touchPan.disable(),this._el.classList.remove("mapboxgl-touch-drag-pan")},hi.prototype.isEnabled=function(){return this._mousePan.isEnabled()&&this._touchPan.isEnabled()},hi.prototype.isActive=function(){return this._mousePan.isActive()||this._touchPan.isActive()};var pi=function(t,e,r){this._pitchWithRotate=t.pitchWithRotate,this._mouseRotate=e,this._mousePitch=r};pi.prototype.enable=function(){this._mouseRotate.enable(),this._pitchWithRotate&&this._mousePitch.enable()},pi.prototype.disable=function(){this._mouseRotate.disable(),this._mousePitch.disable()},pi.prototype.isEnabled=function(){return this._mouseRotate.isEnabled()&&(!this._pitchWithRotate||this._mousePitch.isEnabled())},pi.prototype.isActive=function(){return 
this._mouseRotate.isActive()||this._mousePitch.isActive()};var di=function(t,e,r,n){this._el=t,this._touchZoom=e,this._touchRotate=r,this._tapDragZoom=n,this._rotationDisabled=!1,this._enabled=!0};di.prototype.enable=function(t){this._touchZoom.enable(t),this._rotationDisabled||this._touchRotate.enable(t),this._tapDragZoom.enable(),this._el.classList.add("mapboxgl-touch-zoom-rotate")},di.prototype.disable=function(){this._touchZoom.disable(),this._touchRotate.disable(),this._tapDragZoom.disable(),this._el.classList.remove("mapboxgl-touch-zoom-rotate")},di.prototype.isEnabled=function(){return this._touchZoom.isEnabled()&&(this._rotationDisabled||this._touchRotate.isEnabled())&&this._tapDragZoom.isEnabled()},di.prototype.isActive=function(){return this._touchZoom.isActive()||this._touchRotate.isActive()||this._tapDragZoom.isActive()},di.prototype.disableRotation=function(){this._rotationDisabled=!0,this._touchRotate.disable()},di.prototype.enableRotation=function(){this._rotationDisabled=!1,this._touchZoom.isEnabled()&&this._touchRotate.enable()};var mi=function(t){return t.zoom||t.drag||t.pitch||t.rotate},gi=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e}(t.Event);function vi(t){return t.panDelta&&t.panDelta.mag()||t.zoomDelta||t.bearingDelta||t.pitchDelta}var yi=function(e,n){this._map=e,this._el=this._map.getCanvasContainer(),this._handlers=[],this._handlersById={},this._changes=[],this._inertia=new On(e),this._bearingSnap=n.bearingSnap,this._previousActiveHandlers={},this._eventsInProgress={},this._addDefaultHandlers(n),t.bindAll(["handleEvent","handleWindowEvent"],this);var i=this._el;this._listeners=[[i,"touchstart",{passive:!1}],[i,"touchmove",{passive:!1}],[i,"touchend",void 0],[i,"touchcancel",void 0],[i,"mousedown",void 0],[i,"mousemove",void 0],[i,"mouseup",void 0],[t.window.document,"mousemove",{capture:!0}],[t.window.document,"mouseup",void 
0],[i,"mouseover",void 0],[i,"mouseout",void 0],[i,"dblclick",void 0],[i,"click",void 0],[i,"keydown",{capture:!1}],[i,"keyup",void 0],[i,"wheel",{passive:!1}],[i,"contextmenu",void 0],[t.window,"blur",void 0]];for(var a=0,o=this._listeners;aa?Math.min(2,_):Math.max(.5,_),w=Math.pow(g,1-e),T=i.unproject(x.add(b.mult(e*w)).mult(m));i.setLocationAtPoint(i.renderWorldCopies?T.wrap():T,d)}n._fireMoveEvents(r)}),(function(t){n._afterEase(r,t)}),e),this},r.prototype._prepareEase=function(e,r,n){void 0===n&&(n={}),this._moving=!0,r||n.moving||this.fire(new t.Event("movestart",e)),this._zooming&&!n.zooming&&this.fire(new t.Event("zoomstart",e)),this._rotating&&!n.rotating&&this.fire(new t.Event("rotatestart",e)),this._pitching&&!n.pitching&&this.fire(new t.Event("pitchstart",e))},r.prototype._fireMoveEvents=function(e){this.fire(new t.Event("move",e)),this._zooming&&this.fire(new t.Event("zoom",e)),this._rotating&&this.fire(new t.Event("rotate",e)),this._pitching&&this.fire(new t.Event("pitch",e))},r.prototype._afterEase=function(e,r){if(!this._easeId||!r||this._easeId!==r){delete this._easeId;var n=this._zooming,i=this._rotating,a=this._pitching;this._moving=!1,this._zooming=!1,this._rotating=!1,this._pitching=!1,this._padding=!1,n&&this.fire(new t.Event("zoomend",e)),i&&this.fire(new t.Event("rotateend",e)),a&&this.fire(new t.Event("pitchend",e)),this.fire(new t.Event("moveend",e))}},r.prototype.flyTo=function(e,r){var n=this;if(!e.essential&&t.browser.prefersReducedMotion){var i=t.pick(e,["center","zoom","bearing","pitch","around"]);return this.jumpTo(i,r)}this.stop(),e=t.extend({offset:[0,0],speed:1.2,curve:1.42,easing:t.ease},e);var a=this.transform,o=this.getZoom(),s=this.getBearing(),l=this.getPitch(),c=this.getPadding(),u="zoom"in e?t.clamp(+e.zoom,a.minZoom,a.maxZoom):o,f="bearing"in e?this._normalizeBearing(e.bearing,s):s,h="pitch"in e?+e.pitch:l,p="padding"in 
e?e.padding:a.padding,d=a.zoomScale(u-o),m=t.Point.convert(e.offset),g=a.centerPoint.add(m),v=a.pointLocation(g),y=t.LngLat.convert(e.center||v);this._normalizeCenter(y);var x=a.project(v),b=a.project(y).sub(x),_=e.curve,w=Math.max(a.width,a.height),T=w/d,k=b.mag();if("minZoom"in e){var A=t.clamp(Math.min(e.minZoom,o,u),a.minZoom,a.maxZoom),M=w/a.zoomScale(A-o);_=Math.sqrt(M/k*2)}var S=_*_;function E(t){var e=(T*T-w*w+(t?-1:1)*S*S*k*k)/(2*(t?T:w)*S*k);return Math.log(Math.sqrt(e*e+1)-e)}function L(t){return(Math.exp(t)-Math.exp(-t))/2}function C(t){return(Math.exp(t)+Math.exp(-t))/2}var P=E(0),I=function(t){return C(P)/C(P+_*t)},O=function(t){return w*((C(P)*(L(e=P+_*t)/C(e))-L(P))/S)/k;var e},z=(E(1)-P)/_;if(Math.abs(k)<1e-6||!isFinite(z)){if(Math.abs(w-T)<1e-6)return this.easeTo(e,r);var D=Te.maxDuration&&(e.duration=0),this._zooming=!0,this._rotating=s!==f,this._pitching=h!==l,this._padding=!a.isPaddingEqual(p),this._prepareEase(r,!1),this._ease((function(e){var i=e*z,d=1/I(i);a.zoom=1===e?u:o+a.scaleZoom(d),n._rotating&&(a.bearing=t.number(s,f,e)),n._pitching&&(a.pitch=t.number(l,h,e)),n._padding&&(a.interpolatePadding(c,p,e),g=a.centerPoint.add(m));var v=1===e?y:a.unproject(x.add(b.mult(O(i))).mult(d));a.setLocationAtPoint(a.renderWorldCopies?v.wrap():v,g),n._fireMoveEvents(r)}),(function(){return n._afterEase(r)}),e),this},r.prototype.isEasing=function(){return!!this._easeFrameId},r.prototype.stop=function(){return this._stop()},r.prototype._stop=function(t,e){if(this._easeFrameId&&(this._cancelRenderFrame(this._easeFrameId),delete this._easeFrameId,delete this._onEaseFrame),this._onEaseEnd){var r=this._onEaseEnd;delete this._onEaseEnd,r.call(this,e)}if(!t){var n=this.handlers;n&&n.stop()}return 
this},r.prototype._ease=function(e,r,n){!1===n.animate||0===n.duration?(e(1),r()):(this._easeStart=t.browser.now(),this._easeOptions=n,this._onEaseFrame=e,this._onEaseEnd=r,this._easeFrameId=this._requestRenderFrame(this._renderFrameCallback))},r.prototype._renderFrameCallback=function(){var e=Math.min((t.browser.now()-this._easeStart)/this._easeOptions.duration,1);this._onEaseFrame(this._easeOptions.easing(e)),e<1?this._easeFrameId=this._requestRenderFrame(this._renderFrameCallback):this.stop()},r.prototype._normalizeBearing=function(e,r){e=t.wrap(e,-180,180);var n=Math.abs(e-r);return Math.abs(e-360-r)180?-360:r<-180?360:0}},r}(t.Evented),bi=function(e){void 0===e&&(e={}),this.options=e,t.bindAll(["_updateEditLink","_updateData","_updateCompact"],this)};bi.prototype.getDefaultPosition=function(){return"bottom-right"},bi.prototype.onAdd=function(t){var e=this.options&&this.options.compact;return this._map=t,this._container=r.create("div","mapboxgl-ctrl mapboxgl-ctrl-attrib"),this._innerContainer=r.create("div","mapboxgl-ctrl-attrib-inner",this._container),e&&this._container.classList.add("mapboxgl-compact"),this._updateAttributions(),this._updateEditLink(),this._map.on("styledata",this._updateData),this._map.on("sourcedata",this._updateData),this._map.on("moveend",this._updateEditLink),void 0===e&&(this._map.on("resize",this._updateCompact),this._updateCompact()),this._container},bi.prototype.onRemove=function(){r.remove(this._container),this._map.off("styledata",this._updateData),this._map.off("sourcedata",this._updateData),this._map.off("moveend",this._updateEditLink),this._map.off("resize",this._updateCompact),this._map=void 0,this._attribHTML=void 0},bi.prototype._updateEditLink=function(){var e=this._editLink;e||(e=this._editLink=this._container.querySelector(".mapbox-improve-map"));var r=[{key:"owner",value:this.styleOwner},{key:"id",value:this.styleId},{key:"access_token",value:this._map._requestManager._customAccessToken||t.config.ACCESS_TOKEN}];if(e){var 
n=r.reduce((function(t,e,n){return e.value&&(t+=e.key+"="+e.value+(n=0)return!1;return!0}))).join(" | ");o!==this._attribHTML&&(this._attribHTML=o,t.length?(this._innerContainer.innerHTML=o,this._container.classList.remove("mapboxgl-attrib-empty")):this._container.classList.add("mapboxgl-attrib-empty"),this._editLink=null)}},bi.prototype._updateCompact=function(){this._map.getCanvasContainer().offsetWidth<=640?this._container.classList.add("mapboxgl-compact"):this._container.classList.remove("mapboxgl-compact")};var _i=function(){t.bindAll(["_updateLogo"],this),t.bindAll(["_updateCompact"],this)};_i.prototype.onAdd=function(t){this._map=t,this._container=r.create("div","mapboxgl-ctrl");var e=r.create("a","mapboxgl-ctrl-logo");return e.target="_blank",e.rel="noopener nofollow",e.href="https://www.mapbox.com/",e.setAttribute("aria-label",this._map._getUIString("LogoControl.Title")),e.setAttribute("rel","noopener nofollow"),this._container.appendChild(e),this._container.style.display="none",this._map.on("sourcedata",this._updateLogo),this._updateLogo(),this._map.on("resize",this._updateCompact),this._updateCompact(),this._container},_i.prototype.onRemove=function(){r.remove(this._container),this._map.off("sourcedata",this._updateLogo),this._map.off("resize",this._updateCompact)},_i.prototype.getDefaultPosition=function(){return"bottom-left"},_i.prototype._updateLogo=function(t){t&&"metadata"!==t.sourceDataType||(this._container.style.display=this._logoRequired()?"block":"none")},_i.prototype._logoRequired=function(){if(this._map.style){var t=this._map.style.sourceCaches;for(var e in t){if(t[e].getSource().mapbox_logo)return!0}return!1}},_i.prototype._updateCompact=function(){var t=this._container.children;if(t.length){var e=t[0];this._map.getCanvasContainer().offsetWidth<250?e.classList.add("mapboxgl-compact"):e.classList.remove("mapboxgl-compact")}};var 
wi=function(){this._queue=[],this._id=0,this._cleared=!1,this._currentlyRunning=!1};wi.prototype.add=function(t){var e=++this._id;return this._queue.push({callback:t,id:e,cancelled:!1}),e},wi.prototype.remove=function(t){for(var e=this._currentlyRunning,r=0,n=e?this._queue.concat(e):this._queue;re.maxZoom)throw new Error("maxZoom must be greater than or equal to minZoom");if(null!=e.minPitch&&null!=e.maxPitch&&e.minPitch>e.maxPitch)throw new Error("maxPitch must be greater than or equal to minPitch");if(null!=e.minPitch&&e.minPitch<0)throw new Error("minPitch must be greater than or equal to 0");if(null!=e.maxPitch&&e.maxPitch>60)throw new Error("maxPitch must be less than or equal to 60");var i=new An(e.minZoom,e.maxZoom,e.minPitch,e.maxPitch,e.renderWorldCopies);if(n.call(this,i,e),this._interactive=e.interactive,this._maxTileCacheSize=e.maxTileCacheSize,this._failIfMajorPerformanceCaveat=e.failIfMajorPerformanceCaveat,this._preserveDrawingBuffer=e.preserveDrawingBuffer,this._antialias=e.antialias,this._trackResize=e.trackResize,this._bearingSnap=e.bearingSnap,this._refreshExpiredTiles=e.refreshExpiredTiles,this._fadeDuration=e.fadeDuration,this._crossSourceCollisions=e.crossSourceCollisions,this._crossFadingFactor=1,this._collectResourceTiming=e.collectResourceTiming,this._renderTaskQueue=new wi,this._controls=[],this._mapId=t.uniqueId(),this._locale=t.extend({},Ti,e.locale),this._requestManager=new t.RequestManager(e.transformRequest,e.accessToken),"string"==typeof e.container){if(this._container=t.window.document.getElementById(e.container),!this._container)throw new Error("Container '"+e.container+"' not found.")}else{if(!(e.container instanceof Ai))throw new Error("Invalid type: 'container' must be a String or HTMLElement.");this._container=e.container}if(e.maxBounds&&this.setMaxBounds(e.maxBounds),t.bindAll(["_onWindowOnline","_onWindowResize","_contextLost","_contextRestored"],this),this._setupContainer(),this._setupPainter(),void 0===this.painter)throw 
new Error("Failed to initialize WebGL.");this.on("move",(function(){return r._update(!1)})),this.on("moveend",(function(){return r._update(!1)})),this.on("zoom",(function(){return r._update(!0)})),void 0!==t.window&&(t.window.addEventListener("online",this._onWindowOnline,!1),t.window.addEventListener("resize",this._onWindowResize,!1)),this.handlers=new yi(this,e);var a="string"==typeof e.hash&&e.hash||void 0;this._hash=e.hash&&new Sn(a).addTo(this),this._hash&&this._hash._onHashChange()||(this.jumpTo({center:e.center,zoom:e.zoom,bearing:e.bearing,pitch:e.pitch}),e.bounds&&(this.resize(),this.fitBounds(e.bounds,t.extend({},e.fitBoundsOptions,{duration:0})))),this.resize(),this._localIdeographFontFamily=e.localIdeographFontFamily,e.style&&this.setStyle(e.style,{localIdeographFontFamily:e.localIdeographFontFamily}),e.attributionControl&&this.addControl(new bi({customAttribution:e.customAttribution})),this.addControl(new _i,e.logoPosition),this.on("style.load",(function(){r.transform.unmodified&&r.jumpTo(r.style.stylesheet)})),this.on("data",(function(e){r._update("style"===e.dataType),r.fire(new t.Event(e.dataType+"data",e))})),this.on("dataloading",(function(e){r.fire(new t.Event(e.dataType+"dataloading",e))}))}n&&(i.__proto__=n),i.prototype=Object.create(n&&n.prototype),i.prototype.constructor=i;var a={showTileBoundaries:{configurable:!0},showPadding:{configurable:!0},showCollisionBoxes:{configurable:!0},showOverdrawInspector:{configurable:!0},repaint:{configurable:!0},vertices:{configurable:!0},version:{configurable:!0}};return i.prototype._getMapId=function(){return this._mapId},i.prototype.addControl=function(e,r){if(void 0===r&&e.getDefaultPosition&&(r=e.getDefaultPosition()),void 0===r&&(r="top-right"),!e||!e.onAdd)return this.fire(new t.ErrorEvent(new Error("Invalid argument to map.addControl(). 
Argument must be a control with onAdd and onRemove methods.")));var n=e.onAdd(this);this._controls.push(e);var i=this._controlPositions[r];return-1!==r.indexOf("bottom")?i.insertBefore(n,i.firstChild):i.appendChild(n),this},i.prototype.removeControl=function(e){if(!e||!e.onRemove)return this.fire(new t.ErrorEvent(new Error("Invalid argument to map.removeControl(). Argument must be a control with onAdd and onRemove methods.")));var r=this._controls.indexOf(e);return r>-1&&this._controls.splice(r,1),e.onRemove(this),this},i.prototype.resize=function(e){var r=this._containerDimensions(),n=r[0],i=r[1];this._resizeCanvas(n,i),this.transform.resize(n,i),this.painter.resize(n,i);var a=!this._moving;return a&&(this.stop(),this.fire(new t.Event("movestart",e)).fire(new t.Event("move",e))),this.fire(new t.Event("resize",e)),a&&this.fire(new t.Event("moveend",e)),this},i.prototype.getBounds=function(){return this.transform.getBounds()},i.prototype.getMaxBounds=function(){return this.transform.getMaxBounds()},i.prototype.setMaxBounds=function(e){return this.transform.setMaxBounds(t.LngLatBounds.convert(e)),this._update()},i.prototype.setMinZoom=function(t){if((t=null==t?-2:t)>=-2&&t<=this.transform.maxZoom)return this.transform.minZoom=t,this._update(),this.getZoom()=this.transform.minZoom)return this.transform.maxZoom=t,this._update(),this.getZoom()>t&&this.setZoom(t),this;throw new Error("maxZoom must be greater than the current minZoom")},i.prototype.getMaxZoom=function(){return this.transform.maxZoom},i.prototype.setMinPitch=function(t){if((t=null==t?0:t)<0)throw new Error("minPitch must be greater than or equal to 0");if(t>=0&&t<=this.transform.maxPitch)return this.transform.minPitch=t,this._update(),this.getPitch()60)throw new Error("maxPitch must be less than or equal to 60");if(t>=this.transform.minPitch)return this.transform.maxPitch=t,this._update(),this.getPitch()>t&&this.setPitch(t),this;throw new Error("maxPitch must be greater than the current 
minPitch")},i.prototype.getMaxPitch=function(){return this.transform.maxPitch},i.prototype.getRenderWorldCopies=function(){return this.transform.renderWorldCopies},i.prototype.setRenderWorldCopies=function(t){return this.transform.renderWorldCopies=t,this._update()},i.prototype.project=function(e){return this.transform.locationPoint(t.LngLat.convert(e))},i.prototype.unproject=function(e){return this.transform.pointLocation(t.Point.convert(e))},i.prototype.isMoving=function(){return this._moving||this.handlers.isMoving()},i.prototype.isZooming=function(){return this._zooming||this.handlers.isZooming()},i.prototype.isRotating=function(){return this._rotating||this.handlers.isRotating()},i.prototype._createDelegatedListener=function(t,e,r){var n,i=this;if("mouseenter"===t||"mouseover"===t){var a=!1;return{layer:e,listener:r,delegates:{mousemove:function(n){var o=i.getLayer(e)?i.queryRenderedFeatures(n.point,{layers:[e]}):[];o.length?a||(a=!0,r.call(i,new Rn(t,i,n.originalEvent,{features:o}))):a=!1},mouseout:function(){a=!1}}}}if("mouseleave"===t||"mouseout"===t){var o=!1;return{layer:e,listener:r,delegates:{mousemove:function(n){(i.getLayer(e)?i.queryRenderedFeatures(n.point,{layers:[e]}):[]).length?o=!0:o&&(o=!1,r.call(i,new Rn(t,i,n.originalEvent)))},mouseout:function(e){o&&(o=!1,r.call(i,new Rn(t,i,e.originalEvent)))}}}}return{layer:e,listener:r,delegates:(n={},n[t]=function(t){var n=i.getLayer(e)?i.queryRenderedFeatures(t.point,{layers:[e]}):[];n.length&&(t.features=n,r.call(i,t),delete t.features)},n)}},i.prototype.on=function(t,e,r){if(void 0===r)return n.prototype.on.call(this,t,e);var i=this._createDelegatedListener(t,e,r);for(var a in this._delegatedListeners=this._delegatedListeners||{},this._delegatedListeners[t]=this._delegatedListeners[t]||[],this._delegatedListeners[t].push(i),i.delegates)this.on(a,i.delegates[a]);return this},i.prototype.once=function(t,e,r){if(void 0===r)return n.prototype.once.call(this,t,e);var 
i=this._createDelegatedListener(t,e,r);for(var a in i.delegates)this.once(a,i.delegates[a]);return this},i.prototype.off=function(t,e,r){var i=this;if(void 0===r)return n.prototype.off.call(this,t,e);return this._delegatedListeners&&this._delegatedListeners[t]&&function(n){for(var a=n[t],o=0;o180;){var s=n.locationPoint(e);if(s.x>=0&&s.y>=0&&s.x<=n.width&&s.y<=n.height)break;e.lng>n.center.lng?e.lng-=360:e.lng+=360}return e}Ii.prototype.down=function(t,e){this.mouseRotate.mousedown(t,e),this.mousePitch&&this.mousePitch.mousedown(t,e),r.disableDrag()},Ii.prototype.move=function(t,e){var r=this.map,n=this.mouseRotate.mousemoveWindow(t,e);if(n&&n.bearingDelta&&r.setBearing(r.getBearing()+n.bearingDelta),this.mousePitch){var i=this.mousePitch.mousemoveWindow(t,e);i&&i.pitchDelta&&r.setPitch(r.getPitch()+i.pitchDelta)}},Ii.prototype.off=function(){var t=this.element;r.removeEventListener(t,"mousedown",this.mousedown),r.removeEventListener(t,"touchstart",this.touchstart,{passive:!1}),r.removeEventListener(t,"touchmove",this.touchmove),r.removeEventListener(t,"touchend",this.touchend),r.removeEventListener(t,"touchcancel",this.reset),this.offTemp()},Ii.prototype.offTemp=function(){r.enableDrag(),r.removeEventListener(t.window,"mousemove",this.mousemove),r.removeEventListener(t.window,"mouseup",this.mouseup)},Ii.prototype.mousedown=function(e){this.down(t.extend({},e,{ctrlKey:!0,preventDefault:function(){return 
e.preventDefault()}}),r.mousePos(this.element,e)),r.addEventListener(t.window,"mousemove",this.mousemove),r.addEventListener(t.window,"mouseup",this.mouseup)},Ii.prototype.mousemove=function(t){this.move(t,r.mousePos(this.element,t))},Ii.prototype.mouseup=function(t){this.mouseRotate.mouseupWindow(t),this.mousePitch&&this.mousePitch.mouseupWindow(t),this.offTemp()},Ii.prototype.touchstart=function(t){1!==t.targetTouches.length?this.reset():(this._startPos=this._lastPos=r.touchPos(this.element,t.targetTouches)[0],this.down({type:"mousedown",button:0,ctrlKey:!0,preventDefault:function(){return t.preventDefault()}},this._startPos))},Ii.prototype.touchmove=function(t){1!==t.targetTouches.length?this.reset():(this._lastPos=r.touchPos(this.element,t.targetTouches)[0],this.move({preventDefault:function(){return t.preventDefault()}},this._lastPos))},Ii.prototype.touchend=function(t){0===t.targetTouches.length&&this._startPos&&this._lastPos&&this._startPos.dist(this._lastPos)e.getEast()||r.latitudee.getNorth())},n.prototype._setErrorState=function(){switch(this._watchState){case"WAITING_ACTIVE":this._watchState="ACTIVE_ERROR",this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-active"),this._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-active-error");break;case"ACTIVE_LOCK":this._watchState="ACTIVE_ERROR",this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-active"),this._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-active-error"),this._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-waiting");break;case"BACKGROUND":this._watchState="BACKGROUND_ERROR",this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-background"),this._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-background-error"),this._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-waiting")}},n.prototype._onSuccess=function(e){if(this._map){if(this._isOutOfMapMaxBounds(e))return this._setErrorState(),this.fire(new 
t.Event("outofmaxbounds",e)),this._updateMarker(),void this._finish();if(this.options.trackUserLocation)switch(this._lastKnownPosition=e,this._watchState){case"WAITING_ACTIVE":case"ACTIVE_LOCK":case"ACTIVE_ERROR":this._watchState="ACTIVE_LOCK",this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-waiting"),this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-active-error"),this._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-active");break;case"BACKGROUND":case"BACKGROUND_ERROR":this._watchState="BACKGROUND",this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-waiting"),this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-background-error"),this._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-background")}this.options.showUserLocation&&"OFF"!==this._watchState&&this._updateMarker(e),this.options.trackUserLocation&&"ACTIVE_LOCK"!==this._watchState||this._updateCamera(e),this.options.showUserLocation&&this._dotElement.classList.remove("mapboxgl-user-location-dot-stale"),this.fire(new t.Event("geolocate",e)),this._finish()}},n.prototype._updateCamera=function(e){var r=new t.LngLat(e.coords.longitude,e.coords.latitude),n=e.coords.accuracy,i=this._map.getBearing(),a=t.extend({bearing:i},this.options.fitBoundsOptions);this._map.fitBounds(r.toBounds(n),a,{geolocateSource:!0})},n.prototype._updateMarker=function(e){if(e){var r=new t.LngLat(e.coords.longitude,e.coords.latitude);this._accuracyCircleMarker.setLngLat(r).addTo(this._map),this._userLocationDotMarker.setLngLat(r).addTo(this._map),this._accuracy=e.coords.accuracy,this.options.showUserLocation&&this.options.showAccuracyCircle&&this._updateCircleRadius()}else this._userLocationDotMarker.remove(),this._accuracyCircleMarker.remove()},n.prototype._updateCircleRadius=function(){var 
t=this._map._container.clientHeight/2,e=this._map.unproject([0,t]),r=this._map.unproject([1,t]),n=e.distanceTo(r),i=Math.ceil(2*this._accuracy/n);this._circleElement.style.width=i+"px",this._circleElement.style.height=i+"px"},n.prototype._onZoom=function(){this.options.showUserLocation&&this.options.showAccuracyCircle&&this._updateCircleRadius()},n.prototype._onError=function(e){if(this._map){if(this.options.trackUserLocation)if(1===e.code){this._watchState="OFF",this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-waiting"),this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-active"),this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-active-error"),this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-background"),this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-background-error"),this._geolocateButton.disabled=!0;var r=this._map._getUIString("GeolocateControl.LocationNotAvailable");this._geolocateButton.title=r,this._geolocateButton.setAttribute("aria-label",r),void 0!==this._geolocationWatchID&&this._clearWatch()}else{if(3===e.code&&ji)return;this._setErrorState()}"OFF"!==this._watchState&&this.options.showUserLocation&&this._dotElement.classList.add("mapboxgl-user-location-dot-stale"),this.fire(new t.Event("error",e)),this._finish()}},n.prototype._finish=function(){this._timeoutId&&clearTimeout(this._timeoutId),this._timeoutId=void 0},n.prototype._setupUI=function(e){var n=this;if(this._container.addEventListener("contextmenu",(function(t){return t.preventDefault()})),this._geolocateButton=r.create("button","mapboxgl-ctrl-geolocate",this._container),r.create("span","mapboxgl-ctrl-icon",this._geolocateButton).setAttribute("aria-hidden",!0),this._geolocateButton.type="button",!1===e){t.warnOnce("Geolocation support is not available so the GeolocateControl will be disabled.");var 
i=this._map._getUIString("GeolocateControl.LocationNotAvailable");this._geolocateButton.disabled=!0,this._geolocateButton.title=i,this._geolocateButton.setAttribute("aria-label",i)}else{var a=this._map._getUIString("GeolocateControl.FindMyLocation");this._geolocateButton.title=a,this._geolocateButton.setAttribute("aria-label",a)}this.options.trackUserLocation&&(this._geolocateButton.setAttribute("aria-pressed","false"),this._watchState="OFF"),this.options.showUserLocation&&(this._dotElement=r.create("div","mapboxgl-user-location-dot"),this._userLocationDotMarker=new Fi(this._dotElement),this._circleElement=r.create("div","mapboxgl-user-location-accuracy-circle"),this._accuracyCircleMarker=new Fi({element:this._circleElement,pitchAlignment:"map"}),this.options.trackUserLocation&&(this._watchState="OFF"),this._map.on("zoom",this._onZoom)),this._geolocateButton.addEventListener("click",this.trigger.bind(this)),this._setup=!0,this.options.trackUserLocation&&this._map.on("movestart",(function(e){var r=e.originalEvent&&"resize"===e.originalEvent.type;e.geolocateSource||"ACTIVE_LOCK"!==n._watchState||r||(n._watchState="BACKGROUND",n._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-background"),n._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-active"),n.fire(new t.Event("trackuserlocationend")))}))},n.prototype.trigger=function(){if(!this._setup)return t.warnOnce("Geolocate control triggered before added to a map"),!1;if(this.options.trackUserLocation){switch(this._watchState){case"OFF":this._watchState="WAITING_ACTIVE",this.fire(new 
t.Event("trackuserlocationstart"));break;case"WAITING_ACTIVE":case"ACTIVE_LOCK":case"ACTIVE_ERROR":case"BACKGROUND_ERROR":Ni--,ji=!1,this._watchState="OFF",this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-waiting"),this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-active"),this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-active-error"),this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-background"),this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-background-error"),this.fire(new t.Event("trackuserlocationend"));break;case"BACKGROUND":this._watchState="ACTIVE_LOCK",this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-background"),this._lastKnownPosition&&this._updateCamera(this._lastKnownPosition),this.fire(new t.Event("trackuserlocationstart"))}switch(this._watchState){case"WAITING_ACTIVE":this._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-waiting"),this._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-active");break;case"ACTIVE_LOCK":this._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-active");break;case"ACTIVE_ERROR":this._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-waiting"),this._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-active-error");break;case"BACKGROUND":this._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-background");break;case"BACKGROUND_ERROR":this._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-waiting"),this._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-background-error")}if("OFF"===this._watchState&&void 0!==this._geolocationWatchID)this._clearWatch();else if(void 0===this._geolocationWatchID){var 
e;this._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-waiting"),this._geolocateButton.setAttribute("aria-pressed","true"),++Ni>1?(e={maximumAge:6e5,timeout:0},ji=!0):(e=this.options.positionOptions,ji=!1),this._geolocationWatchID=t.window.navigator.geolocation.watchPosition(this._onSuccess,this._onError,e)}}else t.window.navigator.geolocation.getCurrentPosition(this._onSuccess,this._onError,this.options.positionOptions),this._timeoutId=setTimeout(this._finish,1e4);return!0},n.prototype._clearWatch=function(){t.window.navigator.geolocation.clearWatch(this._geolocationWatchID),this._geolocationWatchID=void 0,this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-waiting"),this._geolocateButton.setAttribute("aria-pressed","false"),this.options.showUserLocation&&this._updateMarker(null)},n}(t.Evented),Vi={maxWidth:100,unit:"metric"},Hi=function(e){this.options=t.extend({},Vi,e),t.bindAll(["_onMove","setUnit"],this)};function qi(t,e,r){var n=r&&r.maxWidth||100,i=t._container.clientHeight/2,a=t.unproject([0,i]),o=t.unproject([n,i]),s=a.distanceTo(o);if(r&&"imperial"===r.unit){var l=3.2808*s;if(l>5280)Gi(e,n,l/5280,t._getUIString("ScaleControl.Miles"));else Gi(e,n,l,t._getUIString("ScaleControl.Feet"))}else if(r&&"nautical"===r.unit){Gi(e,n,s/1852,t._getUIString("ScaleControl.NauticalMiles"))}else s>=1e3?Gi(e,n,s/1e3,t._getUIString("ScaleControl.Kilometers")):Gi(e,n,s,t._getUIString("ScaleControl.Meters"))}function Gi(t,e,r,n){var i,a,o,s=(i=r,a=Math.pow(10,(""+Math.floor(i)).length-1),o=(o=i/a)>=10?10:o>=5?5:o>=3?3:o>=2?2:o>=1?1:function(t){var e=Math.pow(10,Math.ceil(-Math.log(t)/Math.LN10));return Math.round(t*e)/e}(o),a*o),l=s/r;t.style.width=e*l+"px",t.innerHTML=s+" "+n}Hi.prototype.getDefaultPosition=function(){return"bottom-left"},Hi.prototype._onMove=function(){qi(this._map,this._container,this.options)},Hi.prototype.onAdd=function(t){return this._map=t,this._container=r.create("div","mapboxgl-ctrl 
mapboxgl-ctrl-scale",t.getContainer()),this._map.on("move",this._onMove),this._onMove(),this._container},Hi.prototype.onRemove=function(){r.remove(this._container),this._map.off("move",this._onMove),this._map=void 0},Hi.prototype.setUnit=function(t){this.options.unit=t,qi(this._map,this._container,this.options)};var Yi=function(e){this._fullscreen=!1,e&&e.container&&(e.container instanceof t.window.HTMLElement?this._container=e.container:t.warnOnce("Full screen control 'container' must be a DOM element.")),t.bindAll(["_onClickFullscreen","_changeIcon"],this),"onfullscreenchange"in t.window.document?this._fullscreenchange="fullscreenchange":"onmozfullscreenchange"in t.window.document?this._fullscreenchange="mozfullscreenchange":"onwebkitfullscreenchange"in t.window.document?this._fullscreenchange="webkitfullscreenchange":"onmsfullscreenchange"in t.window.document&&(this._fullscreenchange="MSFullscreenChange")};Yi.prototype.onAdd=function(e){return this._map=e,this._container||(this._container=this._map.getContainer()),this._controlContainer=r.create("div","mapboxgl-ctrl mapboxgl-ctrl-group"),this._checkFullscreenSupport()?this._setupUI():(this._controlContainer.style.display="none",t.warnOnce("This device does not support fullscreen mode.")),this._controlContainer},Yi.prototype.onRemove=function(){r.remove(this._controlContainer),this._map=null,t.window.document.removeEventListener(this._fullscreenchange,this._changeIcon)},Yi.prototype._checkFullscreenSupport=function(){return!!(t.window.document.fullscreenEnabled||t.window.document.mozFullScreenEnabled||t.window.document.msFullscreenEnabled||t.window.document.webkitFullscreenEnabled)},Yi.prototype._setupUI=function(){var 
e=this._fullscreenButton=r.create("button","mapboxgl-ctrl-fullscreen",this._controlContainer);r.create("span","mapboxgl-ctrl-icon",e).setAttribute("aria-hidden",!0),e.type="button",this._updateTitle(),this._fullscreenButton.addEventListener("click",this._onClickFullscreen),t.window.document.addEventListener(this._fullscreenchange,this._changeIcon)},Yi.prototype._updateTitle=function(){var t=this._getTitle();this._fullscreenButton.setAttribute("aria-label",t),this._fullscreenButton.title=t},Yi.prototype._getTitle=function(){return this._map._getUIString(this._isFullscreen()?"FullscreenControl.Exit":"FullscreenControl.Enter")},Yi.prototype._isFullscreen=function(){return this._fullscreen},Yi.prototype._changeIcon=function(){(t.window.document.fullscreenElement||t.window.document.mozFullScreenElement||t.window.document.webkitFullscreenElement||t.window.document.msFullscreenElement)===this._container!==this._fullscreen&&(this._fullscreen=!this._fullscreen,this._fullscreenButton.classList.toggle("mapboxgl-ctrl-shrink"),this._fullscreenButton.classList.toggle("mapboxgl-ctrl-fullscreen"),this._updateTitle())},Yi.prototype._onClickFullscreen=function(){this._isFullscreen()?t.window.document.exitFullscreen?t.window.document.exitFullscreen():t.window.document.mozCancelFullScreen?t.window.document.mozCancelFullScreen():t.window.document.msExitFullscreen?t.window.document.msExitFullscreen():t.window.document.webkitCancelFullScreen&&t.window.document.webkitCancelFullScreen():this._container.requestFullscreen?this._container.requestFullscreen():this._container.mozRequestFullScreen?this._container.mozRequestFullScreen():this._container.msRequestFullscreen?this._container.msRequestFullscreen():this._container.webkitRequestFullscreen&&this._container.webkitRequestFullscreen()};var Wi={closeButton:!0,closeOnClick:!0,className:"",maxWidth:"240px"},Xi=function(e){function 
n(r){e.call(this),this.options=t.extend(Object.create(Wi),r),t.bindAll(["_update","_onClose","remove","_onMouseMove","_onMouseUp","_onDrag"],this)}return e&&(n.__proto__=e),n.prototype=Object.create(e&&e.prototype),n.prototype.constructor=n,n.prototype.addTo=function(e){return this._map&&this.remove(),this._map=e,this.options.closeOnClick&&this._map.on("click",this._onClose),this.options.closeOnMove&&this._map.on("move",this._onClose),this._map.on("remove",this.remove),this._update(),this._trackPointer?(this._map.on("mousemove",this._onMouseMove),this._map.on("mouseup",this._onMouseUp),this._container&&this._container.classList.add("mapboxgl-popup-track-pointer"),this._map._canvasContainer.classList.add("mapboxgl-track-pointer")):this._map.on("move",this._update),this.fire(new t.Event("open")),this},n.prototype.isOpen=function(){return!!this._map},n.prototype.remove=function(){return this._content&&r.remove(this._content),this._container&&(r.remove(this._container),delete this._container),this._map&&(this._map.off("move",this._update),this._map.off("move",this._onClose),this._map.off("click",this._onClose),this._map.off("remove",this.remove),this._map.off("mousemove",this._onMouseMove),this._map.off("mouseup",this._onMouseUp),this._map.off("drag",this._onDrag),delete this._map),this.fire(new t.Event("close")),this},n.prototype.getLngLat=function(){return this._lngLat},n.prototype.setLngLat=function(e){return this._lngLat=t.LngLat.convert(e),this._pos=null,this._trackPointer=!1,this._update(),this._map&&(this._map.on("move",this._update),this._map.off("mousemove",this._onMouseMove),this._container&&this._container.classList.remove("mapboxgl-popup-track-pointer"),this._map._canvasContainer.classList.remove("mapboxgl-track-pointer")),this},n.prototype.trackPointer=function(){return 
this._trackPointer=!0,this._pos=null,this._update(),this._map&&(this._map.off("move",this._update),this._map.on("mousemove",this._onMouseMove),this._map.on("drag",this._onDrag),this._container&&this._container.classList.add("mapboxgl-popup-track-pointer"),this._map._canvasContainer.classList.add("mapboxgl-track-pointer")),this},n.prototype.getElement=function(){return this._container},n.prototype.setText=function(e){return this.setDOMContent(t.window.document.createTextNode(e))},n.prototype.setHTML=function(e){var r,n=t.window.document.createDocumentFragment(),i=t.window.document.createElement("body");for(i.innerHTML=e;r=i.firstChild;)n.appendChild(r);return this.setDOMContent(n)},n.prototype.getMaxWidth=function(){return this._container&&this._container.style.maxWidth},n.prototype.setMaxWidth=function(t){return this.options.maxWidth=t,this._update(),this},n.prototype.setDOMContent=function(t){return this._createContent(),this._content.appendChild(t),this._update(),this},n.prototype.addClassName=function(t){this._container&&this._container.classList.add(t)},n.prototype.removeClassName=function(t){this._container&&this._container.classList.remove(t)},n.prototype.toggleClassName=function(t){if(this._container)return this._container.classList.toggle(t)},n.prototype._createContent=function(){this._content&&r.remove(this._content),this._content=r.create("div","mapboxgl-popup-content",this._container),this.options.closeButton&&(this._closeButton=r.create("button","mapboxgl-popup-close-button",this._content),this._closeButton.type="button",this._closeButton.setAttribute("aria-label","Close popup"),this._closeButton.innerHTML="×",this._closeButton.addEventListener("click",this._onClose))},n.prototype._onMouseUp=function(t){this._update(t.point)},n.prototype._onMouseMove=function(t){this._update(t.point)},n.prototype._onDrag=function(t){this._update(t.point)},n.prototype._update=function(e){var 
n=this,i=this._lngLat||this._trackPointer;if(this._map&&i&&this._content&&(this._container||(this._container=r.create("div","mapboxgl-popup",this._map.getContainer()),this._tip=r.create("div","mapboxgl-popup-tip",this._container),this._container.appendChild(this._content),this.options.className&&this.options.className.split(" ").forEach((function(t){return n._container.classList.add(t)})),this._trackPointer&&this._container.classList.add("mapboxgl-popup-track-pointer")),this.options.maxWidth&&this._container.style.maxWidth!==this.options.maxWidth&&(this._container.style.maxWidth=this.options.maxWidth),this._map.transform.renderWorldCopies&&!this._trackPointer&&(this._lngLat=Oi(this._lngLat,this._pos,this._map.transform)),!this._trackPointer||e)){var a=this._pos=this._trackPointer&&e?e:this._map.project(this._lngLat),o=this.options.anchor,s=function e(r){if(r){if("number"==typeof r){var n=Math.round(Math.sqrt(.5*Math.pow(r,2)));return{center:new t.Point(0,0),top:new t.Point(0,r),"top-left":new t.Point(n,n),"top-right":new t.Point(-n,n),bottom:new t.Point(0,-r),"bottom-left":new t.Point(n,-n),"bottom-right":new t.Point(-n,-n),left:new t.Point(r,0),right:new t.Point(-r,0)}}if(r instanceof t.Point||Array.isArray(r)){var i=t.Point.convert(r);return{center:i,top:i,"top-left":i,"top-right":i,bottom:i,"bottom-left":i,"bottom-right":i,left:i,right:i}}return{center:t.Point.convert(r.center||[0,0]),top:t.Point.convert(r.top||[0,0]),"top-left":t.Point.convert(r["top-left"]||[0,0]),"top-right":t.Point.convert(r["top-right"]||[0,0]),bottom:t.Point.convert(r.bottom||[0,0]),"bottom-left":t.Point.convert(r["bottom-left"]||[0,0]),"bottom-right":t.Point.convert(r["bottom-right"]||[0,0]),left:t.Point.convert(r.left||[0,0]),right:t.Point.convert(r.right||[0,0])}}return e(new t.Point(0,0))}(this.options.offset);if(!o){var 
l,c=this._container.offsetWidth,u=this._container.offsetHeight;l=a.y+s.bottom.ythis._map.transform.height-u?["bottom"]:[],a.xthis._map.transform.width-c/2&&l.push("right"),o=0===l.length?"bottom":l.join("-")}var f=a.add(s[o]).round();r.setTransform(this._container,zi[o]+" translate("+f.x+"px,"+f.y+"px)"),Di(this._container,o,"popup")}},n.prototype._onClose=function(){this.remove()},n}(t.Evented);var Zi={version:t.version,supported:e,setRTLTextPlugin:t.setRTLTextPlugin,getRTLTextPluginStatus:t.getRTLTextPluginStatus,Map:Ei,NavigationControl:Pi,GeolocateControl:Ui,AttributionControl:bi,ScaleControl:Hi,FullscreenControl:Yi,Popup:Xi,Marker:Fi,Style:qe,LngLat:t.LngLat,LngLatBounds:t.LngLatBounds,Point:t.Point,MercatorCoordinate:t.MercatorCoordinate,Evented:t.Evented,config:t.config,prewarm:function(){Bt().acquire(zt)},clearPrewarmedResources:function(){var t=Rt;t&&(t.isPreloaded()&&1===t.numActive()?(t.release(zt),Rt=null):console.warn("Could not clear WebWorkers since there are active Map instances that still reference it. 
The pre-warmed WebWorker pool can only be cleared when all map instances have been removed with map.remove()"))},get accessToken(){return t.config.ACCESS_TOKEN},set accessToken(e){t.config.ACCESS_TOKEN=e},get baseApiUrl(){return t.config.API_URL},set baseApiUrl(e){t.config.API_URL=e},get workerCount(){return Dt.workerCount},set workerCount(t){Dt.workerCount=t},get maxParallelImageRequests(){return t.config.MAX_PARALLEL_IMAGE_REQUESTS},set maxParallelImageRequests(e){t.config.MAX_PARALLEL_IMAGE_REQUESTS=e},clearStorage:function(e){t.clearTileCache(e)},workerUrl:""};return Zi})),r}))},{}],235:[function(t,e,r){"use strict";e.exports=Math.log2||function(t){return Math.log(t)*Math.LOG2E}},{}],236:[function(t,e,r){"use strict";e.exports=function(t,e){e||(e=t,t=window);var r=0,i=0,a=0,o={shift:!1,alt:!1,control:!1,meta:!1},s=!1;function l(t){var e=!1;return"altKey"in t&&(e=e||t.altKey!==o.alt,o.alt=!!t.altKey),"shiftKey"in t&&(e=e||t.shiftKey!==o.shift,o.shift=!!t.shiftKey),"ctrlKey"in t&&(e=e||t.ctrlKey!==o.control,o.control=!!t.ctrlKey),"metaKey"in t&&(e=e||t.metaKey!==o.meta,o.meta=!!t.metaKey),e}function c(t,s){var c=n.x(s),u=n.y(s);"buttons"in s&&(t=0|s.buttons),(t!==r||c!==i||u!==a||l(s))&&(r=0|t,i=c||0,a=u||0,e&&e(r,i,a,o))}function u(t){c(0,t)}function f(){(r||i||a||o.shift||o.alt||o.meta||o.control)&&(i=a=0,r=0,o.shift=o.alt=o.control=o.meta=!1,e&&e(0,0,0,o))}function h(t){l(t)&&e&&e(r,i,a,o)}function p(t){0===n.buttons(t)?c(0,t):c(r,t)}function d(t){c(r|n.buttons(t),t)}function m(t){c(r&~n.buttons(t),t)}function 
g(){s||(s=!0,t.addEventListener("mousemove",p),t.addEventListener("mousedown",d),t.addEventListener("mouseup",m),t.addEventListener("mouseleave",u),t.addEventListener("mouseenter",u),t.addEventListener("mouseout",u),t.addEventListener("mouseover",u),t.addEventListener("blur",f),t.addEventListener("keyup",h),t.addEventListener("keydown",h),t.addEventListener("keypress",h),t!==window&&(window.addEventListener("blur",f),window.addEventListener("keyup",h),window.addEventListener("keydown",h),window.addEventListener("keypress",h)))}g();var v={element:t};return Object.defineProperties(v,{enabled:{get:function(){return s},set:function(e){e?g():function(){if(!s)return;s=!1,t.removeEventListener("mousemove",p),t.removeEventListener("mousedown",d),t.removeEventListener("mouseup",m),t.removeEventListener("mouseleave",u),t.removeEventListener("mouseenter",u),t.removeEventListener("mouseout",u),t.removeEventListener("mouseover",u),t.removeEventListener("blur",f),t.removeEventListener("keyup",h),t.removeEventListener("keydown",h),t.removeEventListener("keypress",h),t!==window&&(window.removeEventListener("blur",f),window.removeEventListener("keyup",h),window.removeEventListener("keydown",h),window.removeEventListener("keypress",h))}()},enumerable:!0},buttons:{get:function(){return r},enumerable:!0},x:{get:function(){return i},enumerable:!0},y:{get:function(){return a},enumerable:!0},mods:{get:function(){return o},enumerable:!0}}),v};var n=t("mouse-event")},{"mouse-event":238}],237:[function(t,e,r){var n={left:0,top:0};e.exports=function(t,e,r){e=e||t.currentTarget||t.srcElement,Array.isArray(r)||(r=[0,0]);var i=t.clientX||0,a=t.clientY||0,o=(s=e,s===window||s===document||s===document.body?n:s.getBoundingClientRect());var s;return r[0]=i-o.left,r[1]=a-o.top,r}},{}],238:[function(t,e,r){"use strict";function n(t){return t.target||t.srcElement||window}r.buttons=function(t){if("object"==typeof t){if("buttons"in t)return t.buttons;if("which"in t){if(2===(e=t.which))return 
4;if(3===e)return 2;if(e>0)return 1<=0)return 1<0&&o(l,r))}catch(t){f.call(new p(r),t)}}}function f(t){var e=this;e.triggered||(e.triggered=!0,e.def&&(e=e.def),e.msg=t,e.state=2,e.chain.length>0&&o(l,e))}function h(t,e,r,n){for(var i=0;i1&&(r*=v=Math.sqrt(v),a*=v);var y=r*r,x=a*a,b=(c==u?-1:1)*Math.sqrt(Math.abs((y*x-y*g*g-x*m*m)/(y*g*g+x*m*m)));b==1/0&&(b=1);var _=b*r*g/a+(t+f)/2,w=b*-a*m/r+(e+h)/2,T=Math.asin(((e-w)/a).toFixed(9)),k=Math.asin(((h-w)/a).toFixed(9));(T=t<_?n-T:T)<0&&(T=2*n+T),(k=f<_?n-k:k)<0&&(k=2*n+k),u&&T>k&&(T-=2*n),!u&&k>T&&(k-=2*n)}if(Math.abs(k-T)>i){var A=k,M=f,S=h;k=T+i*(u&&k>T?1:-1);var E=s(f=_+r*Math.cos(k),h=w+a*Math.sin(k),r,a,o,0,u,M,S,[k,A,_,w])}var L=Math.tan((k-T)/4),C=4/3*r*L,P=4/3*a*L,I=[2*t-(t+C*Math.sin(T)),2*e-(e-P*Math.cos(T)),f+C*Math.sin(k),h-P*Math.cos(k),f,h];if(p)return I;E&&(I=I.concat(E));for(var O=0;O7&&(r.push(v.splice(0,7)),v.unshift("C"));break;case"S":var x=p,b=d;"C"!=e&&"S"!=e||(x+=x-n,b+=b-i),v=["C",x,b,v[1],v[2],v[3],v[4]];break;case"T":"Q"==e||"T"==e?(f=2*p-f,h=2*d-h):(f=p,h=d),v=o(p,d,f,h,v[1],v[2]);break;case"Q":f=v[1],h=v[2],v=o(p,d,v[1],v[2],v[3],v[4]);break;case"L":v=a(p,d,v[1],v[2]);break;case"H":v=a(p,d,v[1],d);break;case"V":v=a(p,d,p,v[1]);break;case"Z":v=a(p,d,l,u)}e=y,p=v[v.length-2],d=v[v.length-1],v.length>4?(n=v[v.length-4],i=v[v.length-3]):(n=p,i=d),r.push(v)}return r}},{}],242:[function(t,e,r){ -/* -object-assign -(c) Sindre Sorhus -@license MIT -*/ -"use strict";var n=Object.getOwnPropertySymbols,i=Object.prototype.hasOwnProperty,a=Object.prototype.propertyIsEnumerable;function o(t){if(null==t)throw new TypeError("Object.assign cannot be called with null or undefined");return Object(t)}e.exports=function(){try{if(!Object.assign)return!1;var t=new String("abc");if(t[5]="de","5"===Object.getOwnPropertyNames(t)[0])return!1;for(var e={},r=0;r<10;r++)e["_"+String.fromCharCode(r)]=r;if("0123456789"!==Object.getOwnPropertyNames(e).map((function(t){return e[t]})).join(""))return!1;var 
n={};return"abcdefghijklmnopqrst".split("").forEach((function(t){n[t]=t})),"abcdefghijklmnopqrst"===Object.keys(Object.assign({},n)).join("")}catch(t){return!1}}()?Object.assign:function(t,e){for(var r,s,l=o(t),c=1;c1e4)throw Error("References have circular dependency. Please, check them.");r[n]=t})),n=n.reverse(),r=r.map((function(e){return n.forEach((function(r){e=e.replace(new RegExp("(\\"+i+r+"\\"+i+")","g"),t[0]+"$1"+t[1])})),e}))}));var o=new RegExp("\\"+i+"([0-9]+)\\"+i);return a?r:function t(e,r,n){for(var i,a=[],s=0;i=o.exec(e);){if(s++>1e4)throw Error("Circular references in parenthesis");a.push(e.slice(0,i.index)),a.push(t(r[i[1]],r)),e=e.slice(i.index+i[0].length)}return a.push(e),a}(r[0],r)}function i(t,e){if(e&&e.flat){var r,n=e&&e.escape||"___",i=t[0];if(!i)return"";for(var a=new RegExp("\\"+n+"([0-9]+)\\"+n),o=0;i!=r;){if(o++>1e4)throw Error("Circular references in "+t);r=i,i=i.replace(a,s)}return i}return t.reduce((function t(e,r){return Array.isArray(r)&&(r=r.reduce(t,"")),e+r}),"");function s(e,r){if(null==t[r])throw Error("Reference "+r+"is undefined");return t[r]}}function a(t,e){return Array.isArray(t)?i(t,e):n(t,e)}a.parse=n,a.stringify=i,e.exports=a},{}],244:[function(t,e,r){"use strict";var n=t("pick-by-alias");e.exports=function(t){var e;arguments.length>1&&(t=arguments);"string"==typeof t?t=t.split(/\s/).map(parseFloat):"number"==typeof t&&(t=[t]);t.length&&"number"==typeof t[0]?e=1===t.length?{width:t[0],height:t[0],x:0,y:0}:2===t.length?{width:t[0],height:t[1],x:0,y:0}:{x:t[0],y:t[1],width:t[2]-t[0]||0,height:t[3]-t[1]||0}:t&&(t=n(t,{left:"x l left Left",top:"y t top Top",width:"w width W Width",height:"h height W Width",bottom:"b bottom Bottom",right:"r right Right"}),e={x:t.left||0,y:t.top||0},null==t.width?t.right?e.width=t.right-e.x:e.width=0:e.width=t.width,null==t.height?t.bottom?e.height=t.bottom-e.y:e.height=0:e.height=t.height);return e}},{"pick-by-alias":248}],245:[function(t,e,r){e.exports=function(t){var e=[];return 
t.replace(i,(function(t,r,i){var o=r.toLowerCase();for(i=function(t){var e=t.match(a);return e?e.map(Number):[]}(i),"m"==o&&i.length>2&&(e.push([r].concat(i.splice(0,2))),o="l",r="m"==r?"l":"L");;){if(i.length==n[o])return i.unshift(r),e.push(i);if(i.length=-t},pointBetween:function(e,r,n){var i=e[1]-r[1],a=n[0]-r[0],o=e[0]-r[0],s=n[1]-r[1],l=o*a+i*s;return!(l-t)},pointsSameX:function(e,r){return Math.abs(e[0]-r[0])t!=o-i>t&&(a-c)*(i-u)/(o-u)+c-n>t&&(s=!s),a=c,o=u}return s}};return e}},{}],252:[function(t,e,r){var n={toPolygon:function(t,e){function r(e){if(e.length<=0)return t.segments({inverted:!1,regions:[]});function r(e){var r=e.slice(0,e.length-1);return t.segments({inverted:!1,regions:[r]})}for(var n=r(e[0]),i=1;i0}))}function u(t,n){var i=t.seg,a=n.seg,o=i.start,s=i.end,c=a.start,u=a.end;r&&r.checkIntersection(i,a);var f=e.linesIntersect(o,s,c,u);if(!1===f){if(!e.pointsCollinear(o,s,c))return!1;if(e.pointsSame(o,u)||e.pointsSame(s,c))return!1;var h=e.pointsSame(o,c),p=e.pointsSame(s,u);if(h&&p)return n;var d=!h&&e.pointBetween(o,c,u),m=!p&&e.pointBetween(s,c,u);if(h)return m?l(n,s):l(t,u),n;d&&(p||(m?l(n,s):l(t,u)),l(n,o))}else 0===f.alongA&&(-1===f.alongB?l(t,c):0===f.alongB?l(t,f.pt):1===f.alongB&&l(t,u)),0===f.alongB&&(-1===f.alongA?l(n,o):0===f.alongA?l(n,f.pt):1===f.alongA&&l(n,s));return!1}for(var f=[];!a.isEmpty();){var h=a.getHead();if(r&&r.vert(h.pt[0]),h.isStart){r&&r.segmentNew(h.seg,h.primary);var p=c(h),d=p.before?p.before.ev:null,m=p.after?p.after.ev:null;function g(){if(d){var t=u(h,d);if(t)return t}return!!m&&u(h,m)}r&&r.tempStatus(h.seg,!!d&&d.seg,!!m&&m.seg);var v,y=g();if(y){var x;if(t)(x=null===h.seg.myFill.below||h.seg.myFill.above!==h.seg.myFill.below)&&(y.seg.myFill.above=!y.seg.myFill.above);else 
y.seg.otherFill=h.seg.myFill;r&&r.segmentUpdate(y.seg),h.other.remove(),h.remove()}if(a.getHead()!==h){r&&r.rewind(h.seg);continue}if(t)x=null===h.seg.myFill.below||h.seg.myFill.above!==h.seg.myFill.below,h.seg.myFill.below=m?m.seg.myFill.above:i,h.seg.myFill.above=x?!h.seg.myFill.below:h.seg.myFill.below;else if(null===h.seg.otherFill)v=m?h.primary===m.primary?m.seg.otherFill.above:m.seg.myFill.above:h.primary?o:i,h.seg.otherFill={above:v,below:v};r&&r.status(h.seg,!!d&&d.seg,!!m&&m.seg),h.other.status=p.insert(n.node({ev:h}))}else{var b=h.status;if(null===b)throw new Error("PolyBool: Zero-length segment detected; your epsilon is probably too small or too large");if(s.exists(b.prev)&&s.exists(b.next)&&u(b.prev.ev,b.next.ev),r&&r.statusRemove(b.ev.seg),b.remove(),!h.primary){var _=h.seg.myFill;h.seg.myFill=h.seg.otherFill,h.seg.otherFill=_}f.push(h.seg)}a.getHead().remove()}return r&&r.done(),f}return t?{addRegion:function(t){for(var n,i,a,o=t[t.length-1],l=0;l0&&!this.aborted;){var r=this.ifds_to_read.shift();r.offset&&this.scan_ifd(r.id,r.offset,t)}},a.prototype.read_uint16=function(t){var e=this.input;if(t+2>e.length)throw n("unexpected EOF","EBADDATA");return this.big_endian?256*e[t]+e[t+1]:e[t]+256*e[t+1]},a.prototype.read_uint32=function(t){var e=this.input;if(t+4>e.length)throw n("unexpected EOF","EBADDATA");return this.big_endian?16777216*e[t]+65536*e[t+1]+256*e[t+2]+e[t+3]:e[t]+256*e[t+1]+65536*e[t+2]+16777216*e[t+3]},a.prototype.is_subifd_link=function(t,e){return 0===t&&34665===e||0===t&&34853===e||34665===t&&40965===e},a.prototype.exif_format_length=function(t){switch(t){case 1:case 2:case 6:case 7:return 1;case 3:case 8:return 2;case 4:case 9:case 11:return 4;case 5:case 10:case 12:return 8;default:return 0}},a.prototype.exif_format_read=function(t,e){var r;switch(t){case 1:case 2:return r=this.input[e];case 6:return(r=this.input[e])|33554430*(128&r);case 3:return r=this.read_uint16(e);case 8:return(r=this.read_uint16(e))|131070*(32768&r);case 4:return 
r=this.read_uint32(e);case 9:return 0|(r=this.read_uint32(e));case 5:case 10:case 11:case 12:case 7:default:return null}},a.prototype.scan_ifd=function(t,e,r){var a=this.read_uint16(e);e+=2;for(var o=0;othis.input.length)throw n("unexpected EOF","EBADDATA");for(var d=[],m=h,g=0;g0&&(this.ifds_to_read.push({id:s,offset:d[0]}),p=!0),!1===r({is_big_endian:this.big_endian,ifd:t,tag:s,format:l,count:c,entry_offset:e+this.start,data_length:f,data_offset:h+this.start,value:d,is_subifd_link:p}))return void(this.aborted=!0);e+=12}0===t&&this.ifds_to_read.push({id:1,offset:this.read_uint32(e)})},e.exports.ExifParser=a,e.exports.get_orientation=function(t){var e=0;try{return new a(t,0,t.length).each((function(t){if(0===t.ifd&&274===t.tag&&Array.isArray(t.value))return e=t.value[0],!1})),e}catch(t){return-1}}},{}],259:[function(t,e,r){"use strict";var n=t("./common").readUInt16BE,i=t("./common").readUInt32BE;function a(t,e){if(t.length<4+e)return null;var r=i(t,e);return t.length>4&15,i=15&t[4],a=t[5]>>4&15,o=n(t,6),l=8,c=0;ce.width||t.width===e.width&&t.height>e.height?t:e})),i=r.reduce((function(t,e){return t.height>e.height||t.height===e.height&&t.width>e.width?t:e})),n.width>i.height||n.width===i.height&&n.height>i.width?n:i),s=1;e.transforms.forEach((function(t){var e={1:6,2:5,3:8,4:7,5:4,6:3,7:2,8:1},r={1:4,2:3,3:2,4:1,5:6,6:5,7:8,8:7};if("imir"===t.type&&(s=0===t.value?r[s]:e[s=e[s=r[s]]]),"irot"===t.type)for(var n=0;n1&&(h.variants=f.variants),f.orientation&&(h.orientation=f.orientation),f.exif_location&&f.exif_location.offset+f.exif_location.length<=t.length){var p=a(t,f.exif_location.offset),d=t.slice(f.exif_location.offset+p+4,f.exif_location.offset+f.exif_location.length),m=s.get_orientation(d);m>0&&(h.orientation=m)}return h}}}}}}},{"../common":257,"../exif_utils":258,"../miaf_utils":259}],261:[function(t,e,r){"use strict";var 
n=t("../common").str2arr,i=t("../common").sliceEq,a=t("../common").readUInt16LE,o=n("BM");e.exports=function(t){if(!(t.length<26)&&i(t,0,o))return{width:a(t,18),height:a(t,22),type:"bmp",mime:"image/bmp",wUnits:"px",hUnits:"px"}}},{"../common":257}],262:[function(t,e,r){"use strict";var n=t("../common").str2arr,i=t("../common").sliceEq,a=t("../common").readUInt16LE,o=n("GIF87a"),s=n("GIF89a");e.exports=function(t){if(!(t.length<10)&&(i(t,0,o)||i(t,0,s)))return{width:a(t,6),height:a(t,8),type:"gif",mime:"image/gif",wUnits:"px",hUnits:"px"}}},{"../common":257}],263:[function(t,e,r){"use strict";var n=t("../common").readUInt16LE;e.exports=function(t){var e=n(t,0),r=n(t,2),i=n(t,4);if(0===e&&1===r&&i){for(var a=[],o={width:0,height:0},s=0;so.width||c>o.height)&&(o=u)}return{width:o.width,height:o.height,variants:a,type:"ico",mime:"image/x-icon",wUnits:"px",hUnits:"px"}}}},{"../common":257}],264:[function(t,e,r){"use strict";var n=t("../common").readUInt16BE,i=t("../common").str2arr,a=t("../common").sliceEq,o=t("../exif_utils"),s=i("Exif\0\0");e.exports=function(t){if(!(t.length<2)&&255===t[0]&&216===t[1]&&255===t[2])for(var e=2;;){for(;;){if(t.length-e<2)return;if(255===t[e++])break}for(var r,i,l=t[e++];255===l;)l=t[e++];if(208<=l&&l<=217||1===l)r=0;else{if(!(192<=l&&l<=254))return;if(t.length-e<2)return;r=n(t,e)-2,e+=2}if(217===l||218===l)return;if(225===l&&r>=10&&a(t,e,s)&&(i=o.get_orientation(t.slice(e+6,e+r))),r>=5&&192<=l&&l<=207&&196!==l&&200!==l&&204!==l){if(t.length-e0&&(c.orientation=i),c}e+=r}}},{"../common":257,"../exif_utils":258}],265:[function(t,e,r){"use strict";var n=t("../common").str2arr,i=t("../common").sliceEq,a=t("../common").readUInt32BE,o=n("\x89PNG\r\n\x1a\n"),s=n("IHDR");e.exports=function(t){if(!(t.length<24)&&i(t,0,o)&&i(t,12,s))return{width:a(t,16),height:a(t,20),type:"png",mime:"image/png",wUnits:"px",hUnits:"px"}}},{"../common":257}],266:[function(t,e,r){"use strict";var 
n=t("../common").str2arr,i=t("../common").sliceEq,a=t("../common").readUInt32BE,o=n("8BPS\0\x01");e.exports=function(t){if(!(t.length<22)&&i(t,0,o))return{width:a(t,18),height:a(t,14),type:"psd",mime:"image/vnd.adobe.photoshop",wUnits:"px",hUnits:"px"}}},{"../common":257}],267:[function(t,e,r){"use strict";function n(t){return"number"==typeof t&&isFinite(t)&&t>0}var i=/<[-_.:a-zA-Z0-9][^>]*>/,a=/^<([-_.:a-zA-Z0-9]+:)?svg\s/,o=/[^-]\bwidth="([^%]+?)"|[^-]\bwidth='([^%]+?)'/,s=/\bheight="([^%]+?)"|\bheight='([^%]+?)'/,l=/\bview[bB]ox="(.+?)"|\bview[bB]ox='(.+?)'/,c=/in$|mm$|cm$|pt$|pc$|px$|em$|ex$/;function u(t){return c.test(t)?t.match(c)[0]:"px"}e.exports=function(t){if(function(t){var e,r=0,n=t.length;for(239===t[0]&&187===t[1]&&191===t[2]&&(r=3);r>14&16383),type:"webp",mime:"image/webp",wUnits:"px",hUnits:"px"}}}function h(t,e){return{width:1+(t[e+6]<<16|t[e+5]<<8|t[e+4]),height:1+(t[e+9]<t.length)){for(;e+8=10?r=r||u(t,e+8):"VP8L"===p&&d>=9?r=r||f(t,e+8):"VP8X"===p&&d>=10?r=r||h(t,e+8):"EXIF"===p&&(n=s.get_orientation(t.slice(e+8,e+8+d)),e=1/0),e+=8+d}else e++;if(r)return n>0&&(r.orientation=n),r}}}},{"../common":257,"../exif_utils":258}],270:[function(t,e,r){"use strict";e.exports={avif:t("./parse_sync/avif"),bmp:t("./parse_sync/bmp"),gif:t("./parse_sync/gif"),ico:t("./parse_sync/ico"),jpeg:t("./parse_sync/jpeg"),png:t("./parse_sync/png"),psd:t("./parse_sync/psd"),svg:t("./parse_sync/svg"),tiff:t("./parse_sync/tiff"),webp:t("./parse_sync/webp")}},{"./parse_sync/avif":260,"./parse_sync/bmp":261,"./parse_sync/gif":262,"./parse_sync/ico":263,"./parse_sync/jpeg":264,"./parse_sync/png":265,"./parse_sync/psd":266,"./parse_sync/svg":267,"./parse_sync/tiff":268,"./parse_sync/webp":269}],271:[function(t,e,r){"use strict";var n=t("./lib/parsers_sync");e.exports=function(t){return function(t){for(var e=Object.keys(n),r=0;r1)for(var r=1;r1&&(t.scaleRatio=[t.scale[0]*t.viewport.width,t.scale[1]*t.viewport.height],r(t),t.after&&t.after(t))}function 
T(t){if(t){null!=t.length?"number"==typeof t[0]&&(t=[{positions:t}]):Array.isArray(t)||(t=[t]);var e=0,r=0;if(b.groups=x=t.map((function(t,c){var u=x[c];return t?("function"==typeof t?t={after:t}:"number"==typeof t[0]&&(t={positions:t}),t=o(t,{color:"color colors fill",capSize:"capSize cap capsize cap-size",lineWidth:"lineWidth line-width width line thickness",opacity:"opacity alpha",range:"range dataBox",viewport:"viewport viewBox",errors:"errors error",positions:"positions position data points"}),u||(x[c]=u={id:c,scale:null,translate:null,scaleFract:null,translateFract:null,draw:!0},t=s({},y,t)),a(u,t,[{lineWidth:function(t){return.5*+t},capSize:function(t){return.5*+t},opacity:parseFloat,errors:function(t){return t=l(t),r+=t.length,t},positions:function(t,r){return t=l(t,"float64"),r.count=Math.floor(t.length/2),r.bounds=n(t,2),r.offset=e,e+=r.count,t}},{color:function(t,e){var r=e.count;if(t||(t="transparent"),!Array.isArray(t)||"number"==typeof t[0]){var n=t;t=Array(r);for(var a=0;a 0. && baClipping < length(normalWidth * endBotJoin)) {\n\t\t//handle miter clipping\n\t\tbTopCoord -= normalWidth * endTopJoin;\n\t\tbTopCoord += normalize(endTopJoin * normalWidth) * baClipping;\n\t}\n\n\tif (nextReverse) {\n\t\t//make join rectangular\n\t\tvec2 miterShift = normalWidth * endJoinDirection * miterLimit * .5;\n\t\tfloat normalAdjust = 1. - min(miterLimit / endMiterRatio, 1.);\n\t\tbBotCoord = bCoord + miterShift - normalAdjust * normalWidth * currNormal * .5;\n\t\tbTopCoord = bCoord + miterShift + normalAdjust * normalWidth * currNormal * .5;\n\t}\n\telse if (!prevReverse && abClipping > 0. 
&& abClipping < length(normalWidth * startBotJoin)) {\n\t\t//handle miter clipping\n\t\taBotCoord -= normalWidth * startBotJoin;\n\t\taBotCoord += normalize(startBotJoin * normalWidth) * abClipping;\n\t}\n\n\tvec2 aTopPosition = (aTopCoord) * adjustedScale + translate;\n\tvec2 aBotPosition = (aBotCoord) * adjustedScale + translate;\n\n\tvec2 bTopPosition = (bTopCoord) * adjustedScale + translate;\n\tvec2 bBotPosition = (bBotCoord) * adjustedScale + translate;\n\n\t//position is normalized 0..1 coord on the screen\n\tvec2 position = (aTopPosition * lineTop + aBotPosition * lineBot) * lineStart + (bTopPosition * lineTop + bBotPosition * lineBot) * lineEnd;\n\n\tstartCoord = aCoord * scaleRatio + translate * viewport.zw + viewport.xy;\n\tendCoord = bCoord * scaleRatio + translate * viewport.zw + viewport.xy;\n\n\tgl_Position = vec4(position * 2.0 - 1.0, depth, 1);\n\n\tenableStartMiter = step(dot(currTangent, prevTangent), .5);\n\tenableEndMiter = step(dot(currTangent, nextTangent), .5);\n\n\t//bevel miter cutoffs\n\tif (miterMode == 1.) {\n\t\tif (enableStartMiter == 1.) {\n\t\t\tvec2 startMiterWidth = vec2(startJoinDirection) * thickness * miterLimit * .5;\n\t\t\tstartCutoff = vec4(aCoord, aCoord);\n\t\t\tstartCutoff.zw += vec2(-startJoinDirection.y, startJoinDirection.x) / scaleRatio;\n\t\t\tstartCutoff = startCutoff * scaleRatio.xyxy + translate.xyxy * viewport.zwzw;\n\t\t\tstartCutoff += viewport.xyxy;\n\t\t\tstartCutoff += startMiterWidth.xyxy;\n\t\t}\n\n\t\tif (enableEndMiter == 1.) {\n\t\t\tvec2 endMiterWidth = vec2(endJoinDirection) * thickness * miterLimit * .5;\n\t\t\tendCutoff = vec4(bCoord, bCoord);\n\t\t\tendCutoff.zw += vec2(-endJoinDirection.y, endJoinDirection.x) / scaleRatio;\n\t\t\tendCutoff = endCutoff * scaleRatio.xyxy + translate.xyxy * viewport.zwzw;\n\t\t\tendCutoff += viewport.xyxy;\n\t\t\tendCutoff += endMiterWidth.xyxy;\n\t\t}\n\t}\n\n\t//round miter cutoffs\n\telse if (miterMode == 2.) {\n\t\tif (enableStartMiter == 1.) 
{\n\t\t\tvec2 startMiterWidth = vec2(startJoinDirection) * thickness * abs(dot(startJoinDirection, currNormal)) * .5;\n\t\t\tstartCutoff = vec4(aCoord, aCoord);\n\t\t\tstartCutoff.zw += vec2(-startJoinDirection.y, startJoinDirection.x) / scaleRatio;\n\t\t\tstartCutoff = startCutoff * scaleRatio.xyxy + translate.xyxy * viewport.zwzw;\n\t\t\tstartCutoff += viewport.xyxy;\n\t\t\tstartCutoff += startMiterWidth.xyxy;\n\t\t}\n\n\t\tif (enableEndMiter == 1.) {\n\t\t\tvec2 endMiterWidth = vec2(endJoinDirection) * thickness * abs(dot(endJoinDirection, currNormal)) * .5;\n\t\t\tendCutoff = vec4(bCoord, bCoord);\n\t\t\tendCutoff.zw += vec2(-endJoinDirection.y, endJoinDirection.x) / scaleRatio;\n\t\t\tendCutoff = endCutoff * scaleRatio.xyxy + translate.xyxy * viewport.zwzw;\n\t\t\tendCutoff += viewport.xyxy;\n\t\t\tendCutoff += endMiterWidth.xyxy;\n\t\t}\n\t}\n}\n"]),frag:o(["precision highp float;\n#define GLSLIFY 1\n\nuniform float dashLength, pixelRatio, thickness, opacity, id, miterMode;\nuniform sampler2D dashTexture;\n\nvarying vec4 fragColor;\nvarying vec2 tangent;\nvarying vec4 startCutoff, endCutoff;\nvarying vec2 startCoord, endCoord;\nvarying float enableStartMiter, enableEndMiter;\n\nfloat distToLine(vec2 p, vec2 a, vec2 b) {\n\tvec2 diff = b - a;\n\tvec2 perp = normalize(vec2(-diff.y, diff.x));\n\treturn dot(p - a, perp);\n}\n\nvoid main() {\n\tfloat alpha = 1., distToStart, distToEnd;\n\tfloat cutoff = thickness * .5;\n\n\t//bevel miter\n\tif (miterMode == 1.) {\n\t\tif (enableStartMiter == 1.) {\n\t\t\tdistToStart = distToLine(gl_FragCoord.xy, startCutoff.xy, startCutoff.zw);\n\t\t\tif (distToStart < -1.) {\n\t\t\t\tdiscard;\n\t\t\t\treturn;\n\t\t\t}\n\t\t\talpha *= min(max(distToStart + 1., 0.), 1.);\n\t\t}\n\n\t\tif (enableEndMiter == 1.) {\n\t\t\tdistToEnd = distToLine(gl_FragCoord.xy, endCutoff.xy, endCutoff.zw);\n\t\t\tif (distToEnd < -1.) 
{\n\t\t\t\tdiscard;\n\t\t\t\treturn;\n\t\t\t}\n\t\t\talpha *= min(max(distToEnd + 1., 0.), 1.);\n\t\t}\n\t}\n\n\t// round miter\n\telse if (miterMode == 2.) {\n\t\tif (enableStartMiter == 1.) {\n\t\t\tdistToStart = distToLine(gl_FragCoord.xy, startCutoff.xy, startCutoff.zw);\n\t\t\tif (distToStart < 0.) {\n\t\t\t\tfloat radius = length(gl_FragCoord.xy - startCoord);\n\n\t\t\t\tif(radius > cutoff + .5) {\n\t\t\t\t\tdiscard;\n\t\t\t\t\treturn;\n\t\t\t\t}\n\n\t\t\t\talpha -= smoothstep(cutoff - .5, cutoff + .5, radius);\n\t\t\t}\n\t\t}\n\n\t\tif (enableEndMiter == 1.) {\n\t\t\tdistToEnd = distToLine(gl_FragCoord.xy, endCutoff.xy, endCutoff.zw);\n\t\t\tif (distToEnd < 0.) {\n\t\t\t\tfloat radius = length(gl_FragCoord.xy - endCoord);\n\n\t\t\t\tif(radius > cutoff + .5) {\n\t\t\t\t\tdiscard;\n\t\t\t\t\treturn;\n\t\t\t\t}\n\n\t\t\t\talpha -= smoothstep(cutoff - .5, cutoff + .5, radius);\n\t\t\t}\n\t\t}\n\t}\n\n\tfloat t = fract(dot(tangent, gl_FragCoord.xy) / dashLength) * .5 + .25;\n\tfloat dash = texture2D(dashTexture, vec2(t, .5)).r;\n\n\tgl_FragColor = fragColor;\n\tgl_FragColor.a *= alpha * opacity * dash;\n}\n"]),attributes:{lineEnd:{buffer:r,divisor:0,stride:8,offset:0},lineTop:{buffer:r,divisor:0,stride:8,offset:4},aColor:{buffer:t.prop("colorBuffer"),stride:4,offset:0,divisor:1},bColor:{buffer:t.prop("colorBuffer"),stride:4,offset:4,divisor:1},prevCoord:{buffer:t.prop("positionBuffer"),stride:8,offset:0,divisor:1},aCoord:{buffer:t.prop("positionBuffer"),stride:8,offset:8,divisor:1},bCoord:{buffer:t.prop("positionBuffer"),stride:8,offset:16,divisor:1},nextCoord:{buffer:t.prop("positionBuffer"),stride:8,offset:24,divisor:1}}},n))}catch(t){e=i}return{fill:t({primitive:"triangle",elements:function(t,e){return e.triangles},offset:0,vert:o(["precision highp float;\n#define GLSLIFY 1\n\nattribute vec2 position, positionFract;\n\nuniform vec4 color;\nuniform vec2 scale, scaleFract, translate, translateFract;\nuniform float pixelRatio, id;\nuniform vec4 viewport;\nuniform 
float opacity;\n\nvarying vec4 fragColor;\n\nconst float MAX_LINES = 256.;\n\nvoid main() {\n\tfloat depth = (MAX_LINES - 4. - id) / (MAX_LINES);\n\n\tvec2 position = position * scale + translate\n + positionFract * scale + translateFract\n + position * scaleFract\n + positionFract * scaleFract;\n\n\tgl_Position = vec4(position * 2.0 - 1.0, depth, 1);\n\n\tfragColor = color / 255.;\n\tfragColor.a *= opacity;\n}\n"]),frag:o(["precision highp float;\n#define GLSLIFY 1\n\nvarying vec4 fragColor;\n\nvoid main() {\n\tgl_FragColor = fragColor;\n}\n"]),uniforms:{scale:t.prop("scale"),color:t.prop("fill"),scaleFract:t.prop("scaleFract"),translateFract:t.prop("translateFract"),translate:t.prop("translate"),opacity:t.prop("opacity"),pixelRatio:t.context("pixelRatio"),id:t.prop("id"),viewport:function(t,e){return[e.viewport.x,e.viewport.y,t.viewportWidth,t.viewportHeight]}},attributes:{position:{buffer:t.prop("positionBuffer"),stride:8,offset:8},positionFract:{buffer:t.prop("positionFractBuffer"),stride:8,offset:8}},blend:n.blend,depth:{enable:!1},scissor:n.scissor,stencil:n.stencil,viewport:n.viewport}),rect:i,miter:e}},v.defaults={dashes:null,join:"miter",miterLimit:1,thickness:10,cap:"square",color:"black",opacity:1,overlay:!1,viewport:null,range:null,close:!1,fill:null},v.prototype.render=function(){for(var t,e=[],r=arguments.length;r--;)e[r]=arguments[r];e.length&&(t=this).update.apply(t,e),this.draw()},v.prototype.draw=function(){for(var t=this,e=[],r=arguments.length;r--;)e[r]=arguments[r];return(e.length?e:this.passes).forEach((function(e,r){var n;if(e&&Array.isArray(e))return(n=t).draw.apply(n,e);"number"==typeof 
e&&(e=t.passes[e]),e&&e.count>1&&e.opacity&&(t.regl._refresh(),e.fill&&e.triangles&&e.triangles.length>2&&t.shaders.fill(e),e.thickness&&(e.scale[0]*e.viewport.width>v.precisionThreshold||e.scale[1]*e.viewport.height>v.precisionThreshold||"rect"===e.join||!e.join&&(e.thickness<=2||e.count>=v.maxPoints)?t.shaders.rect(e):t.shaders.miter(e)))})),this},v.prototype.update=function(t){var e=this;if(t){null!=t.length?"number"==typeof t[0]&&(t=[{positions:t}]):Array.isArray(t)||(t=[t]);var r=this.regl,o=this.gl;if(t.forEach((function(t,f){var d=e.passes[f];if(void 0!==t)if(null!==t){if("number"==typeof t[0]&&(t={positions:t}),t=s(t,{positions:"positions points data coords",thickness:"thickness lineWidth lineWidths line-width linewidth width stroke-width strokewidth strokeWidth",join:"lineJoin linejoin join type mode",miterLimit:"miterlimit miterLimit",dashes:"dash dashes dasharray dash-array dashArray",color:"color colour stroke colors colours stroke-color strokeColor",fill:"fill fill-color fillColor",opacity:"alpha opacity",overlay:"overlay crease overlap intersect",close:"closed close closed-path closePath",range:"range dataBox",viewport:"viewport viewBox",hole:"holes hole hollow",splitNull:"splitNull"}),d||(e.passes[f]=d={id:f,scale:null,scaleFract:null,translate:null,translateFract:null,count:0,hole:[],depth:0,dashLength:1,dashTexture:r.texture({channels:1,data:new Uint8Array([255]),width:1,height:1,mag:"linear",min:"linear"}),colorBuffer:r.buffer({usage:"dynamic",type:"uint8",data:new Uint8Array}),positionBuffer:r.buffer({usage:"dynamic",type:"float",data:new Uint8Array}),positionFractBuffer:r.buffer({usage:"dynamic",type:"float",data:new Uint8Array})},t=a({},v.defaults,t)),null!=t.thickness&&(d.thickness=parseFloat(t.thickness)),null!=t.opacity&&(d.opacity=parseFloat(t.opacity)),null!=t.miterLimit&&(d.miterLimit=parseFloat(t.miterLimit)),null!=t.overlay&&(d.overlay=!!t.overlay,f=D}));(I=I.slice(0,R)).push(D)}for(var F=function(t){var 
e=k.slice(2*z,2*I[t]).concat(D?k.slice(2*D):[]),r=(d.hole||[]).map((function(e){return e-D+(I[t]-z)})),n=c(e,r);n=n.map((function(e){return e+z+(e+zt.length)&&(e=t.length);for(var r=0,n=new Array(e);r 1.0 + delta) {\n\t\tdiscard;\n\t}\n\n\talpha -= smoothstep(1.0 - delta, 1.0 + delta, radius);\n\n\tfloat borderRadius = fragBorderRadius;\n\tfloat ratio = smoothstep(borderRadius - delta, borderRadius + delta, radius);\n\tvec4 color = mix(fragColor, fragBorderColor, ratio);\n\tcolor.a *= alpha * opacity;\n\tgl_FragColor = color;\n}\n"]),l.vert=h(["precision highp float;\n#define GLSLIFY 1\n\nattribute float x, y, xFract, yFract;\nattribute float size, borderSize;\nattribute vec4 colorId, borderColorId;\nattribute float isActive;\n\nuniform bool constPointSize;\nuniform float pixelRatio;\nuniform vec2 paletteSize, scale, scaleFract, translate, translateFract;\nuniform sampler2D paletteTexture;\n\nconst float maxSize = 100.;\n\nvarying vec4 fragColor, fragBorderColor;\nvarying float fragBorderRadius, fragWidth;\n\nfloat pointSizeScale = (constPointSize) ? 2. : pixelRatio;\n\nbool isDirect = (paletteSize.x < 1.);\n\nvec4 getColor(vec4 id) {\n return isDirect ? id / 255. : texture2D(paletteTexture,\n vec2(\n (id.x + .5) / paletteSize.x,\n (id.y + .5) / paletteSize.y\n )\n );\n}\n\nvoid main() {\n // ignore inactive points\n if (isActive == 0.) return;\n\n vec2 position = vec2(x, y);\n vec2 positionFract = vec2(xFract, yFract);\n\n vec4 color = getColor(colorId);\n vec4 borderColor = getColor(borderColorId);\n\n float size = size * maxSize / 255.;\n float borderSize = borderSize * maxSize / 255.;\n\n gl_PointSize = (size + borderSize) * pointSizeScale;\n\n vec2 pos = (position + translate) * scale\n + (positionFract + translateFract) * scale\n + (position + translate) * scaleFract\n + (positionFract + translateFract) * scaleFract;\n\n gl_Position = vec4(pos * 2. - 1., 0., 1.);\n\n fragBorderRadius = 1. - 2. 
* borderSize / (size + borderSize);\n fragColor = color;\n fragBorderColor = borderColor.a == 0. || borderSize == 0. ? vec4(color.rgb, 0.) : borderColor;\n fragWidth = 1. / gl_PointSize;\n}\n"]),g&&(l.frag=l.frag.replace("smoothstep","smoothStep"),s.frag=s.frag.replace("smoothstep","smoothStep")),this.drawCircle=t(l)}b.defaults={color:"black",borderColor:"transparent",borderSize:0,size:12,opacity:1,marker:void 0,viewport:null,range:null,pixelSize:null,count:0,offset:0,bounds:null,positions:[],snap:1e4},b.prototype.render=function(){return arguments.length&&this.update.apply(this,arguments),this.draw(),this},b.prototype.draw=function(){for(var t=this,e=arguments.length,r=new Array(e),n=0;nn)?e.tree=u(t,{bounds:f}):n&&n.length&&(e.tree=n),e.tree){var h={primitive:"points",usage:"static",data:e.tree,type:"uint32"};e.elements?e.elements(h):e.elements=o.elements(h)}var p=v.float32(t);return i({data:p,usage:"dynamic"}),a({data:v.fract32(t,p),usage:"dynamic"}),s({data:new Uint8Array(c),type:"uint8",usage:"stream"}),t}},{marker:function(e,r,n){var i=r.activation;if(i.forEach((function(t){return t&&t.destroy&&t.destroy()})),i.length=0,e&&"number"!=typeof e[0]){for(var a=[],s=0,l=Math.min(e.length,r.count);s=0)return a;if(t instanceof Uint8Array||t instanceof Uint8ClampedArray)e=t;else{e=new Uint8Array(t.length);for(var o=0,s=t.length;o4*n&&(this.tooManyColors=!0),this.updatePalette(r),1===i.length?i[0]:i},b.prototype.updatePalette=function(t){if(!this.tooManyColors){var e=this.maxColors,r=this.paletteTexture,n=Math.ceil(.25*t.length/e);if(n>1)for(var i=.25*(t=t.slice()).length%e;i2?(s[0],s[2],n=s[1],i=s[3]):s.length?(n=s[0],i=s[1]):(s.x,n=s.y,s.x+s.width,i=s.y+s.height),l.length>2?(a=l[0],o=l[2],l[1],l[3]):l.length?(a=l[0],o=l[1]):(a=l.x,l.y,o=l.x+l.width,l.y+l.height),[a,n,o,i]}function p(t){if("number"==typeof t)return[t,t,t,t];if(2===t.length)return[t[0],t[1],t[0],t[1]];var e=l(t);return[e.x,e.y,e.x+e.width,e.y+e.height]}e.exports=u,u.prototype.render=function(){for(var 
t,e=this,r=[],n=arguments.length;n--;)r[n]=arguments[n];return r.length&&(t=this).update.apply(t,r),this.regl.attributes.preserveDrawingBuffer?this.draw():(this.dirty?null==this.planned&&(this.planned=o((function(){e.draw(),e.dirty=!0,e.planned=null}))):(this.draw(),this.dirty=!0,o((function(){e.dirty=!1}))),this)},u.prototype.update=function(){for(var t,e=[],r=arguments.length;r--;)e[r]=arguments[r];if(e.length){for(var n=0;nk))&&(s.lower||!(T>>=e))<<3,(e|=r=(15<(t>>>=r))<<2)|(r=(3<(t>>>=r))<<1)|t>>>r>>1}function s(){function t(t){t:{for(var e=16;268435456>=e;e*=16)if(t<=e){t=e;break t}t=0}return 0<(e=r[o(t)>>2]).length?e.pop():new ArrayBuffer(t)}function e(t){r[o(t.byteLength)>>2].push(t)}var r=a(8,(function(){return[]}));return{alloc:t,free:e,allocType:function(e,r){var n=null;switch(e){case 5120:n=new Int8Array(t(r),0,r);break;case 5121:n=new Uint8Array(t(r),0,r);break;case 5122:n=new Int16Array(t(2*r),0,r);break;case 5123:n=new Uint16Array(t(2*r),0,r);break;case 5124:n=new Int32Array(t(4*r),0,r);break;case 5125:n=new Uint32Array(t(4*r),0,r);break;case 5126:n=new Float32Array(t(4*r),0,r);break;default:return null}return n.length!==r?n.subarray(0,r):n},freeType:function(t){e(t.buffer)}}}function l(t){return!!t&&"object"==typeof t&&Array.isArray(t.shape)&&Array.isArray(t.stride)&&"number"==typeof t.offset&&t.shape.length===t.stride.length&&(Array.isArray(t.data)||J(t.data))}function c(t,e,r,n,i,a){for(var o=0;o(i=s)&&(i=n.buffer.byteLength,5123===f?i>>=1:5125===f&&(i>>=2)),n.vertCount=i,i=o,0>o&&(i=4,1===(o=n.buffer.dimension)&&(i=0),2===o&&(i=1),3===o&&(i=4)),n.primType=i}function o(t){n.elementsCount--,delete s[t.id],t.buffer.destroy(),t.buffer=null}var s={},c=0,u={uint8:5121,uint16:5123};e.oes_element_index_uint&&(u.uint32=5125),i.prototype.bind=function(){this.buffer.bind()};var f=[];return{create:function(t,e){function s(t){if(t)if("number"==typeof t)c(t),f.primType=4,f.vertCount=0|t,f.type=5121;else{var 
e=null,r=35044,n=-1,i=-1,o=0,h=0;Array.isArray(t)||J(t)||l(t)?e=t:("data"in t&&(e=t.data),"usage"in t&&(r=et[t.usage]),"primitive"in t&&(n=at[t.primitive]),"count"in t&&(i=0|t.count),"type"in t&&(h=u[t.type]),"length"in t?o=0|t.length:(o=i,5123===h||5122===h?o*=2:5125!==h&&5124!==h||(o*=4))),a(f,e,r,n,i,o,h)}else c(),f.primType=4,f.vertCount=0,f.type=5121;return s}var c=r.create(null,34963,!0),f=new i(c._buffer);return n.elementsCount++,s(t),s._reglType="elements",s._elements=f,s.subdata=function(t,e){return c.subdata(t,e),s},s.destroy=function(){o(f)},s},createStream:function(t){var e=f.pop();return e||(e=new i(r.create(null,34963,!0,!1)._buffer)),a(e,t,35040,-1,-1,0,0),e},destroyStream:function(t){f.push(t)},getElements:function(t){return"function"==typeof t&&t._elements instanceof i?t._elements:null},clear:function(){K(s).forEach(o)}}}function m(t){for(var e=X.allocType(5123,t.length),r=0;r>>31<<15,i=(a<<1>>>24)-127,a=a>>13&1023;e[r]=-24>i?n:-14>i?n+(a+1024>>-14-i):15>=i,r.height>>=i,p(r,n[i]),t.mipmask|=1<e;++e)t.images[e]=null;return t}function C(t){for(var e=t.images,r=0;re){for(var r=0;r=--this.refCount&&F(this)}}),o.profile&&(a.getTotalTextureSize=function(){var t=0;return Object.keys(yt).forEach((function(e){t+=yt[e].stats.size})),t}),{create2D:function(e,r){function n(t,e){var r=i.texInfo;P.call(r);var a=L();return"number"==typeof t?M(a,0|t,"number"==typeof e?0|e:0|t):t?(I(r,t),S(a,t)):M(a,1,1),r.genMipmaps&&(a.mipmask=(a.width<<1)-1),i.mipmask=a.mipmask,c(i,a),i.internalformat=a.internalformat,n.width=a.width,n.height=a.height,D(i),E(a,3553),O(r,3553),R(),C(a),o.profile&&(i.stats.size=T(i.internalformat,i.type,a.width,a.height,r.genMipmaps,!1)),n.format=tt[i.internalformat],n.type=et[i.type],n.mag=rt[r.magFilter],n.min=nt[r.minFilter],n.wrapS=it[r.wrapS],n.wrapT=it[r.wrapT],n}var i=new z(3553);return yt[i.id]=i,a.textureCount++,n(e,r),n.subimage=function(t,e,r,a){e|=0,r|=0,a|=0;var o=v();return 
c(o,i),o.width=0,o.height=0,p(o,t),o.width=o.width||(i.width>>a)-e,o.height=o.height||(i.height>>a)-r,D(i),d(o,3553,e,r,a),R(),k(o),n},n.resize=function(e,r){var a=0|e,s=0|r||a;if(a===i.width&&s===i.height)return n;n.width=i.width=a,n.height=i.height=s,D(i);for(var l=0;i.mipmask>>l;++l){var c=a>>l,u=s>>l;if(!c||!u)break;t.texImage2D(3553,l,i.format,c,u,0,i.format,i.type,null)}return R(),o.profile&&(i.stats.size=T(i.internalformat,i.type,a,s,!1,!1)),n},n._reglType="texture2d",n._texture=i,o.profile&&(n.stats=i.stats),n.destroy=function(){i.decRef()},n},createCube:function(e,r,n,i,s,l){function f(t,e,r,n,i,a){var s,l=h.texInfo;for(P.call(l),s=0;6>s;++s)m[s]=L();if("number"!=typeof t&&t){if("object"==typeof t)if(e)S(m[0],t),S(m[1],e),S(m[2],r),S(m[3],n),S(m[4],i),S(m[5],a);else if(I(l,t),u(h,t),"faces"in t)for(t=t.faces,s=0;6>s;++s)c(m[s],h),S(m[s],t[s]);else for(s=0;6>s;++s)S(m[s],t)}else for(t=0|t||1,s=0;6>s;++s)M(m[s],t,t);for(c(h,m[0]),h.mipmask=l.genMipmaps?(m[0].width<<1)-1:m[0].mipmask,h.internalformat=m[0].internalformat,f.width=m[0].width,f.height=m[0].height,D(h),s=0;6>s;++s)E(m[s],34069+s);for(O(l,34067),R(),o.profile&&(h.stats.size=T(h.internalformat,h.type,f.width,f.height,l.genMipmaps,!0)),f.format=tt[h.internalformat],f.type=et[h.type],f.mag=rt[l.magFilter],f.min=nt[l.minFilter],f.wrapS=it[l.wrapS],f.wrapT=it[l.wrapT],s=0;6>s;++s)C(m[s]);return f}var h=new z(34067);yt[h.id]=h,a.cubeCount++;var m=Array(6);return f(e,r,n,i,s,l),f.subimage=function(t,e,r,n,i){r|=0,n|=0,i|=0;var a=v();return c(a,h),a.width=0,a.height=0,p(a,e),a.width=a.width||(h.width>>i)-r,a.height=a.height||(h.height>>i)-n,D(h),d(a,34069+t,r,n,i),R(),k(a),f},f.resize=function(e){if((e|=0)!==h.width){f.width=h.width=e,f.height=h.height=e,D(h);for(var r=0;6>r;++r)for(var n=0;h.mipmask>>n;++n)t.texImage2D(34069+r,n,h.format,e>>n,e>>n,0,h.format,h.type,null);return 
R(),o.profile&&(h.stats.size=T(h.internalformat,h.type,f.width,f.height,!1,!0)),f}},f._reglType="textureCube",f._texture=h,o.profile&&(f.stats=h.stats),f.destroy=function(){h.decRef()},f},clear:function(){for(var e=0;er;++r)if(0!=(e.mipmask&1<>r,e.height>>r,0,e.internalformat,e.type,null);else for(var n=0;6>n;++n)t.texImage2D(34069+n,r,e.internalformat,e.width>>r,e.height>>r,0,e.internalformat,e.type,null);O(e.texInfo,e.target)}))},refresh:function(){for(var e=0;ei;++i){for(c=0;ct;++t)r[t].resize(n);return e.width=e.height=n,e},_reglType:"framebufferCube",destroy:function(){r.forEach((function(t){t.destroy()}))}})},clear:function(){K(k).forEach(g)},restore:function(){x.cur=null,x.next=null,x.dirty=!0,K(k).forEach((function(e){e.framebuffer=t.createFramebuffer(),v(e)}))}})}function M(){this.w=this.z=this.y=this.x=this.state=0,this.buffer=null,this.size=0,this.normalized=!1,this.type=5126,this.divisor=this.stride=this.offset=0}function S(t,e,r,n,i,a,o){function s(){this.id=++f,this.attributes=[],this.elements=null,this.ownsElements=!1,this.offset=this.count=0,this.instances=-1,this.primitive=4;var t=e.oes_vertex_array_object;this.vao=t?t.createVertexArrayOES():null,h[this.id]=this,this.buffers=[]}var c=r.maxAttributes,u=Array(c);for(r=0;r=h.byteLength?c.subdata(h):(c.destroy(),r.buffers[s]=null)),r.buffers[s]||(c=r.buffers[s]=i.create(u,34962,!1,!0)),f.buffer=i.getBuffer(c),f.size=0|f.buffer.dimension,f.normalized=!1,f.type=f.buffer.dtype,f.offset=0,f.stride=0,f.divisor=0,f.state=1,t[s]=1;else i.getBuffer(u)?(f.buffer=i.getBuffer(u),f.size=0|f.buffer.dimension,f.normalized=!1,f.type=f.buffer.dtype,f.offset=0,f.stride=0,f.divisor=0,f.state=1):i.getBuffer(u.buffer)?(f.buffer=i.getBuffer(u.buffer),f.size=0|(+u.size||f.buffer.dimension),f.normalized=!!u.normalized||!1,f.type="type"in u?tt[u.type]:f.buffer.dtype,f.offset=0|(u.offset||0),f.stride=0|(u.stride||0),f.divisor=0|(u.divisor||0),f.state=1):"x"in 
u&&(f.x=+u.x||0,f.y=+u.y||0,f.z=+u.z||0,f.w=+u.w||0,f.state=2)}for(c=0;ct&&(t=e.stats.uniformsCount)})),t},r.getMaxAttributesCount=function(){var t=0;return h.forEach((function(e){e.stats.attributesCount>t&&(t=e.stats.attributesCount)})),t}),{clear:function(){var e=t.deleteShader.bind(t);K(c).forEach(e),c={},K(u).forEach(e),u={},h.forEach((function(e){t.deleteProgram(e.program)})),h.length=0,f={},r.shaderCount=0},program:function(e,n,i,a){var o=f[n];o||(o=f[n]={});var p=o[e];if(p&&(p.refCount++,!a))return p;var d=new s(n,e);return r.shaderCount++,l(d,i,a),p||(o[e]=d),h.push(d),H(d,{destroy:function(){if(d.refCount--,0>=d.refCount){t.deleteProgram(d.program);var e=h.indexOf(d);h.splice(e,1),r.shaderCount--}0>=o[d.vertId].refCount&&(t.deleteShader(u[d.vertId]),delete u[d.vertId],delete f[d.fragId][d.vertId]),Object.keys(f[d.fragId]).length||(t.deleteShader(c[d.fragId]),delete c[d.fragId],delete f[d.fragId])}})},restore:function(){c={},u={};for(var t=0;t>>e|t<<32-e}function P(t,e){var r=(65535&t)+(65535&e);return(t>>16)+(e>>16)+(r>>16)<<16|65535&r}function I(t){return Array.prototype.slice.call(t)}function O(t){return I(t).join("")}function z(t){function e(){var t=[],e=[];return H((function(){t.push.apply(t,I(arguments))}),{def:function(){var r="v"+i++;return e.push(r),0>>4&15)+"0123456789abcdef".charAt(15&e);return r}(function(t){for(var e=Array(t.length>>2),r=0;r>5]|=(255&t.charCodeAt(r/8))<<24-r%32;var n,i,a,o,s,l,c,u,f,h,p,d=8*t.length;for(t=[1779033703,-1150833019,1013904242,-1521486534,1359893119,-1694144372,528734635,1541459225],r=Array(64),e[d>>5]|=128<<24-d%32,e[15+(d+64>>9<<4)]=d,u=0;uf;f++){var m;if(16>f)r[f]=e[f+u];else 
h=f,p=P(p=C(p=r[f-2],17)^C(p,19)^p>>>10,r[f-7]),m=C(m=r[f-15],7)^C(m,18)^m>>>3,r[h]=P(P(p,m),r[f-16]);h=P(P(P(P(c,h=C(h=o,6)^C(h,11)^C(h,25)),o&s^~o&l),kt[f]),r[f]),p=P(c=C(c=d,2)^C(c,13)^C(c,22),d&n^d&i^n&i),c=l,l=s,s=o,o=P(a,h),a=i,i=n,n=d,d=P(h,p)}t[0]=P(d,t[0]),t[1]=P(n,t[1]),t[2]=P(i,t[2]),t[3]=P(a,t[3]),t[4]=P(o,t[4]),t[5]=P(s,t[5]),t[6]=P(l,t[6]),t[7]=P(c,t[7])}for(e="",r=0;r<32*t.length;r+=8)e+=String.fromCharCode(t[r>>5]>>>24-r%32&255);return e}(function(t){for(var e,r,n="",i=-1;++i=e&&56320<=r&&57343>=r&&(e=65536+((1023&e)<<10)+(1023&r),i++),127>=e?n+=String.fromCharCode(e):2047>=e?n+=String.fromCharCode(192|e>>>6&31,128|63&e):65535>=e?n+=String.fromCharCode(224|e>>>12&15,128|e>>>6&63,128|63&e):2097151>=e&&(n+=String.fromCharCode(240|e>>>18&7,128|e>>>12&63,128|e>>>6&63,128|63&e));return n}(r))),n[e])?n[e].apply(null,o):(r=Function.apply(null,a.concat(r)),n&&(n[e]=r),r.apply(null,o))}}}function D(t){return Array.isArray(t)||J(t)||l(t)}function R(t){return t.sort((function(t,e){return"viewport"===t?-1:"viewport"===e?1:t"+e+"?"+i+".constant["+e+"]:0;"})).join(""),"}}else{","if(",s,"(",i,".buffer)){",u,"=",a,".createStream(",34962,",",i,".buffer);","}else{",u,"=",a,".getBuffer(",i,".buffer);","}",f,'="type" in ',i,"?",o.glTypes,"[",i,".type]:",u,".dtype;",l.normalized,"=!!",i,".normalized;"),n("size"),n("offset"),n("stride"),n("divisor"),r("}}"),r.exit("if(",l.isStream,"){",a,".destroyStream(",u,");","}"),l}))})),o}function M(t,e,n,i,o){function s(t){var e=c[t];e&&(h[t]=e)}var l=function(t,e){if("string"==typeof(r=t.static).frag&&"string"==typeof r.vert){if(0>1)",s],");")}function e(){r(l,".drawArraysInstancedANGLE(",[d,m,g,s],");")}p&&"null"!==p?y?t():(r("if(",p,"){"),t(),r("}else{"),e(),r("}")):e()}function o(){function t(){r(u+".drawElements("+[d,g,v,m+"<<(("+v+"-5121)>>1)"]+");")}function e(){r(u+".drawArrays("+[d,m,g]+");")}p&&"null"!==p?y?t():(r("if(",p,"){"),t(),r("}else{"),e(),r("}")):e()}var s,l,c=t.shared,u=c.gl,f=c.draw,h=n.draw,p=function(){var 
i=h.elements,a=e;return i?((i.contextDep&&n.contextDynamic||i.propDep)&&(a=r),i=i.append(t,a),h.elementsActive&&a("if("+i+")"+u+".bindBuffer(34963,"+i+".buffer.buffer);")):(i=a.def(),a(i,"=",f,".","elements",";","if(",i,"){",u,".bindBuffer(",34963,",",i,".buffer.buffer);}","else if(",c.vao,".currentVAO){",i,"=",t.shared.elements+".getElements("+c.vao,".currentVAO.elements);",rt?"":"if("+i+")"+u+".bindBuffer(34963,"+i+".buffer.buffer);","}")),i}(),d=i("primitive"),m=i("offset"),g=function(){var i=h.count,a=e;return i?((i.contextDep&&n.contextDynamic||i.propDep)&&(a=r),i=i.append(t,a)):i=a.def(f,".","count"),i}();if("number"==typeof g){if(0===g)return}else r("if(",g,"){"),r.exit("}");$&&(s=i("instances"),l=t.instancing);var v=p+".type",y=h.elements&&B(h.elements)&&!h.vaoActive;$&&("number"!=typeof s||0<=s)?"string"==typeof s?(r("if(",s,">0){"),a(),r("}else if(",s,"<0){"),o(),r("}")):a():o()}function q(t,e,r,n,i){return i=(e=_()).proc("body",i),$&&(e.instancing=i.def(e.shared.extensions,".angle_instanced_arrays")),t(e,i,r,n),e.compile().body}function Y(t,e,r,n){P(t,e),r.useVAO?r.drawVAO?e(t.shared.vao,".setVAO(",r.drawVAO.append(t,e),");"):e(t.shared.vao,".setVAO(",t.shared.vao,".targetVAO);"):(e(t.shared.vao,".setVAO(null);"),O(t,e,r,n.attributes,(function(){return!0}))),U(t,e,r,n.uniforms,(function(){return!0}),!1),V(t,e,e,r)}function W(t,e,r,n){function i(){return!0}t.batchId="a1",P(t,e),O(t,e,r,n.attributes,i),U(t,e,r,n.uniforms,i,!1),V(t,e,e,r)}function X(t,e,r,n){function i(t){return t.contextDep&&o||t.propDep}function a(t){return!i(t)}P(t,e);var o=r.contextDep,s=e.def(),l=e.def();t.shared.props=l,t.batchId=s;var 
c=t.scope(),u=t.scope();e(c.entry,"for(",s,"=0;",s,"<","a1",";++",s,"){",l,"=","a0","[",s,"];",u,"}",c.exit),r.needsContext&&S(t,u,r.context),r.needsFramebuffer&&E(t,u,r.framebuffer),C(t,u,r.state,i),r.profile&&i(r.profile)&&I(t,u,r,!1,!0),n?(r.useVAO?r.drawVAO?i(r.drawVAO)?u(t.shared.vao,".setVAO(",r.drawVAO.append(t,u),");"):c(t.shared.vao,".setVAO(",r.drawVAO.append(t,c),");"):c(t.shared.vao,".setVAO(",t.shared.vao,".targetVAO);"):(c(t.shared.vao,".setVAO(null);"),O(t,c,r,n.attributes,a),O(t,u,r,n.attributes,i)),U(t,c,r,n.uniforms,a,!1),U(t,u,r,n.uniforms,i,!0),V(t,c,u,r)):(e=t.global.def("{}"),n=r.shader.progVar.append(t,u),l=u.def(n,".id"),c=u.def(e,"[",l,"]"),u(t.shared.gl,".useProgram(",n,".program);","if(!",c,"){",c,"=",e,"[",l,"]=",t.link((function(e){return q(W,t,r,e,2)})),"(",n,");}",c,".call(this,a0[",s,"],",s,");"))}function Z(t,r){function n(e){var n=r.shader[e];n&&(n=n.append(t,i),isNaN(n)?i.set(a.shader,"."+e,n):i.set(a.shader,"."+e,t.link(n,{stable:!0})))}var i=t.proc("scope",3);t.batchId="a2";var a=t.shared,o=a.current;if(S(t,i,r.context),r.framebuffer&&r.framebuffer.append(t,i),R(Object.keys(r.state)).forEach((function(e){var n=r.state[e],o=n.append(t,i);g(o)?o.forEach((function(r,n){isNaN(r)?i.set(t.next[e],"["+n+"]",r):i.set(t.next[e],"["+n+"]",t.link(r,{stable:!0}))})):B(n)?i.set(a.next,"."+e,t.link(o,{stable:!0})):i.set(a.next,"."+e,o)})),I(t,i,r,!0,!0),["elements","offset","count","instances","primitive"].forEach((function(e){var n=r.draw[e];n&&(n=n.append(t,i),isNaN(n)?i.set(a.draw,"."+e,n):i.set(a.draw,"."+e,t.link(n),{stable:!0}))})),Object.keys(r.uniforms).forEach((function(n){var o=r.uniforms[n].append(t,i);Array.isArray(o)&&(o="["+o.map((function(e){return isNaN(e)?e:t.link(e,{stable:!0})}))+"]"),i.set(a.uniforms,"["+t.link(e.id(n),{stable:!0})+"]",o)})),Object.keys(r.attributes).forEach((function(e){var n=r.attributes[e].append(t,i),a=t.scopeAttrib(e);Object.keys(new K).forEach((function(t){i.set(a,"."+t,n[t])}))})),r.scopeVAO){var 
s=r.scopeVAO.append(t,i);isNaN(s)?i.set(a.vao,".targetVAO",s):i.set(a.vao,".targetVAO",t.link(s,{stable:!0}))}n("vert"),n("frag"),0=--this.refCount&&o(this)},i.profile&&(n.getTotalRenderbufferSize=function(){var t=0;return Object.keys(u).forEach((function(e){t+=u[e].stats.size})),t}),{create:function(e,r){function o(e,r){var n=0,a=0,u=32854;if("object"==typeof e&&e?("shape"in e?(n=0|(a=e.shape)[0],a=0|a[1]):("radius"in e&&(n=a=0|e.radius),"width"in e&&(n=0|e.width),"height"in e&&(a=0|e.height)),"format"in e&&(u=s[e.format])):"number"==typeof e?(n=0|e,a="number"==typeof r?0|r:n):e||(n=a=1),n!==c.width||a!==c.height||u!==c.format)return o.width=c.width=n,o.height=c.height=a,c.format=u,t.bindRenderbuffer(36161,c.renderbuffer),t.renderbufferStorage(36161,u,n,a),i.profile&&(c.stats.size=bt[c.format]*c.width*c.height),o.format=l[c.format],o}var c=new a(t.createRenderbuffer());return u[c.id]=c,n.renderbufferCount++,o(e,r),o.resize=function(e,r){var n=0|e,a=0|r||n;return n===c.width&&a===c.height||(o.width=c.width=n,o.height=c.height=a,t.bindRenderbuffer(36161,c.renderbuffer),t.renderbufferStorage(36161,c.format,n,a),i.profile&&(c.stats.size=bt[c.format]*c.width*c.height)),o},o._reglType="renderbuffer",o._renderbuffer=c,i.profile&&(o.stats=c.stats),o.destroy=function(){c.decRef()},o},clear:function(){K(u).forEach(o)},restore:function(){K(u).forEach((function(e){e.renderbuffer=t.createRenderbuffer(),t.bindRenderbuffer(36161,e.renderbuffer),t.renderbufferStorage(36161,e.format,e.width,e.height)})),t.bindRenderbuffer(36161,null)}}},wt=[];wt[6408]=4,wt[6407]=3;var Tt=[];Tt[5121]=1,Tt[5126]=4,Tt[36193]=2;var 
kt=[1116352408,1899447441,-1245643825,-373957723,961987163,1508970993,-1841331548,-1424204075,-670586216,310598401,607225278,1426881987,1925078388,-2132889090,-1680079193,-1046744716,-459576895,-272742522,264347078,604807628,770255983,1249150122,1555081692,1996064986,-1740746414,-1473132947,-1341970488,-1084653625,-958395405,-710438585,113926993,338241895,666307205,773529912,1294757372,1396182291,1695183700,1986661051,-2117940946,-1838011259,-1564481375,-1474664885,-1035236496,-949202525,-778901479,-694614492,-200395387,275423344,430227734,506948616,659060556,883997877,958139571,1322822218,1537002063,1747873779,1955562222,2024104815,-2067236844,-1933114872,-1866530822,-1538233109,-1090935817,-965641998],At=["x","y","z","w"],Mt="blend.func blend.equation stencil.func stencil.opFront stencil.opBack sample.coverage viewport scissor.box polygonOffset.offset".split(" "),St={0:0,1:1,zero:0,one:1,"src color":768,"one minus src color":769,"src alpha":770,"one minus src alpha":771,"dst color":774,"one minus dst color":775,"dst alpha":772,"one minus dst alpha":773,"constant color":32769,"one minus constant color":32770,"constant alpha":32771,"one minus constant alpha":32772,"src alpha saturate":776},Et={never:512,less:513,"<":513,equal:514,"=":514,"==":514,"===":514,lequal:515,"<=":515,greater:516,">":516,notequal:517,"!=":517,"!==":517,gequal:518,">=":518,always:519},Lt={0:0,zero:0,keep:7680,replace:7681,increment:7682,decrement:7683,"increment wrap":34055,"decrement wrap":34056,invert:5386},Ct={cw:2304,ccw:2305},Pt=new F(!1,!1,!1,(function(){}));return function(t){function e(){if(0===K.length)T&&T.update(),et=null;else{et=Y.next(e),f();for(var t=K.length-1;0<=t;--t){var r=K[t];r&&r(I,null,0)}g.flush(),T&&T.update()}}function r(){!et&&0=K.length&&n()}}}}function u(){var 
t=X.viewport,e=X.scissor_box;t[0]=t[1]=e[0]=e[1]=0,I.viewportWidth=I.framebufferWidth=I.drawingBufferWidth=t[2]=e[2]=g.drawingBufferWidth,I.viewportHeight=I.framebufferHeight=I.drawingBufferHeight=t[3]=e[3]=g.drawingBufferHeight}function f(){I.tick+=1,I.time=m(),u(),q.procs.poll()}function h(){B.refresh(),u(),q.procs.refresh(),T&&T.update()}function m(){return(W()-M)/1e3}if(!(t=i(t)))return null;var g=t.gl,v=g.getContextAttributes();g.isContextLost();var y=function(t,e){function r(e){var r;e=e.toLowerCase();try{r=n[e]=t.getExtension(e)}catch(t){}return!!r}for(var n={},i=0;ie;++e)rt(H({framebuffer:t.framebuffer.faces[e]},t),l);else rt(t,l);else l(0,t)},prop:G.define.bind(null,1),context:G.define.bind(null,2),this:G.define.bind(null,3),draw:s({}),buffer:function(t){return z.create(t,34962,!1,!1)},elements:function(t){return D.create(t,!1)},texture:B.create2D,cube:B.createCube,renderbuffer:N.create,framebuffer:j.create,framebufferCube:j.createCube,vao:R.createVAO,attributes:v,frame:c,on:function(t,e){var r;switch(t){case"frame":return c(e);case"lost":r=Q;break;case"restore":r=$;break;case"destroy":r=tt}return r.push(e),{cancel:function(){for(var t=0;t */ -var n=t("buffer"),i=n.Buffer;function a(t,e){for(var r in t)e[r]=t[r]}function o(t,e,r){return i(t,e,r)}i.from&&i.alloc&&i.allocUnsafe&&i.allocUnsafeSlow?e.exports=n:(a(n,r),r.Buffer=o),o.prototype=Object.create(i.prototype),a(i,o),o.from=function(t,e,r){if("number"==typeof t)throw new TypeError("Argument must not be a number");return i(t,e,r)},o.alloc=function(t,e,r){if("number"!=typeof t)throw new TypeError("Argument must be a number");var n=i(t);return void 0!==e?"string"==typeof r?n.fill(e,r):n.fill(e):n.fill(0),n},o.allocUnsafe=function(t){if("number"!=typeof t)throw new TypeError("Argument must be a number");return i(t)},o.allocUnsafeSlow=function(t){if("number"!=typeof t)throw new TypeError("Argument must be a number");return n.SlowBuffer(t)}},{buffer:80}],280:[function(t,e,r){e.exports=i;var 
n=t("events").EventEmitter;function i(){n.call(this)}t("inherits")(i,n),i.Readable=t("readable-stream/lib/_stream_readable.js"),i.Writable=t("readable-stream/lib/_stream_writable.js"),i.Duplex=t("readable-stream/lib/_stream_duplex.js"),i.Transform=t("readable-stream/lib/_stream_transform.js"),i.PassThrough=t("readable-stream/lib/_stream_passthrough.js"),i.finished=t("readable-stream/lib/internal/streams/end-of-stream.js"),i.pipeline=t("readable-stream/lib/internal/streams/pipeline.js"),i.Stream=i,i.prototype.pipe=function(t,e){var r=this;function i(e){t.writable&&!1===t.write(e)&&r.pause&&r.pause()}function a(){r.readable&&r.resume&&r.resume()}r.on("data",i),t.on("drain",a),t._isStdio||e&&!1===e.end||(r.on("end",s),r.on("close",l));var o=!1;function s(){o||(o=!0,t.end())}function l(){o||(o=!0,"function"==typeof t.destroy&&t.destroy())}function c(t){if(u(),0===n.listenerCount(this,"error"))throw t}function u(){r.removeListener("data",i),t.removeListener("drain",a),r.removeListener("end",s),r.removeListener("close",l),r.removeListener("error",c),t.removeListener("error",c),r.removeListener("end",u),r.removeListener("close",u),t.removeListener("close",u)}return r.on("error",c),t.on("error",c),r.on("end",u),r.on("close",u),t.on("close",u),t.emit("pipe",r),t}},{events:181,inherits:226,"readable-stream/lib/_stream_duplex.js":282,"readable-stream/lib/_stream_passthrough.js":283,"readable-stream/lib/_stream_readable.js":284,"readable-stream/lib/_stream_transform.js":285,"readable-stream/lib/_stream_writable.js":286,"readable-stream/lib/internal/streams/end-of-stream.js":290,"readable-stream/lib/internal/streams/pipeline.js":292}],281:[function(t,e,r){"use strict";var n={};function i(t,e,r){r||(r=Error);var i=function(t){var r,n;function i(r,n,i){return t.call(this,function(t,r,n){return"string"==typeof e?e:e(t,r,n)}(r,n,i))||this}return 
n=t,(r=i).prototype=Object.create(n.prototype),r.prototype.constructor=r,r.__proto__=n,i}(r);i.prototype.name=r.name,i.prototype.code=t,n[t]=i}function a(t,e){if(Array.isArray(t)){var r=t.length;return t=t.map((function(t){return String(t)})),r>2?"one of ".concat(e," ").concat(t.slice(0,r-1).join(", "),", or ")+t[r-1]:2===r?"one of ".concat(e," ").concat(t[0]," or ").concat(t[1]):"of ".concat(e," ").concat(t[0])}return"of ".concat(e," ").concat(String(t))}i("ERR_INVALID_OPT_VALUE",(function(t,e){return'The value "'+e+'" is invalid for option "'+t+'"'}),TypeError),i("ERR_INVALID_ARG_TYPE",(function(t,e,r){var n,i,o,s;if("string"==typeof e&&(i="not ",e.substr(!o||o<0?0:+o,i.length)===i)?(n="must not be",e=e.replace(/^not /,"")):n="must be",function(t,e,r){return(void 0===r||r>t.length)&&(r=t.length),t.substring(r-e.length,r)===e}(t," argument"))s="The ".concat(t," ").concat(n," ").concat(a(e,"type"));else{var l=function(t,e,r){return"number"!=typeof r&&(r=0),!(r+e.length>t.length)&&-1!==t.indexOf(e,r)}(t,".")?"property":"argument";s='The "'.concat(t,'" ').concat(l," ").concat(n," ").concat(a(e,"type"))}return s+=". 
Received type ".concat(typeof r)}),TypeError),i("ERR_STREAM_PUSH_AFTER_EOF","stream.push() after EOF"),i("ERR_METHOD_NOT_IMPLEMENTED",(function(t){return"The "+t+" method is not implemented"})),i("ERR_STREAM_PREMATURE_CLOSE","Premature close"),i("ERR_STREAM_DESTROYED",(function(t){return"Cannot call "+t+" after a stream was destroyed"})),i("ERR_MULTIPLE_CALLBACK","Callback called multiple times"),i("ERR_STREAM_CANNOT_PIPE","Cannot pipe, not readable"),i("ERR_STREAM_WRITE_AFTER_END","write after end"),i("ERR_STREAM_NULL_VALUES","May not write null values to stream",TypeError),i("ERR_UNKNOWN_ENCODING",(function(t){return"Unknown encoding: "+t}),TypeError),i("ERR_STREAM_UNSHIFT_AFTER_END_EVENT","stream.unshift() after end event"),e.exports.codes=n},{}],282:[function(t,e,r){(function(r){(function(){"use strict";var n=Object.keys||function(t){var e=[];for(var r in t)e.push(r);return e};e.exports=c;var i=t("./_stream_readable"),a=t("./_stream_writable");t("inherits")(c,i);for(var o=n(a.prototype),s=0;s0)if("string"==typeof e||o.objectMode||Object.getPrototypeOf(e)===s.prototype||(e=function(t){return s.from(t)}(e)),n)o.endEmitted?w(t,new _):S(t,o,e,!0);else if(o.ended)w(t,new x);else{if(o.destroyed)return!1;o.reading=!1,o.decoder&&!r?(e=o.decoder.write(e),o.objectMode||0!==e.length?S(t,o,e,!1):P(t,o)):S(t,o,e,!1)}else n||(o.reading=!1,P(t,o));return!o.ended&&(o.lengthe.highWaterMark&&(e.highWaterMark=function(t){return t>=1073741824?t=1073741824:(t--,t|=t>>>1,t|=t>>>2,t|=t>>>4,t|=t>>>8,t|=t>>>16,t++),t}(t)),t<=e.length?t:e.ended?e.length:(e.needReadable=!0,0))}function L(t){var e=t._readableState;c("emitReadable",e.needReadable,e.emittedReadable),e.needReadable=!1,e.emittedReadable||(c("emitReadable",e.flowing),e.emittedReadable=!0,r.nextTick(C,t))}function C(t){var 
e=t._readableState;c("emitReadable_",e.destroyed,e.length,e.ended),e.destroyed||!e.length&&!e.ended||(t.emit("readable"),e.emittedReadable=!1),e.needReadable=!e.flowing&&!e.ended&&e.length<=e.highWaterMark,R(t)}function P(t,e){e.readingMore||(e.readingMore=!0,r.nextTick(I,t,e))}function I(t,e){for(;!e.reading&&!e.ended&&(e.length0,e.resumeScheduled&&!e.paused?e.flowing=!0:t.listenerCount("data")>0&&t.resume()}function z(t){c("readable nexttick read 0"),t.read(0)}function D(t,e){c("resume",e.reading),e.reading||t.read(0),e.resumeScheduled=!1,t.emit("resume"),R(t),e.flowing&&!e.reading&&t.read(0)}function R(t){var e=t._readableState;for(c("flow",e.flowing);e.flowing&&null!==t.read(););}function F(t,e){return 0===e.length?null:(e.objectMode?r=e.buffer.shift():!t||t>=e.length?(r=e.decoder?e.buffer.join(""):1===e.buffer.length?e.buffer.first():e.buffer.concat(e.length),e.buffer.clear()):r=e.buffer.consume(t,e.decoder),r);var r}function B(t){var e=t._readableState;c("endReadable",e.endEmitted),e.endEmitted||(e.ended=!0,r.nextTick(N,e,t))}function N(t,e){if(c("endReadableNT",t.endEmitted,t.length),!t.endEmitted&&0===t.length&&(t.endEmitted=!0,e.readable=!1,e.emit("end"),t.autoDestroy)){var r=e._writableState;(!r||r.autoDestroy&&r.finished)&&e.destroy()}}function j(t,e){for(var r=0,n=t.length;r=e.highWaterMark:e.length>0)||e.ended))return c("read: emitReadable",e.length,e.ended),0===e.length&&e.ended?B(this):L(this),null;if(0===(t=E(t,e))&&e.ended)return 0===e.length&&B(this),null;var n,i=e.needReadable;return c("need readable",i),(0===e.length||e.length-t0?F(t,e):null)?(e.needReadable=e.length<=e.highWaterMark,t=0):(e.length-=t,e.awaitDrain=0),0===e.length&&(e.ended||(e.needReadable=!0),r!==t&&e.ended&&B(this)),null!==n&&this.emit("data",n),n},A.prototype._read=function(t){w(this,new b("_read()"))},A.prototype.pipe=function(t,e){var n=this,i=this._readableState;switch(i.pipesCount){case 0:i.pipes=t;break;case 
1:i.pipes=[i.pipes,t];break;default:i.pipes.push(t)}i.pipesCount+=1,c("pipe count=%d opts=%j",i.pipesCount,e);var o=(!e||!1!==e.end)&&t!==r.stdout&&t!==r.stderr?l:g;function s(e,r){c("onunpipe"),e===n&&r&&!1===r.hasUnpiped&&(r.hasUnpiped=!0,c("cleanup"),t.removeListener("close",d),t.removeListener("finish",m),t.removeListener("drain",u),t.removeListener("error",p),t.removeListener("unpipe",s),n.removeListener("end",l),n.removeListener("end",g),n.removeListener("data",h),f=!0,!i.awaitDrain||t._writableState&&!t._writableState.needDrain||u())}function l(){c("onend"),t.end()}i.endEmitted?r.nextTick(o):n.once("end",o),t.on("unpipe",s);var u=function(t){return function(){var e=t._readableState;c("pipeOnDrain",e.awaitDrain),e.awaitDrain&&e.awaitDrain--,0===e.awaitDrain&&a(t,"data")&&(e.flowing=!0,R(t))}}(n);t.on("drain",u);var f=!1;function h(e){c("ondata");var r=t.write(e);c("dest.write",r),!1===r&&((1===i.pipesCount&&i.pipes===t||i.pipesCount>1&&-1!==j(i.pipes,t))&&!f&&(c("false write response, pause",i.awaitDrain),i.awaitDrain++),n.pause())}function p(e){c("onerror",e),g(),t.removeListener("error",p),0===a(t,"error")&&w(t,e)}function d(){t.removeListener("finish",m),g()}function m(){c("onfinish"),t.removeListener("close",d),g()}function g(){c("unpipe"),n.unpipe(t)}return n.on("data",h),function(t,e,r){if("function"==typeof t.prependListener)return t.prependListener(e,r);t._events&&t._events[e]?Array.isArray(t._events[e])?t._events[e].unshift(r):t._events[e]=[r,t._events[e]]:t.on(e,r)}(t,"error",p),t.once("close",d),t.once("finish",m),t.emit("pipe",n),i.flowing||(c("pipe resume"),n.resume()),t},A.prototype.unpipe=function(t){var e=this._readableState,r={hasUnpiped:!1};if(0===e.pipesCount)return this;if(1===e.pipesCount)return t&&t!==e.pipes||(t||(t=e.pipes),e.pipes=null,e.pipesCount=0,e.flowing=!1,t&&t.emit("unpipe",this,r)),this;if(!t){var n=e.pipes,i=e.pipesCount;e.pipes=null,e.pipesCount=0,e.flowing=!1;for(var 
a=0;a0,!1!==i.flowing&&this.resume()):"readable"===t&&(i.endEmitted||i.readableListening||(i.readableListening=i.needReadable=!0,i.flowing=!1,i.emittedReadable=!1,c("on readable",i.length,i.reading),i.length?L(this):i.reading||r.nextTick(z,this))),n},A.prototype.addListener=A.prototype.on,A.prototype.removeListener=function(t,e){var n=o.prototype.removeListener.call(this,t,e);return"readable"===t&&r.nextTick(O,this),n},A.prototype.removeAllListeners=function(t){var e=o.prototype.removeAllListeners.apply(this,arguments);return"readable"!==t&&void 0!==t||r.nextTick(O,this),e},A.prototype.resume=function(){var t=this._readableState;return t.flowing||(c("resume"),t.flowing=!t.readableListening,function(t,e){e.resumeScheduled||(e.resumeScheduled=!0,r.nextTick(D,t,e))}(this,t)),t.paused=!1,this},A.prototype.pause=function(){return c("call pause flowing=%j",this._readableState.flowing),!1!==this._readableState.flowing&&(c("pause"),this._readableState.flowing=!1,this.emit("pause")),this._readableState.paused=!0,this},A.prototype.wrap=function(t){var e=this,r=this._readableState,n=!1;for(var i in t.on("end",(function(){if(c("wrapped end"),r.decoder&&!r.ended){var t=r.decoder.end();t&&t.length&&e.push(t)}e.push(null)})),t.on("data",(function(i){(c("wrapped data"),r.decoder&&(i=r.decoder.write(i)),r.objectMode&&null==i)||(r.objectMode||i&&i.length)&&(e.push(i)||(n=!0,t.pause()))})),t)void 0===this[i]&&"function"==typeof t[i]&&(this[i]=function(e){return function(){return t[e].apply(t,arguments)}}(i));for(var a=0;a-1))throw new _(t);return this._writableState.defaultEncoding=t,this},Object.defineProperty(A.prototype,"writableBuffer",{enumerable:!1,get:function(){return this._writableState&&this._writableState.getBuffer()}}),Object.defineProperty(A.prototype,"writableHighWaterMark",{enumerable:!1,get:function(){return this._writableState.highWaterMark}}),A.prototype._write=function(t,e,r){r(new m("_write()"))},A.prototype._writev=null,A.prototype.end=function(t,e,n){var 
i=this._writableState;return"function"==typeof t?(n=t,t=null,e=null):"function"==typeof e&&(n=e,e=null),null!=t&&this.write(t,e),i.corked&&(i.corked=1,this.uncork()),i.ending||function(t,e,n){e.ending=!0,P(t,e),n&&(e.finished?r.nextTick(n):t.once("finish",n));e.ended=!0,t.writable=!1}(this,i,n),this},Object.defineProperty(A.prototype,"writableLength",{enumerable:!1,get:function(){return this._writableState.length}}),Object.defineProperty(A.prototype,"destroyed",{enumerable:!1,get:function(){return void 0!==this._writableState&&this._writableState.destroyed},set:function(t){this._writableState&&(this._writableState.destroyed=t)}}),A.prototype.destroy=f.destroy,A.prototype._undestroy=f.undestroy,A.prototype._destroy=function(t,e){e(t)}}).call(this)}).call(this,t("_process"),"undefined"!=typeof global?global:"undefined"!=typeof self?self:"undefined"!=typeof window?window:{})},{"../errors":281,"./_stream_duplex":282,"./internal/streams/destroy":289,"./internal/streams/state":293,"./internal/streams/stream":294,_process:272,buffer:80,inherits:226,"util-deprecate":325}],287:[function(t,e,r){(function(r){(function(){"use strict";var n;function i(t,e,r){return e in t?Object.defineProperty(t,e,{value:r,enumerable:!0,configurable:!0,writable:!0}):t[e]=r,t}var a=t("./end-of-stream"),o=Symbol("lastResolve"),s=Symbol("lastReject"),l=Symbol("error"),c=Symbol("ended"),u=Symbol("lastPromise"),f=Symbol("handlePromise"),h=Symbol("stream");function p(t,e){return{value:t,done:e}}function d(t){var e=t[o];if(null!==e){var r=t[h].read();null!==r&&(t[u]=null,t[o]=null,t[s]=null,e(p(r,!1)))}}function m(t){r.nextTick(d,t)}var g=Object.getPrototypeOf((function(){})),v=Object.setPrototypeOf((i(n={get stream(){return this[h]},next:function(){var t=this,e=this[l];if(null!==e)return Promise.reject(e);if(this[c])return Promise.resolve(p(void 0,!0));if(this[h].destroyed)return new Promise((function(e,n){r.nextTick((function(){t[l]?n(t[l]):e(p(void 0,!0))}))}));var n,i=this[u];if(i)n=new 
Promise(function(t,e){return function(r,n){t.then((function(){e[c]?r(p(void 0,!0)):e[f](r,n)}),n)}}(i,this));else{var a=this[h].read();if(null!==a)return Promise.resolve(p(a,!1));n=new Promise(this[f])}return this[u]=n,n}},Symbol.asyncIterator,(function(){return this})),i(n,"return",(function(){var t=this;return new Promise((function(e,r){t[h].destroy(null,(function(t){t?r(t):e(p(void 0,!0))}))}))})),n),g);e.exports=function(t){var e,r=Object.create(v,(i(e={},h,{value:t,writable:!0}),i(e,o,{value:null,writable:!0}),i(e,s,{value:null,writable:!0}),i(e,l,{value:null,writable:!0}),i(e,c,{value:t._readableState.endEmitted,writable:!0}),i(e,f,{value:function(t,e){var n=r[h].read();n?(r[u]=null,r[o]=null,r[s]=null,t(p(n,!1))):(r[o]=t,r[s]=e)},writable:!0}),e));return r[u]=null,a(t,(function(t){if(t&&"ERR_STREAM_PREMATURE_CLOSE"!==t.code){var e=r[s];return null!==e&&(r[u]=null,r[o]=null,r[s]=null,e(t)),void(r[l]=t)}var n=r[o];null!==n&&(r[u]=null,r[o]=null,r[s]=null,n(p(void 0,!0))),r[c]=!0})),t.on("readable",m.bind(null,r)),r}}).call(this)}).call(this,t("_process"))},{"./end-of-stream":290,_process:272}],288:[function(t,e,r){"use strict";function n(t,e){var r=Object.keys(t);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(t);e&&(n=n.filter((function(e){return Object.getOwnPropertyDescriptor(t,e).enumerable}))),r.push.apply(r,n)}return r}function i(t,e,r){return e in t?Object.defineProperty(t,e,{value:r,enumerable:!0,configurable:!0,writable:!0}):t[e]=r,t}function a(t,e){for(var r=0;r0?this.tail.next=e:this.head=e,this.tail=e,++this.length}},{key:"unshift",value:function(t){var e={data:t,next:this.head};0===this.length&&(this.tail=e),this.head=e,++this.length}},{key:"shift",value:function(){if(0!==this.length){var t=this.head.data;return 
1===this.length?this.head=this.tail=null:this.head=this.head.next,--this.length,t}}},{key:"clear",value:function(){this.head=this.tail=null,this.length=0}},{key:"join",value:function(t){if(0===this.length)return"";for(var e=this.head,r=""+e.data;e=e.next;)r+=t+e.data;return r}},{key:"concat",value:function(t){if(0===this.length)return o.alloc(0);for(var e,r,n,i=o.allocUnsafe(t>>>0),a=this.head,s=0;a;)e=a.data,r=i,n=s,o.prototype.copy.call(e,r,n),s+=a.data.length,a=a.next;return i}},{key:"consume",value:function(t,e){var r;return ti.length?i.length:t;if(a===i.length?n+=i:n+=i.slice(0,t),0==(t-=a)){a===i.length?(++r,e.next?this.head=e.next:this.head=this.tail=null):(this.head=e,e.data=i.slice(a));break}++r}return this.length-=r,n}},{key:"_getBuffer",value:function(t){var e=o.allocUnsafe(t),r=this.head,n=1;for(r.data.copy(e),t-=r.data.length;r=r.next;){var i=r.data,a=t>i.length?i.length:t;if(i.copy(e,e.length-t,0,a),0==(t-=a)){a===i.length?(++n,r.next?this.head=r.next:this.head=this.tail=null):(this.head=r,r.data=i.slice(a));break}++n}return this.length-=n,e}},{key:l,value:function(t,e){return s(this,function(t){for(var e=1;e0,(function(t){n||(n=t),t&&o.forEach(c),a||(o.forEach(c),i(n))}))}));return e.reduce(u)}},{"../../../errors":281,"./end-of-stream":290}],293:[function(t,e,r){"use strict";var n=t("../../../errors").codes.ERR_INVALID_OPT_VALUE;e.exports={getHighWaterMark:function(t,e,r,i){var a=function(t,e,r){return null!=t.highWaterMark?t.highWaterMark:e?t[r]:null}(e,i,r);if(null!=a){if(!isFinite(a)||Math.floor(a)!==a||a<0)throw new n(i?r:"highWaterMark",a);return Math.floor(a)}return t.objectMode?16:16384}}},{"../../../errors":281}],294:[function(t,e,r){e.exports=t("events").EventEmitter},{events:181}],295:[function(t,e,r){(function(r,n){(function(){var r=t("assert"),i=t("debug")("stream-parser");e.exports=function(t){var e=t&&"function"==typeof t._transform,r=t&&"function"==typeof t._write;if(!e&&!r)throw new Error("must pass a Writable or Transform stream 
in");i("extending Parser into stream"),t._bytes=o,t._skipBytes=s,e&&(t._passthrough=l);e?t._transform=u:t._write=c};function a(t){i("initializing parser stream"),t._parserBytesLeft=0,t._parserBuffers=[],t._parserBuffered=0,t._parserState=-1,t._parserCallback=null,"function"==typeof t.push&&(t._parserOutput=t.push.bind(t)),t._parserInit=!0}function o(t,e){r(!this._parserCallback,'there is already a "callback" set!'),r(isFinite(t)&&t>0,'can only buffer a finite number of bytes > 0, got "'+t+'"'),this._parserInit||a(this),i("buffering %o bytes",t),this._parserBytesLeft=t,this._parserCallback=e,this._parserState=0}function s(t,e){r(!this._parserCallback,'there is already a "callback" set!'),r(t>0,'can only skip > 0 bytes, got "'+t+'"'),this._parserInit||a(this),i("skipping %o bytes",t),this._parserBytesLeft=t,this._parserCallback=e,this._parserState=1}function l(t,e){r(!this._parserCallback,'There is already a "callback" set!'),r(t>0,'can only pass through > 0 bytes, got "'+t+'"'),this._parserInit||a(this),i("passing through %o bytes",t),this._parserBytesLeft=t,this._parserCallback=e,this._parserState=2}function c(t,e,r){this._parserInit||a(this),i("write(%o bytes)",t.length),"function"==typeof e&&(r=e),h(this,t,null,r)}function u(t,e,r){this._parserInit||a(this),i("transform(%o bytes)",t.length),"function"!=typeof e&&(e=this._parserOutput),h(this,t,e,r)}function f(t,e,r,a){if(t._parserBytesLeft-=e.length,i("%o bytes left for stream piece",t._parserBytesLeft),0===t._parserState?(t._parserBuffers.push(e),t._parserBuffered+=e.length):2===t._parserState&&r(e),0!==t._parserBytesLeft)return a;var o=t._parserCallback;if(o&&0===t._parserState&&t._parserBuffers.length>1&&(e=n.concat(t._parserBuffers,t._parserBuffered)),0!==t._parserState&&(e=null),t._parserCallback=null,t._parserBuffered=0,t._parserState=-1,t._parserBuffers.splice(0),o){var s=[];e&&s.push(e),r&&s.push(r);var l=o.length>s.length;l&&s.push(p(a));var c=o.apply(t,s);if(!l||a===c)return a}}var h=p((function 
t(e,r,n,i){return e._parserBytesLeft<=0?i(new Error("got data but not currently parsing anything")):r.length<=e._parserBytesLeft?function(){return f(e,r,n,i)}:function(){var a=r.slice(0,e._parserBytesLeft);return f(e,a,n,(function(o){return o?i(o):r.length>a.length?function(){return t(e,r.slice(a.length),n,i)}:void 0}))}}));function p(t){return function(){for(var e=t.apply(this,arguments);"function"==typeof e;)e=e();return e}}}).call(this)}).call(this,t("_process"),t("buffer").Buffer)},{_process:272,assert:71,buffer:80,debug:296}],296:[function(t,e,r){(function(n){(function(){function i(){var t;try{t=r.storage.debug}catch(t){}return!t&&void 0!==n&&"env"in n&&(t=n.env.DEBUG),t}(r=e.exports=t("./debug")).log=function(){return"object"==typeof console&&console.log&&Function.prototype.apply.call(console.log,console,arguments)},r.formatArgs=function(t){var e=this.useColors;if(t[0]=(e?"%c":"")+this.namespace+(e?" %c":" ")+t[0]+(e?"%c ":" ")+"+"+r.humanize(this.diff),!e)return;var n="color: "+this.color;t.splice(1,0,n,"color: inherit");var i=0,a=0;t[0].replace(/%[a-zA-Z%]/g,(function(t){"%%"!==t&&(i++,"%c"===t&&(a=i))})),t.splice(a,0,n)},r.save=function(t){try{null==t?r.storage.removeItem("debug"):r.storage.debug=t}catch(t){}},r.load=i,r.useColors=function(){if("undefined"!=typeof window&&window.process&&"renderer"===window.process.type)return!0;return"undefined"!=typeof document&&document.documentElement&&document.documentElement.style&&document.documentElement.style.WebkitAppearance||"undefined"!=typeof window&&window.console&&(window.console.firebug||window.console.exception&&window.console.table)||"undefined"!=typeof navigator&&navigator.userAgent&&navigator.userAgent.toLowerCase().match(/firefox\/(\d+)/)&&parseInt(RegExp.$1,10)>=31||"undefined"!=typeof navigator&&navigator.userAgent&&navigator.userAgent.toLowerCase().match(/applewebkit\/(\d+)/)},r.storage="undefined"!=typeof chrome&&void 0!==chrome.storage?chrome.storage.local:function(){try{return 
window.localStorage}catch(t){}}(),r.colors=["lightseagreen","forestgreen","goldenrod","dodgerblue","darkorchid","crimson"],r.formatters.j=function(t){try{return JSON.stringify(t)}catch(t){return"[UnexpectedJSONParseError]: "+t.message}},r.enable(i())}).call(this)}).call(this,t("_process"))},{"./debug":297,_process:272}],297:[function(t,e,r){var n;function i(t){function e(){if(e.enabled){var t=e,i=+new Date,a=i-(n||i);t.diff=a,t.prev=n,t.curr=i,n=i;for(var o=new Array(arguments.length),s=0;s0)return function(t){if((t=String(t)).length>100)return;var e=/^((?:\d+)?\.?\d+) *(milliseconds?|msecs?|ms|seconds?|secs?|s|minutes?|mins?|m|hours?|hrs?|h|days?|d|years?|yrs?|y)?$/i.exec(t);if(!e)return;var r=parseFloat(e[1]);switch((e[2]||"ms").toLowerCase()){case"years":case"year":case"yrs":case"yr":case"y":return 315576e5*r;case"days":case"day":case"d":return r*o;case"hours":case"hour":case"hrs":case"hr":case"h":return r*a;case"minutes":case"minute":case"mins":case"min":case"m":return r*i;case"seconds":case"second":case"secs":case"sec":case"s":return r*n;case"milliseconds":case"millisecond":case"msecs":case"msec":case"ms":return r;default:return}}(t);if("number"===l&&!1===isNaN(t))return e.long?s(r=t,o,"day")||s(r,a,"hour")||s(r,i,"minute")||s(r,n,"second")||r+" ms":function(t){if(t>=o)return Math.round(t/o)+"d";if(t>=a)return Math.round(t/a)+"h";if(t>=i)return Math.round(t/i)+"m";if(t>=n)return Math.round(t/n)+"s";return t+"ms"}(t);throw new Error("val is not a non-empty string or a valid number. 
val="+JSON.stringify(t))}},{}],299:[function(t,e,r){"use strict";var n=t("parenthesis");e.exports=function(t,e,r){if(null==t)throw Error("First argument should be a string");if(null==e)throw Error("Separator should be a string or a RegExp");r?("string"==typeof r||Array.isArray(r))&&(r={ignore:r}):r={},null==r.escape&&(r.escape=!0),null==r.ignore?r.ignore=["[]","()","{}","<>",'""',"''","``","\u201c\u201d","\xab\xbb"]:("string"==typeof r.ignore&&(r.ignore=[r.ignore]),r.ignore=r.ignore.map((function(t){return 1===t.length&&(t+=t),t})));var i=n.parse(t,{flat:!0,brackets:r.ignore}),a=i[0].split(e);if(r.escape){for(var o=[],s=0;s>5==6?2:t>>4==14?3:t>>3==30?4:t>>6==2?-1:-2}function s(t){var e=this.lastTotal-this.lastNeed,r=function(t,e,r){if(128!=(192&e[0]))return t.lastNeed=0,"\ufffd";if(t.lastNeed>1&&e.length>1){if(128!=(192&e[1]))return t.lastNeed=1,"\ufffd";if(t.lastNeed>2&&e.length>2&&128!=(192&e[2]))return t.lastNeed=2,"\ufffd"}}(this,t);return void 0!==r?r:this.lastNeed<=t.length?(t.copy(this.lastChar,e,0,this.lastNeed),this.lastChar.toString(this.encoding,0,this.lastTotal)):(t.copy(this.lastChar,e,0,t.length),void(this.lastNeed-=t.length))}function l(t,e){if((t.length-e)%2==0){var r=t.toString("utf16le",e);if(r){var n=r.charCodeAt(r.length-1);if(n>=55296&&n<=56319)return this.lastNeed=2,this.lastTotal=4,this.lastChar[0]=t[t.length-2],this.lastChar[1]=t[t.length-1],r.slice(0,-1)}return r}return this.lastNeed=1,this.lastTotal=2,this.lastChar[0]=t[t.length-1],t.toString("utf16le",e,t.length-1)}function c(t){var e=t&&t.length?this.write(t):"";if(this.lastNeed){var r=this.lastTotal-this.lastNeed;return e+this.lastChar.toString("utf16le",0,r)}return e}function u(t,e){var r=(t.length-e)%3;return 0===r?t.toString("base64",e):(this.lastNeed=3-r,this.lastTotal=3,1===r?this.lastChar[0]=t[t.length-1]:(this.lastChar[0]=t[t.length-2],this.lastChar[1]=t[t.length-1]),t.toString("base64",e,t.length-r))}function f(t){var e=t&&t.length?this.write(t):"";return 
this.lastNeed?e+this.lastChar.toString("base64",0,3-this.lastNeed):e}function h(t){return t.toString(this.encoding)}function p(t){return t&&t.length?this.write(t):""}r.StringDecoder=a,a.prototype.write=function(t){if(0===t.length)return"";var e,r;if(this.lastNeed){if(void 0===(e=this.fillLast(t)))return"";r=this.lastNeed,this.lastNeed=0}else r=0;return r=0)return i>0&&(t.lastNeed=i-1),i;if(--n=0)return i>0&&(t.lastNeed=i-2),i;if(--n=0)return i>0&&(2===i?i=0:t.lastNeed=i-3),i;return 0}(this,t,e);if(!this.lastNeed)return t.toString("utf8",e);this.lastTotal=r;var n=t.length-(r-this.lastNeed);return t.copy(this.lastChar,0,n),t.toString("utf8",e,n)},a.prototype.fillLast=function(t){if(this.lastNeed<=t.length)return t.copy(this.lastChar,this.lastTotal-this.lastNeed,0,this.lastNeed),this.lastChar.toString(this.encoding,0,this.lastTotal);t.copy(this.lastChar,this.lastTotal-this.lastNeed,0,t.length),this.lastNeed-=t.length}},{"safe-buffer":279}],301:[function(t,e,r){"use strict";e.exports=function(t){for(var e=t.length,r=new Array(e),n=new Array(e),i=new Array(e),a=new Array(e),o=new Array(e),s=new Array(e),l=0;l0;){e=c[c.length-1];var p=t[e];if(a[e]=0&&s[e].push(o[m])}a[e]=d}else{if(n[e]===r[e]){var g=[],v=[],y=0;for(d=l.length-1;d>=0;--d){var x=l[d];if(i[x]=!1,g.push(x),v.push(s[x]),y+=s[x].length,o[x]=f.length,x===e){l.length=d;break}}f.push(g);var b=new Array(y);for(d=0;d1&&(i=1),i<-1&&(i=-1),(t*n-e*r<0?-1:1)*Math.acos(i)};r.default=function(t){var e=t.px,r=t.py,l=t.cx,c=t.cy,u=t.rx,f=t.ry,h=t.xAxisRotation,p=void 0===h?0:h,d=t.largeArcFlag,m=void 0===d?0:d,g=t.sweepFlag,v=void 0===g?0:g,y=[];if(0===u||0===f)return[];var x=Math.sin(p*i/360),b=Math.cos(p*i/360),_=b*(e-l)/2+x*(r-c)/2,w=-x*(e-l)/2+b*(r-c)/2;if(0===_&&0===w)return[];u=Math.abs(u),f=Math.abs(f);var T=Math.pow(_,2)/Math.pow(u,2)+Math.pow(w,2)/Math.pow(f,2);T>1&&(u*=Math.sqrt(T),f*=Math.sqrt(T));var k=function(t,e,r,n,a,o,l,c,u,f,h,p){var 
d=Math.pow(a,2),m=Math.pow(o,2),g=Math.pow(h,2),v=Math.pow(p,2),y=d*m-d*v-m*g;y<0&&(y=0),y/=d*v+m*g;var x=(y=Math.sqrt(y)*(l===c?-1:1))*a/o*p,b=y*-o/a*h,_=f*x-u*b+(t+r)/2,w=u*x+f*b+(e+n)/2,T=(h-x)/a,k=(p-b)/o,A=(-h-x)/a,M=(-p-b)/o,S=s(1,0,T,k),E=s(T,k,A,M);return 0===c&&E>0&&(E-=i),1===c&&E<0&&(E+=i),[_,w,S,E]}(e,r,l,c,u,f,m,v,x,b,_,w),A=n(k,4),M=A[0],S=A[1],E=A[2],L=A[3],C=Math.abs(L)/(i/4);Math.abs(1-C)<1e-7&&(C=1);var P=Math.max(Math.ceil(C),1);L/=P;for(var I=0;Ie[2]&&(e[2]=l[c+0]),l[c+1]>e[3]&&(e[3]=l[c+1]);return e}},{"abs-svg-path":66,"is-svg-path":233,"normalize-svg-path":304,"parse-svg-path":245}],304:[function(t,e,r){"use strict";e.exports=function(t){for(var e,r=[],o=0,s=0,l=0,c=0,u=null,f=null,h=0,p=0,d=0,m=t.length;d4?(o=g[g.length-4],s=g[g.length-3]):(o=h,s=p),r.push(g)}return r};var n=t("svg-arc-to-cubic-bezier");function i(t,e,r,n){return["C",t,e,r,n,r,n]}function a(t,e,r,n,i,a){return["C",t/3+2/3*r,e/3+2/3*n,i/3+2/3*r,a/3+2/3*n,i,a]}},{"svg-arc-to-cubic-bezier":302}],305:[function(t,e,r){"use strict";var n,i=t("svg-path-bounds"),a=t("parse-svg-path"),o=t("draw-svg-path"),s=t("is-svg-path"),l=t("bitmap-sdf"),c=document.createElement("canvas"),u=c.getContext("2d");e.exports=function(t,e){if(!s(t))throw Error("Argument should be valid svg path string");e||(e={});var r,f;e.shape?(r=e.shape[0],f=e.shape[1]):(r=c.width=e.w||e.width||200,f=c.height=e.h||e.height||200);var h=Math.min(r,f),p=e.stroke||0,d=e.viewbox||e.viewBox||i(t),m=[r/(d[2]-d[0]),f/(d[3]-d[1])],g=Math.min(m[0]||0,m[1]||0)/2;u.fillStyle="black",u.fillRect(0,0,r,f),u.fillStyle="white",p&&("number"!=typeof p&&(p=1),u.strokeStyle=p>0?"white":"black",u.lineWidth=Math.abs(p));if(u.translate(.5*r,.5*f),u.scale(g,g),function(){if(null!=n)return n;var t=document.createElement("canvas").getContext("2d");if(t.canvas.width=t.canvas.height=1,!window.Path2D)return n=!1;var e=new Path2D("M0,0h1v1h-1v-1Z");t.fillStyle="black",t.fill(e);var r=t.getImageData(0,0,1,1);return 
n=r&&r.data&&255===r.data[3]}()){var v=new Path2D(t);u.fill(v),p&&u.stroke(v)}else{var y=a(t);o(u,y),u.fill(),p&&u.stroke()}return u.setTransform(1,0,0,1,0,0),l(u,{cutoff:null!=e.cutoff?e.cutoff:.5,radius:null!=e.radius?e.radius:.5*h})}},{"bitmap-sdf":78,"draw-svg-path":120,"is-svg-path":233,"parse-svg-path":245,"svg-path-bounds":303}],306:[function(t,e,r){(function(e,n){(function(){var i=t("process/browser.js").nextTick,a=Function.prototype.apply,o=Array.prototype.slice,s={},l=0;function c(t,e){this._id=t,this._clearFn=e}r.setTimeout=function(){return new c(a.call(setTimeout,window,arguments),clearTimeout)},r.setInterval=function(){return new c(a.call(setInterval,window,arguments),clearInterval)},r.clearTimeout=r.clearInterval=function(t){t.close()},c.prototype.unref=c.prototype.ref=function(){},c.prototype.close=function(){this._clearFn.call(window,this._id)},r.enroll=function(t,e){clearTimeout(t._idleTimeoutId),t._idleTimeout=e},r.unenroll=function(t){clearTimeout(t._idleTimeoutId),t._idleTimeout=-1},r._unrefActive=r.active=function(t){clearTimeout(t._idleTimeoutId);var e=t._idleTimeout;e>=0&&(t._idleTimeoutId=setTimeout((function(){t._onTimeout&&t._onTimeout()}),e))},r.setImmediate="function"==typeof e?e:function(t){var e=l++,n=!(arguments.length<2)&&o.call(arguments,1);return s[e]=!0,i((function(){s[e]&&(n?t.apply(null,n):t.call(null),r.clearImmediate(e))})),e},r.clearImmediate="function"==typeof n?n:function(t){delete s[t]}}).call(this)}).call(this,t("timers").setImmediate,t("timers").clearImmediate)},{"process/browser.js":272,timers:306}],307:[function(t,e,r){!function(t){var r=/^\s+/,n=/\s+$/,i=0,a=t.round,o=t.min,s=t.max,l=t.random;function c(e,l){if(l=l||{},(e=e||"")instanceof c)return e;if(!(this instanceof c))return new c(e,l);var u=function(e){var i={r:0,g:0,b:0},a=1,l=null,c=null,u=null,f=!1,h=!1;"string"==typeof e&&(e=function(t){t=t.replace(r,"").replace(n,"").toLowerCase();var e,i=!1;if(S[t])t=S[t],i=!0;else 
if("transparent"==t)return{r:0,g:0,b:0,a:0,format:"name"};if(e=j.rgb.exec(t))return{r:e[1],g:e[2],b:e[3]};if(e=j.rgba.exec(t))return{r:e[1],g:e[2],b:e[3],a:e[4]};if(e=j.hsl.exec(t))return{h:e[1],s:e[2],l:e[3]};if(e=j.hsla.exec(t))return{h:e[1],s:e[2],l:e[3],a:e[4]};if(e=j.hsv.exec(t))return{h:e[1],s:e[2],v:e[3]};if(e=j.hsva.exec(t))return{h:e[1],s:e[2],v:e[3],a:e[4]};if(e=j.hex8.exec(t))return{r:I(e[1]),g:I(e[2]),b:I(e[3]),a:R(e[4]),format:i?"name":"hex8"};if(e=j.hex6.exec(t))return{r:I(e[1]),g:I(e[2]),b:I(e[3]),format:i?"name":"hex"};if(e=j.hex4.exec(t))return{r:I(e[1]+""+e[1]),g:I(e[2]+""+e[2]),b:I(e[3]+""+e[3]),a:R(e[4]+""+e[4]),format:i?"name":"hex8"};if(e=j.hex3.exec(t))return{r:I(e[1]+""+e[1]),g:I(e[2]+""+e[2]),b:I(e[3]+""+e[3]),format:i?"name":"hex"};return!1}(e));"object"==typeof e&&(U(e.r)&&U(e.g)&&U(e.b)?(p=e.r,d=e.g,m=e.b,i={r:255*C(p,255),g:255*C(d,255),b:255*C(m,255)},f=!0,h="%"===String(e.r).substr(-1)?"prgb":"rgb"):U(e.h)&&U(e.s)&&U(e.v)?(l=z(e.s),c=z(e.v),i=function(e,r,n){e=6*C(e,360),r=C(r,100),n=C(n,100);var i=t.floor(e),a=e-i,o=n*(1-r),s=n*(1-a*r),l=n*(1-(1-a)*r),c=i%6;return{r:255*[n,s,o,o,l,n][c],g:255*[l,n,n,s,o,o][c],b:255*[o,o,l,n,n,s][c]}}(e.h,l,c),f=!0,h="hsv"):U(e.h)&&U(e.s)&&U(e.l)&&(l=z(e.s),u=z(e.l),i=function(t,e,r){var n,i,a;function o(t,e,r){return r<0&&(r+=1),r>1&&(r-=1),r<1/6?t+6*(e-t)*r:r<.5?e:r<2/3?t+(e-t)*(2/3-r)*6:t}if(t=C(t,360),e=C(e,100),r=C(r,100),0===e)n=i=a=r;else{var s=r<.5?r*(1+e):r+e-r*e,l=2*r-s;n=o(l,s,t+1/3),i=o(l,s,t),a=o(l,s,t-1/3)}return{r:255*n,g:255*i,b:255*a}}(e.h,l,u),f=!0,h="hsl"),e.hasOwnProperty("a")&&(a=e.a));var p,d,m;return 
a=L(a),{ok:f,format:e.format||h,r:o(255,s(i.r,0)),g:o(255,s(i.g,0)),b:o(255,s(i.b,0)),a:a}}(e);this._originalInput=e,this._r=u.r,this._g=u.g,this._b=u.b,this._a=u.a,this._roundA=a(100*this._a)/100,this._format=l.format||u.format,this._gradientType=l.gradientType,this._r<1&&(this._r=a(this._r)),this._g<1&&(this._g=a(this._g)),this._b<1&&(this._b=a(this._b)),this._ok=u.ok,this._tc_id=i++}function u(t,e,r){t=C(t,255),e=C(e,255),r=C(r,255);var n,i,a=s(t,e,r),l=o(t,e,r),c=(a+l)/2;if(a==l)n=i=0;else{var u=a-l;switch(i=c>.5?u/(2-a-l):u/(a+l),a){case t:n=(e-r)/u+(e>1)+720)%360;--e;)n.h=(n.h+i)%360,a.push(c(n));return a}function M(t,e){e=e||6;for(var r=c(t).toHsv(),n=r.h,i=r.s,a=r.v,o=[],s=1/e;e--;)o.push(c({h:n,s:i,v:a})),a=(a+s)%1;return o}c.prototype={isDark:function(){return this.getBrightness()<128},isLight:function(){return!this.isDark()},isValid:function(){return this._ok},getOriginalInput:function(){return this._originalInput},getFormat:function(){return this._format},getAlpha:function(){return this._a},getBrightness:function(){var t=this.toRgb();return(299*t.r+587*t.g+114*t.b)/1e3},getLuminance:function(){var e,r,n,i=this.toRgb();return e=i.r/255,r=i.g/255,n=i.b/255,.2126*(e<=.03928?e/12.92:t.pow((e+.055)/1.055,2.4))+.7152*(r<=.03928?r/12.92:t.pow((r+.055)/1.055,2.4))+.0722*(n<=.03928?n/12.92:t.pow((n+.055)/1.055,2.4))},setAlpha:function(t){return this._a=L(t),this._roundA=a(100*this._a)/100,this},toHsv:function(){var t=f(this._r,this._g,this._b);return{h:360*t.h,s:t.s,v:t.v,a:this._a}},toHsvString:function(){var t=f(this._r,this._g,this._b),e=a(360*t.h),r=a(100*t.s),n=a(100*t.v);return 1==this._a?"hsv("+e+", "+r+"%, "+n+"%)":"hsva("+e+", "+r+"%, "+n+"%, "+this._roundA+")"},toHsl:function(){var t=u(this._r,this._g,this._b);return{h:360*t.h,s:t.s,l:t.l,a:this._a}},toHslString:function(){var t=u(this._r,this._g,this._b),e=a(360*t.h),r=a(100*t.s),n=a(100*t.l);return 1==this._a?"hsl("+e+", "+r+"%, "+n+"%)":"hsla("+e+", "+r+"%, "+n+"%, 
"+this._roundA+")"},toHex:function(t){return h(this._r,this._g,this._b,t)},toHexString:function(t){return"#"+this.toHex(t)},toHex8:function(t){return function(t,e,r,n,i){var o=[O(a(t).toString(16)),O(a(e).toString(16)),O(a(r).toString(16)),O(D(n))];if(i&&o[0].charAt(0)==o[0].charAt(1)&&o[1].charAt(0)==o[1].charAt(1)&&o[2].charAt(0)==o[2].charAt(1)&&o[3].charAt(0)==o[3].charAt(1))return o[0].charAt(0)+o[1].charAt(0)+o[2].charAt(0)+o[3].charAt(0);return o.join("")}(this._r,this._g,this._b,this._a,t)},toHex8String:function(t){return"#"+this.toHex8(t)},toRgb:function(){return{r:a(this._r),g:a(this._g),b:a(this._b),a:this._a}},toRgbString:function(){return 1==this._a?"rgb("+a(this._r)+", "+a(this._g)+", "+a(this._b)+")":"rgba("+a(this._r)+", "+a(this._g)+", "+a(this._b)+", "+this._roundA+")"},toPercentageRgb:function(){return{r:a(100*C(this._r,255))+"%",g:a(100*C(this._g,255))+"%",b:a(100*C(this._b,255))+"%",a:this._a}},toPercentageRgbString:function(){return 1==this._a?"rgb("+a(100*C(this._r,255))+"%, "+a(100*C(this._g,255))+"%, "+a(100*C(this._b,255))+"%)":"rgba("+a(100*C(this._r,255))+"%, "+a(100*C(this._g,255))+"%, "+a(100*C(this._b,255))+"%, "+this._roundA+")"},toName:function(){return 0===this._a?"transparent":!(this._a<1)&&(E[h(this._r,this._g,this._b,!0)]||!1)},toFilter:function(t){var e="#"+p(this._r,this._g,this._b,this._a),r=e,n=this._gradientType?"GradientType = 1, ":"";if(t){var i=c(t);r="#"+p(i._r,i._g,i._b,i._a)}return"progid:DXImageTransform.Microsoft.gradient("+n+"startColorstr="+e+",endColorstr="+r+")"},toString:function(t){var e=!!t;t=t||this._format;var r=!1,n=this._a<1&&this._a>=0;return 
e||!n||"hex"!==t&&"hex6"!==t&&"hex3"!==t&&"hex4"!==t&&"hex8"!==t&&"name"!==t?("rgb"===t&&(r=this.toRgbString()),"prgb"===t&&(r=this.toPercentageRgbString()),"hex"!==t&&"hex6"!==t||(r=this.toHexString()),"hex3"===t&&(r=this.toHexString(!0)),"hex4"===t&&(r=this.toHex8String(!0)),"hex8"===t&&(r=this.toHex8String()),"name"===t&&(r=this.toName()),"hsl"===t&&(r=this.toHslString()),"hsv"===t&&(r=this.toHsvString()),r||this.toHexString()):"name"===t&&0===this._a?this.toName():this.toRgbString()},clone:function(){return c(this.toString())},_applyModification:function(t,e){var r=t.apply(null,[this].concat([].slice.call(e)));return this._r=r._r,this._g=r._g,this._b=r._b,this.setAlpha(r._a),this},lighten:function(){return this._applyModification(v,arguments)},brighten:function(){return this._applyModification(y,arguments)},darken:function(){return this._applyModification(x,arguments)},desaturate:function(){return this._applyModification(d,arguments)},saturate:function(){return this._applyModification(m,arguments)},greyscale:function(){return this._applyModification(g,arguments)},spin:function(){return this._applyModification(b,arguments)},_applyCombination:function(t,e){return t.apply(null,[this].concat([].slice.call(e)))},analogous:function(){return this._applyCombination(A,arguments)},complement:function(){return this._applyCombination(_,arguments)},monochromatic:function(){return this._applyCombination(M,arguments)},splitcomplement:function(){return this._applyCombination(k,arguments)},triad:function(){return this._applyCombination(w,arguments)},tetrad:function(){return this._applyCombination(T,arguments)}},c.fromRatio=function(t,e){if("object"==typeof t){var r={};for(var n in t)t.hasOwnProperty(n)&&(r[n]="a"===n?t[n]:z(t[n]));t=r}return c(t,e)},c.equals=function(t,e){return!(!t||!e)&&c(t).toRgbString()==c(e).toRgbString()},c.random=function(){return c.fromRatio({r:l(),g:l(),b:l()})},c.mix=function(t,e,r){r=0===r?0:r||50;var n=c(t).toRgb(),i=c(e).toRgb(),a=r/100;return 
c({r:(i.r-n.r)*a+n.r,g:(i.g-n.g)*a+n.g,b:(i.b-n.b)*a+n.b,a:(i.a-n.a)*a+n.a})},c.readability=function(e,r){var n=c(e),i=c(r);return(t.max(n.getLuminance(),i.getLuminance())+.05)/(t.min(n.getLuminance(),i.getLuminance())+.05)},c.isReadable=function(t,e,r){var n,i,a=c.readability(t,e);switch(i=!1,(n=function(t){var e,r;e=((t=t||{level:"AA",size:"small"}).level||"AA").toUpperCase(),r=(t.size||"small").toLowerCase(),"AA"!==e&&"AAA"!==e&&(e="AA");"small"!==r&&"large"!==r&&(r="small");return{level:e,size:r}}(r)).level+n.size){case"AAsmall":case"AAAlarge":i=a>=4.5;break;case"AAlarge":i=a>=3;break;case"AAAsmall":i=a>=7}return i},c.mostReadable=function(t,e,r){var n,i,a,o,s=null,l=0;i=(r=r||{}).includeFallbackColors,a=r.level,o=r.size;for(var u=0;ul&&(l=n,s=c(e[u]));return c.isReadable(t,s,{level:a,size:o})||!i?s:(r.includeFallbackColors=!1,c.mostReadable(t,["#fff","#000"],r))};var S=c.names={aliceblue:"f0f8ff",antiquewhite:"faebd7",aqua:"0ff",aquamarine:"7fffd4",azure:"f0ffff",beige:"f5f5dc",bisque:"ffe4c4",black:"000",blanchedalmond:"ffebcd",blue:"00f",blueviolet:"8a2be2",brown:"a52a2a",burlywood:"deb887",burntsienna:"ea7e5d",cadetblue:"5f9ea0",chartreuse:"7fff00",chocolate:"d2691e",coral:"ff7f50",cornflowerblue:"6495ed",cornsilk:"fff8dc",crimson:"dc143c",cyan:"0ff",darkblue:"00008b",darkcyan:"008b8b",darkgoldenrod:"b8860b",darkgray:"a9a9a9",darkgreen:"006400",darkgrey:"a9a9a9",darkkhaki:"bdb76b",darkmagenta:"8b008b",darkolivegreen:"556b2f",darkorange:"ff8c00",darkorchid:"9932cc",darkred:"8b0000",darksalmon:"e9967a",darkseagreen:"8fbc8f",darkslateblue:"483d8b",darkslategray:"2f4f4f",darkslategrey:"2f4f4f",darkturquoise:"00ced1",darkviolet:"9400d3",deeppink:"ff1493",deepskyblue:"00bfff",dimgray:"696969",dimgrey:"696969",dodgerblue:"1e90ff",firebrick:"b22222",floralwhite:"fffaf0",forestgreen:"228b22",fuchsia:"f0f",gainsboro:"dcdcdc",ghostwhite:"f8f8ff",gold:"ffd700",goldenrod:"daa520",gray:"808080",green:"008000",greenyellow:"adff2f",grey:"808080",honeydew:"f0fff0",hotpink:"f
f69b4",indianred:"cd5c5c",indigo:"4b0082",ivory:"fffff0",khaki:"f0e68c",lavender:"e6e6fa",lavenderblush:"fff0f5",lawngreen:"7cfc00",lemonchiffon:"fffacd",lightblue:"add8e6",lightcoral:"f08080",lightcyan:"e0ffff",lightgoldenrodyellow:"fafad2",lightgray:"d3d3d3",lightgreen:"90ee90",lightgrey:"d3d3d3",lightpink:"ffb6c1",lightsalmon:"ffa07a",lightseagreen:"20b2aa",lightskyblue:"87cefa",lightslategray:"789",lightslategrey:"789",lightsteelblue:"b0c4de",lightyellow:"ffffe0",lime:"0f0",limegreen:"32cd32",linen:"faf0e6",magenta:"f0f",maroon:"800000",mediumaquamarine:"66cdaa",mediumblue:"0000cd",mediumorchid:"ba55d3",mediumpurple:"9370db",mediumseagreen:"3cb371",mediumslateblue:"7b68ee",mediumspringgreen:"00fa9a",mediumturquoise:"48d1cc",mediumvioletred:"c71585",midnightblue:"191970",mintcream:"f5fffa",mistyrose:"ffe4e1",moccasin:"ffe4b5",navajowhite:"ffdead",navy:"000080",oldlace:"fdf5e6",olive:"808000",olivedrab:"6b8e23",orange:"ffa500",orangered:"ff4500",orchid:"da70d6",palegoldenrod:"eee8aa",palegreen:"98fb98",paleturquoise:"afeeee",palevioletred:"db7093",papayawhip:"ffefd5",peachpuff:"ffdab9",peru:"cd853f",pink:"ffc0cb",plum:"dda0dd",powderblue:"b0e0e6",purple:"800080",rebeccapurple:"663399",red:"f00",rosybrown:"bc8f8f",royalblue:"4169e1",saddlebrown:"8b4513",salmon:"fa8072",sandybrown:"f4a460",seagreen:"2e8b57",seashell:"fff5ee",sienna:"a0522d",silver:"c0c0c0",skyblue:"87ceeb",slateblue:"6a5acd",slategray:"708090",slategrey:"708090",snow:"fffafa",springgreen:"00ff7f",steelblue:"4682b4",tan:"d2b48c",teal:"008080",thistle:"d8bfd8",tomato:"ff6347",turquoise:"40e0d0",violet:"ee82ee",wheat:"f5deb3",white:"fff",whitesmoke:"f5f5f5",yellow:"ff0",yellowgreen:"9acd32"},E=c.hexNames=function(t){var e={};for(var r in t)t.hasOwnProperty(r)&&(e[t[r]]=r);return e}(S);function L(t){return t=parseFloat(t),(isNaN(t)||t<0||t>1)&&(t=1),t}function C(e,r){(function(t){return"string"==typeof t&&-1!=t.indexOf(".")&&1===parseFloat(t)})(e)&&(e="100%");var n=function(t){return"string"==typeof 
t&&-1!=t.indexOf("%")}(e);return e=o(r,s(0,parseFloat(e))),n&&(e=parseInt(e*r,10)/100),t.abs(e-r)<1e-6?1:e%r/parseFloat(r)}function P(t){return o(1,s(0,t))}function I(t){return parseInt(t,16)}function O(t){return 1==t.length?"0"+t:""+t}function z(t){return t<=1&&(t=100*t+"%"),t}function D(e){return t.round(255*parseFloat(e)).toString(16)}function R(t){return I(t)/255}var F,B,N,j=(B="[\\s|\\(]+("+(F="(?:[-\\+]?\\d*\\.\\d+%?)|(?:[-\\+]?\\d+%?)")+")[,|\\s]+("+F+")[,|\\s]+("+F+")\\s*\\)?",N="[\\s|\\(]+("+F+")[,|\\s]+("+F+")[,|\\s]+("+F+")[,|\\s]+("+F+")\\s*\\)?",{CSS_UNIT:new RegExp(F),rgb:new RegExp("rgb"+B),rgba:new RegExp("rgba"+N),hsl:new RegExp("hsl"+B),hsla:new RegExp("hsla"+N),hsv:new RegExp("hsv"+B),hsva:new RegExp("hsva"+N),hex3:/^#?([0-9a-fA-F]{1})([0-9a-fA-F]{1})([0-9a-fA-F]{1})$/,hex6:/^#?([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})$/,hex4:/^#?([0-9a-fA-F]{1})([0-9a-fA-F]{1})([0-9a-fA-F]{1})([0-9a-fA-F]{1})$/,hex8:/^#?([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})$/});function U(t){return!!j.CSS_UNIT.exec(t)}void 0!==e&&e.exports?e.exports=c:window.tinycolor=c}(Math)},{}],308:[function(t,e,r){"use strict";e.exports=i,e.exports.float32=e.exports.float=i,e.exports.fract32=e.exports.fract=function(t,e){if(t.length){if(t instanceof Float32Array)return new Float32Array(t.length);e instanceof Float32Array||(e=i(t));for(var r=0,n=e.length;ro&&(o=t[0]),t[1]s&&(s=t[1])}function c(t){switch(t.type){case"GeometryCollection":t.geometries.forEach(c);break;case"Point":l(t.coordinates);break;case"MultiPoint":t.coordinates.forEach(l)}}for(e in t.arcs.forEach((function(t){for(var e,r=-1,l=t.length;++ro&&(o=e[0]),e[1]s&&(s=e[1])})),t.objects)c(t.objects[e]);return[i,a,o,s]}function i(t,e){var r=e.id,n=e.bbox,i=null==e.properties?{}:e.properties,o=a(t,e);return null==r&&null==n?{type:"Feature",properties:i,geometry:o}:null==n?{type:"Feature",id:r,properties:i,geometry:o}:{type:"Feature",id:r,bbox:n,properties:i,geometry:o}}function a(t,e){var 
n=r(t.transform),i=t.arcs;function a(t,e){e.length&&e.pop();for(var r=i[t<0?~t:t],a=0,o=r.length;a1)n=l(t,e,r);else for(i=0,n=new Array(a=t.arcs.length);i1)for(var a,s,c=1,u=l(i[0]);cu&&(s=i[0],i[0]=i[c],i[c]=s,u=a);return i})).filter((function(t){return t.length>0}))}}function u(t,e){for(var r=0,n=t.length;r>>1;t[i]=2))throw new Error("n must be \u22652");var r,i=(l=t.bbox||n(t))[0],a=l[1],o=l[2],s=l[3];e={scale:[o-i?(o-i)/(r-1):1,s-a?(s-a)/(r-1):1],translate:[i,a]}}var l,c,u=f(e),h=t.objects,p={};function d(t){return u(t)}function m(t){var e;switch(t.type){case"GeometryCollection":e={type:"GeometryCollection",geometries:t.geometries.map(m)};break;case"Point":e={type:"Point",coordinates:d(t.coordinates)};break;case"MultiPoint":e={type:"MultiPoint",coordinates:t.coordinates.map(d)};break;default:return t}return null!=t.id&&(e.id=t.id),null!=t.bbox&&(e.bbox=t.bbox),null!=t.properties&&(e.properties=t.properties),e}for(c in h)p[c]=m(h[c]);return{type:"Topology",bbox:l,transform:e,objects:p,arcs:t.arcs.map((function(t){var e,r=0,n=1,i=t.length,a=new Array(i);for(a[0]=u(t[0],0);++r":(e.length>100&&(e=e.slice(0,99)+"\u2026"),e=e.replace(i,(function(t){switch(t){case"\n":return"\\n";case"\r":return"\\r";case"\u2028":return"\\u2028";case"\u2029":return"\\u2029";default:throw new Error("Unexpected character")}})))}},{"./safe-to-string":313}],315:[function(t,e,r){"use strict";var n=t("../value/is"),i={object:!0,function:!0,undefined:!0};e.exports=function(t){return!!n(t)&&hasOwnProperty.call(i,typeof t)}},{"../value/is":321}],316:[function(t,e,r){"use strict";var n=t("../lib/resolve-exception"),i=t("./is");e.exports=function(t){return i(t)?t:n(t,"%v is not a plain function",arguments[1])}},{"../lib/resolve-exception":312,"./is":317}],317:[function(t,e,r){"use strict";var n=t("../function/is"),i=/^\s*class[\s{/}]/,a=Function.prototype.toString;e.exports=function(t){return!!n(t)&&!i.test(a.call(t))}},{"../function/is":311}],318:[function(t,e,r){"use strict";var 
n=t("../object/is");e.exports=function(t){if(!n(t))return!1;try{return!!t.constructor&&t.constructor.prototype===t}catch(t){return!1}}},{"../object/is":315}],319:[function(t,e,r){"use strict";var n=t("../value/is"),i=t("../object/is"),a=Object.prototype.toString;e.exports=function(t){if(!n(t))return null;if(i(t)){var e=t.toString;if("function"!=typeof e)return null;if(e===a)return null}try{return""+t}catch(t){return null}}},{"../object/is":315,"../value/is":321}],320:[function(t,e,r){"use strict";var n=t("../lib/resolve-exception"),i=t("./is");e.exports=function(t){return i(t)?t:n(t,"Cannot use %v",arguments[1])}},{"../lib/resolve-exception":312,"./is":321}],321:[function(t,e,r){"use strict";e.exports=function(t){return null!=t}},{}],322:[function(t,e,r){(function(e){(function(){"use strict";var n=t("bit-twiddle"),i=t("dup"),a=t("buffer").Buffer;e.__TYPEDARRAY_POOL||(e.__TYPEDARRAY_POOL={UINT8:i([32,0]),UINT16:i([32,0]),UINT32:i([32,0]),BIGUINT64:i([32,0]),INT8:i([32,0]),INT16:i([32,0]),INT32:i([32,0]),BIGINT64:i([32,0]),FLOAT:i([32,0]),DOUBLE:i([32,0]),DATA:i([32,0]),UINT8C:i([32,0]),BUFFER:i([32,0])});var o="undefined"!=typeof Uint8ClampedArray,s="undefined"!=typeof BigUint64Array,l="undefined"!=typeof BigInt64Array,c=e.__TYPEDARRAY_POOL;c.UINT8C||(c.UINT8C=i([32,0])),c.BIGUINT64||(c.BIGUINT64=i([32,0])),c.BIGINT64||(c.BIGINT64=i([32,0])),c.BUFFER||(c.BUFFER=i([32,0]));var u=c.DATA,f=c.BUFFER;function h(t){if(t){var e=t.length||t.byteLength,r=n.log2(e);u[r].push(t)}}function p(t){t=n.nextPow2(t);var e=n.log2(t),r=u[e];return r.length>0?r.pop():new ArrayBuffer(t)}function d(t){return new Uint8Array(p(t),0,t)}function m(t){return new Uint16Array(p(2*t),0,t)}function g(t){return new Uint32Array(p(4*t),0,t)}function v(t){return new Int8Array(p(t),0,t)}function y(t){return new Int16Array(p(2*t),0,t)}function x(t){return new Int32Array(p(4*t),0,t)}function b(t){return new Float32Array(p(4*t),0,t)}function _(t){return new Float64Array(p(8*t),0,t)}function w(t){return 
o?new Uint8ClampedArray(p(t),0,t):d(t)}function T(t){return s?new BigUint64Array(p(8*t),0,t):null}function k(t){return l?new BigInt64Array(p(8*t),0,t):null}function A(t){return new DataView(p(t),0,t)}function M(t){t=n.nextPow2(t);var e=n.log2(t),r=f[e];return r.length>0?r.pop():new a(t)}r.free=function(t){if(a.isBuffer(t))f[n.log2(t.length)].push(t);else{if("[object ArrayBuffer]"!==Object.prototype.toString.call(t)&&(t=t.buffer),!t)return;var e=t.length||t.byteLength,r=0|n.log2(e);u[r].push(t)}},r.freeUint8=r.freeUint16=r.freeUint32=r.freeBigUint64=r.freeInt8=r.freeInt16=r.freeInt32=r.freeBigInt64=r.freeFloat32=r.freeFloat=r.freeFloat64=r.freeDouble=r.freeUint8Clamped=r.freeDataView=function(t){h(t.buffer)},r.freeArrayBuffer=h,r.freeBuffer=function(t){f[n.log2(t.length)].push(t)},r.malloc=function(t,e){if(void 0===e||"arraybuffer"===e)return p(t);switch(e){case"uint8":return d(t);case"uint16":return m(t);case"uint32":return g(t);case"int8":return v(t);case"int16":return y(t);case"int32":return x(t);case"float":case"float32":return b(t);case"double":case"float64":return _(t);case"uint8_clamped":return w(t);case"bigint64":return k(t);case"biguint64":return T(t);case"buffer":return M(t);case"data":case"dataview":return A(t);default:return null}return null},r.mallocArrayBuffer=p,r.mallocUint8=d,r.mallocUint16=m,r.mallocUint32=g,r.mallocInt8=v,r.mallocInt16=y,r.mallocInt32=x,r.mallocFloat32=r.mallocFloat=b,r.mallocFloat64=r.mallocDouble=_,r.mallocUint8Clamped=w,r.mallocBigUint64=T,r.mallocBigInt64=k,r.mallocDataView=A,r.mallocBuffer=M,r.clearCache=function(){for(var t=0;t<32;++t)c.UINT8[t].length=0,c.UINT16[t].length=0,c.UINT32[t].length=0,c.INT8[t].length=0,c.INT16[t].length=0,c.INT32[t].length=0,c.FLOAT[t].length=0,c.DOUBLE[t].length=0,c.BIGUINT64[t].length=0,c.BIGINT64[t].length=0,c.UINT8C[t].length=0,u[t].length=0,f[t].length=0}}).call(this)}).call(this,"undefined"!=typeof global?global:"undefined"!=typeof self?self:"undefined"!=typeof 
window?window:{})},{"bit-twiddle":77,buffer:80,dup:122}],323:[function(t,e,r){var n=/[\'\"]/;e.exports=function(t){return t?(n.test(t.charAt(0))&&(t=t.substr(1)),n.test(t.charAt(t.length-1))&&(t=t.substr(0,t.length-1)),t):""}},{}],324:[function(t,e,r){"use strict";e.exports=function(t,e,r){Array.isArray(r)||(r=[].slice.call(arguments,2));for(var n=0,i=r.length;n2111)throw e.replace(/\{0\}/,this.local.name);return t},toMonthIndex:function(t,e,r){var i=this.intercalaryMonth(t);if(r&&e!==i||e<1||e>12)throw n.local.invalidMonth.replace(/\{0\}/,this.local.name);return i?!r&&e<=i?e-1:e:e-1},toChineseMonth:function(t,e){t.year&&(e=(t=t.year()).month());var r=this.intercalaryMonth(t);if(e<0||e>(r?12:11))throw n.local.invalidMonth.replace(/\{0\}/,this.local.name);return r?e>13},isIntercalaryMonth:function(t,e){t.year&&(e=(t=t.year()).month());var r=this.intercalaryMonth(t);return!!r&&r===e},leapYear:function(t){return 0!==this.intercalaryMonth(t)},weekOfYear:function(t,e,r){var i,o=this._validateYear(t,n.local.invalidyear),s=h[o-h[0]],l=s>>9&4095,c=s>>5&15,u=31&s;(i=a.newDate(l,c,u)).add(4-(i.dayOfWeek()||7),"d");var f=this.toJD(t,e,r)-i.toJD();return 1+Math.floor(f/7)},monthsInYear:function(t){return this.leapYear(t)?13:12},daysInMonth:function(t,e){t.year&&(e=t.month(),t=t.year()),t=this._validateYear(t);var r=f[t-f[0]];if(e>(r>>13?12:11))throw n.local.invalidMonth.replace(/\{0\}/,this.local.name);return r&1<<12-e?30:29},weekDay:function(t,e,r){return(this.dayOfWeek(t,e,r)||7)<6},toJD:function(t,e,r){var i=this._validate(t,s,r,n.local.invalidDate);t=this._validateYear(i.year()),e=i.month(),r=i.day();var o=this.isIntercalaryMonth(t,e),s=this.toChineseMonth(t,e),l=function(t,e,r,n,i){var a,o,s;if("object"==typeof t)o=t,a=e||{};else{var l;if(!("number"==typeof t&&t>=1888&&t<=2111))throw new Error("Lunar year outside range 1888-2111");if(!("number"==typeof e&&e>=1&&e<=12))throw new Error("Lunar month outside range 1 - 12");if(!("number"==typeof r&&r>=1&&r<=30))throw new 
Error("Lunar day outside range 1 - 30");"object"==typeof n?(l=!1,a=n):(l=!!n,a=i||{}),o={year:t,month:e,day:r,isIntercalary:l}}s=o.day-1;var c,u=f[o.year-f[0]],p=u>>13;c=p&&(o.month>p||o.isIntercalary)?o.month:o.month-1;for(var d=0;d>9&4095,(m>>5&15)-1,(31&m)+s);return a.year=g.getFullYear(),a.month=1+g.getMonth(),a.day=g.getDate(),a}(t,s,r,o);return a.toJD(l.year,l.month,l.day)},fromJD:function(t){var e=a.fromJD(t),r=function(t,e,r,n){var i,a;if("object"==typeof t)i=t,a=e||{};else{if(!("number"==typeof t&&t>=1888&&t<=2111))throw new Error("Solar year outside range 1888-2111");if(!("number"==typeof e&&e>=1&&e<=12))throw new Error("Solar month outside range 1 - 12");if(!("number"==typeof r&&r>=1&&r<=31))throw new Error("Solar day outside range 1 - 31");i={year:t,month:e,day:r},a=n||{}}var o=h[i.year-h[0]],s=i.year<<9|i.month<<5|i.day;a.year=s>=o?i.year:i.year-1,o=h[a.year-h[0]];var l,c=new Date(o>>9&4095,(o>>5&15)-1,31&o),u=new Date(i.year,i.month-1,i.day);l=Math.round((u-c)/864e5);var p,d=f[a.year-f[0]];for(p=0;p<13;p++){var m=d&1<<12-p?30:29;if(l>13;!g||p=2&&n<=6},extraInfo:function(t,e,r){var i=this._validate(t,e,r,n.local.invalidDate);return{century:o[Math.floor((i.year()-1)/100)+1]||""}},toJD:function(t,e,r){var i=this._validate(t,e,r,n.local.invalidDate);return t=i.year()+(i.year()<0?1:0),e=i.month(),(r=i.day())+(e>1?16:0)+(e>2?32*(e-2):0)+400*(t-1)+this.jdEpoch-1},fromJD:function(t){t=Math.floor(t+.5)-Math.floor(this.jdEpoch)-1;var e=Math.floor(t/400)+1;t-=400*(e-1),t+=t>15?16:0;var r=Math.floor(t/32)+1,n=t-32*(r-1)+1;return this.newDate(e<=0?e-1:e,r,n)}});var o={20:"Fruitbat",21:"Anchovy"};n.calendars.discworld=a},{"../main":341,"object-assign":242}],330:[function(t,e,r){var n=t("../main"),i=t("object-assign");function a(t){this.local=this.regionalOptions[t||""]||this.regionalOptions[""]}a.prototype=new 
n.baseCalendar,i(a.prototype,{name:"Ethiopian",jdEpoch:1724220.5,daysPerMonth:[30,30,30,30,30,30,30,30,30,30,30,30,5],hasYearZero:!1,minMonth:1,firstMonth:1,minDay:1,regionalOptions:{"":{name:"Ethiopian",epochs:["BEE","EE"],monthNames:["Meskerem","Tikemet","Hidar","Tahesas","Tir","Yekatit","Megabit","Miazia","Genbot","Sene","Hamle","Nehase","Pagume"],monthNamesShort:["Mes","Tik","Hid","Tah","Tir","Yek","Meg","Mia","Gen","Sen","Ham","Neh","Pag"],dayNames:["Ehud","Segno","Maksegno","Irob","Hamus","Arb","Kidame"],dayNamesShort:["Ehu","Seg","Mak","Iro","Ham","Arb","Kid"],dayNamesMin:["Eh","Se","Ma","Ir","Ha","Ar","Ki"],digits:null,dateFormat:"dd/mm/yyyy",firstDay:0,isRTL:!1}},leapYear:function(t){var e=this._validate(t,this.minMonth,this.minDay,n.local.invalidYear);return(t=e.year()+(e.year()<0?1:0))%4==3||t%4==-1},monthsInYear:function(t){return this._validate(t,this.minMonth,this.minDay,n.local.invalidYear||n.regionalOptions[""].invalidYear),13},weekOfYear:function(t,e,r){var n=this.newDate(t,e,r);return n.add(-n.dayOfWeek(),"d"),Math.floor((n.dayOfYear()-1)/7)+1},daysInMonth:function(t,e){var r=this._validate(t,e,this.minDay,n.local.invalidMonth);return this.daysPerMonth[r.month()-1]+(13===r.month()&&this.leapYear(r.year())?1:0)},weekDay:function(t,e,r){return(this.dayOfWeek(t,e,r)||7)<6},toJD:function(t,e,r){var i=this._validate(t,e,r,n.local.invalidDate);return(t=i.year())<0&&t++,i.day()+30*(i.month()-1)+365*(t-1)+Math.floor(t/4)+this.jdEpoch-1},fromJD:function(t){var e=Math.floor(t)+.5-this.jdEpoch,r=Math.floor((e-Math.floor((e+366)/1461))/365)+1;r<=0&&r--,e=Math.floor(t)+.5-this.newDate(r,1,1).toJD();var n=Math.floor(e/30)+1,i=e-30*(n-1)+1;return this.newDate(r,n,i)}}),n.calendars.ethiopian=a},{"../main":341,"object-assign":242}],331:[function(t,e,r){var n=t("../main"),i=t("object-assign");function a(t){this.local=this.regionalOptions[t||""]||this.regionalOptions[""]}function o(t,e){return t-e*Math.floor(t/e)}a.prototype=new 
n.baseCalendar,i(a.prototype,{name:"Hebrew",jdEpoch:347995.5,daysPerMonth:[30,29,30,29,30,29,30,29,30,29,30,29,29],hasYearZero:!1,minMonth:1,firstMonth:7,minDay:1,regionalOptions:{"":{name:"Hebrew",epochs:["BAM","AM"],monthNames:["Nisan","Iyar","Sivan","Tammuz","Av","Elul","Tishrei","Cheshvan","Kislev","Tevet","Shevat","Adar","Adar II"],monthNamesShort:["Nis","Iya","Siv","Tam","Av","Elu","Tis","Che","Kis","Tev","She","Ada","Ad2"],dayNames:["Yom Rishon","Yom Sheni","Yom Shlishi","Yom Revi'i","Yom Chamishi","Yom Shishi","Yom Shabbat"],dayNamesShort:["Ris","She","Shl","Rev","Cha","Shi","Sha"],dayNamesMin:["Ri","She","Shl","Re","Ch","Shi","Sha"],digits:null,dateFormat:"dd/mm/yyyy",firstDay:0,isRTL:!1}},leapYear:function(t){var e=this._validate(t,this.minMonth,this.minDay,n.local.invalidYear);return this._leapYear(e.year())},_leapYear:function(t){return o(7*(t=t<0?t+1:t)+1,19)<7},monthsInYear:function(t){return this._validate(t,this.minMonth,this.minDay,n.local.invalidYear),this._leapYear(t.year?t.year():t)?13:12},weekOfYear:function(t,e,r){var n=this.newDate(t,e,r);return n.add(-n.dayOfWeek(),"d"),Math.floor((n.dayOfYear()-1)/7)+1},daysInYear:function(t){return t=this._validate(t,this.minMonth,this.minDay,n.local.invalidYear).year(),this.toJD(-1===t?1:t+1,7,1)-this.toJD(t,7,1)},daysInMonth:function(t,e){return t.year&&(e=t.month(),t=t.year()),this._validate(t,e,this.minDay,n.local.invalidMonth),12===e&&this.leapYear(t)||8===e&&5===o(this.daysInYear(t),10)?30:9===e&&3===o(this.daysInYear(t),10)?29:this.daysPerMonth[e-1]},weekDay:function(t,e,r){return 6!==this.dayOfWeek(t,e,r)},extraInfo:function(t,e,r){var i=this._validate(t,e,r,n.local.invalidDate);return{yearType:(this.leapYear(i)?"embolismic":"common")+" "+["deficient","regular","complete"][this.daysInYear(i)%10-3]}},toJD:function(t,e,r){var i=this._validate(t,e,r,n.local.invalidDate);t=i.year(),e=i.month(),r=i.day();var a=t<=0?t+1:t,o=this.jdEpoch+this._delay1(a)+this._delay2(a)+r+1;if(e<7){for(var 
s=7;s<=this.monthsInYear(t);s++)o+=this.daysInMonth(t,s);for(s=1;s=this.toJD(-1===e?1:e+1,7,1);)e++;for(var r=tthis.toJD(e,r,this.daysInMonth(e,r));)r++;var n=t-this.toJD(e,r,1)+1;return this.newDate(e,r,n)}}),n.calendars.hebrew=a},{"../main":341,"object-assign":242}],332:[function(t,e,r){var n=t("../main"),i=t("object-assign");function a(t){this.local=this.regionalOptions[t||""]||this.regionalOptions[""]}a.prototype=new n.baseCalendar,i(a.prototype,{name:"Islamic",jdEpoch:1948439.5,daysPerMonth:[30,29,30,29,30,29,30,29,30,29,30,29],hasYearZero:!1,minMonth:1,firstMonth:1,minDay:1,regionalOptions:{"":{name:"Islamic",epochs:["BH","AH"],monthNames:["Muharram","Safar","Rabi' al-awwal","Rabi' al-thani","Jumada al-awwal","Jumada al-thani","Rajab","Sha'aban","Ramadan","Shawwal","Dhu al-Qi'dah","Dhu al-Hijjah"],monthNamesShort:["Muh","Saf","Rab1","Rab2","Jum1","Jum2","Raj","Sha'","Ram","Shaw","DhuQ","DhuH"],dayNames:["Yawm al-ahad","Yawm al-ithnayn","Yawm ath-thulaathaa'","Yawm al-arbi'aa'","Yawm al-kham\u012bs","Yawm al-jum'a","Yawm as-sabt"],dayNamesShort:["Aha","Ith","Thu","Arb","Kha","Jum","Sab"],dayNamesMin:["Ah","It","Th","Ar","Kh","Ju","Sa"],digits:null,dateFormat:"yyyy/mm/dd",firstDay:6,isRTL:!1}},leapYear:function(t){return(11*this._validate(t,this.minMonth,this.minDay,n.local.invalidYear).year()+14)%30<11},weekOfYear:function(t,e,r){var n=this.newDate(t,e,r);return n.add(-n.dayOfWeek(),"d"),Math.floor((n.dayOfYear()-1)/7)+1},daysInYear:function(t){return this.leapYear(t)?355:354},daysInMonth:function(t,e){var r=this._validate(t,e,this.minDay,n.local.invalidMonth);return this.daysPerMonth[r.month()-1]+(12===r.month()&&this.leapYear(r.year())?1:0)},weekDay:function(t,e,r){return 5!==this.dayOfWeek(t,e,r)},toJD:function(t,e,r){var i=this._validate(t,e,r,n.local.invalidDate);return t=i.year(),e=i.month(),t=t<=0?t+1:t,(r=i.day())+Math.ceil(29.5*(e-1))+354*(t-1)+Math.floor((3+11*t)/30)+this.jdEpoch-1},fromJD:function(t){t=Math.floor(t)+.5;var 
e=Math.floor((30*(t-this.jdEpoch)+10646)/10631);e=e<=0?e-1:e;var r=Math.min(12,Math.ceil((t-29-this.toJD(e,1,1))/29.5)+1),n=t-this.toJD(e,r,1)+1;return this.newDate(e,r,n)}}),n.calendars.islamic=a},{"../main":341,"object-assign":242}],333:[function(t,e,r){var n=t("../main"),i=t("object-assign");function a(t){this.local=this.regionalOptions[t||""]||this.regionalOptions[""]}a.prototype=new n.baseCalendar,i(a.prototype,{name:"Julian",jdEpoch:1721423.5,daysPerMonth:[31,28,31,30,31,30,31,31,30,31,30,31],hasYearZero:!1,minMonth:1,firstMonth:1,minDay:1,regionalOptions:{"":{name:"Julian",epochs:["BC","AD"],monthNames:["January","February","March","April","May","June","July","August","September","October","November","December"],monthNamesShort:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],dayNames:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],dayNamesShort:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],dayNamesMin:["Su","Mo","Tu","We","Th","Fr","Sa"],digits:null,dateFormat:"mm/dd/yyyy",firstDay:0,isRTL:!1}},leapYear:function(t){var e=this._validate(t,this.minMonth,this.minDay,n.local.invalidYear);return(t=e.year()<0?e.year()+1:e.year())%4==0},weekOfYear:function(t,e,r){var n=this.newDate(t,e,r);return n.add(4-(n.dayOfWeek()||7),"d"),Math.floor((n.dayOfYear()-1)/7)+1},daysInMonth:function(t,e){var r=this._validate(t,e,this.minDay,n.local.invalidMonth);return this.daysPerMonth[r.month()-1]+(2===r.month()&&this.leapYear(r.year())?1:0)},weekDay:function(t,e,r){return(this.dayOfWeek(t,e,r)||7)<6},toJD:function(t,e,r){var i=this._validate(t,e,r,n.local.invalidDate);return t=i.year(),e=i.month(),r=i.day(),t<0&&t++,e<=2&&(t--,e+=12),Math.floor(365.25*(t+4716))+Math.floor(30.6001*(e+1))+r-1524.5},fromJD:function(t){var e=Math.floor(t+.5)+1524,r=Math.floor((e-122.1)/365.25),n=Math.floor(365.25*r),i=Math.floor((e-n)/30.6001),a=i-Math.floor(i<14?1:13),o=r-Math.floor(a>2?4716:4715),s=e-n-Math.floor(30.6001*i);return 
o<=0&&o--,this.newDate(o,a,s)}}),n.calendars.julian=a},{"../main":341,"object-assign":242}],334:[function(t,e,r){var n=t("../main"),i=t("object-assign");function a(t){this.local=this.regionalOptions[t||""]||this.regionalOptions[""]}function o(t,e){return t-e*Math.floor(t/e)}function s(t,e){return o(t-1,e)+1}a.prototype=new n.baseCalendar,i(a.prototype,{name:"Mayan",jdEpoch:584282.5,hasYearZero:!0,minMonth:0,firstMonth:0,minDay:0,regionalOptions:{"":{name:"Mayan",epochs:["",""],monthNames:["0","1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17"],monthNamesShort:["0","1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17"],dayNames:["0","1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17","18","19"],dayNamesShort:["0","1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17","18","19"],dayNamesMin:["0","1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17","18","19"],digits:null,dateFormat:"YYYY.m.d",firstDay:0,isRTL:!1,haabMonths:["Pop","Uo","Zip","Zotz","Tzec","Xul","Yaxkin","Mol","Chen","Yax","Zac","Ceh","Mac","Kankin","Muan","Pax","Kayab","Cumku","Uayeb"],tzolkinMonths:["Imix","Ik","Akbal","Kan","Chicchan","Cimi","Manik","Lamat","Muluc","Oc","Chuen","Eb","Ben","Ix","Men","Cib","Caban","Etznab","Cauac","Ahau"]}},leapYear:function(t){return this._validate(t,this.minMonth,this.minDay,n.local.invalidYear),!1},formatYear:function(t){t=this._validate(t,this.minMonth,this.minDay,n.local.invalidYear).year();var e=Math.floor(t/400);return t%=400,t+=t<0?400:0,e+"."+Math.floor(t/20)+"."+t%20},forYear:function(t){if((t=t.split(".")).length<3)throw"Invalid Mayan year";for(var e=0,r=0;r19||r>0&&n<0)throw"Invalid Mayan year";e=20*e+n}return e},monthsInYear:function(t){return this._validate(t,this.minMonth,this.minDay,n.local.invalidYear),18},weekOfYear:function(t,e,r){return this._validate(t,e,r,n.local.invalidDate),0},daysInYear:function(t){return 
this._validate(t,this.minMonth,this.minDay,n.local.invalidYear),360},daysInMonth:function(t,e){return this._validate(t,e,this.minDay,n.local.invalidMonth),20},daysInWeek:function(){return 5},dayOfWeek:function(t,e,r){return this._validate(t,e,r,n.local.invalidDate).day()},weekDay:function(t,e,r){return this._validate(t,e,r,n.local.invalidDate),!0},extraInfo:function(t,e,r){var i=this._validate(t,e,r,n.local.invalidDate).toJD(),a=this._toHaab(i),o=this._toTzolkin(i);return{haabMonthName:this.local.haabMonths[a[0]-1],haabMonth:a[0],haabDay:a[1],tzolkinDayName:this.local.tzolkinMonths[o[0]-1],tzolkinDay:o[0],tzolkinTrecena:o[1]}},_toHaab:function(t){var e=o((t-=this.jdEpoch)+8+340,365);return[Math.floor(e/20)+1,o(e,20)]},_toTzolkin:function(t){return[s((t-=this.jdEpoch)+20,20),s(t+4,13)]},toJD:function(t,e,r){var i=this._validate(t,e,r,n.local.invalidDate);return i.day()+20*i.month()+360*i.year()+this.jdEpoch},fromJD:function(t){t=Math.floor(t)+.5-this.jdEpoch;var e=Math.floor(t/360);t%=360,t+=t<0?360:0;var r=Math.floor(t/20),n=t%20;return this.newDate(e,r,n)}}),n.calendars.mayan=a},{"../main":341,"object-assign":242}],335:[function(t,e,r){var n=t("../main"),i=t("object-assign");function a(t){this.local=this.regionalOptions[t||""]||this.regionalOptions[""]}a.prototype=new n.baseCalendar;var 
o=n.instance("gregorian");i(a.prototype,{name:"Nanakshahi",jdEpoch:2257673.5,daysPerMonth:[31,31,31,31,31,30,30,30,30,30,30,30],hasYearZero:!1,minMonth:1,firstMonth:1,minDay:1,regionalOptions:{"":{name:"Nanakshahi",epochs:["BN","AN"],monthNames:["Chet","Vaisakh","Jeth","Harh","Sawan","Bhadon","Assu","Katak","Maghar","Poh","Magh","Phagun"],monthNamesShort:["Che","Vai","Jet","Har","Saw","Bha","Ass","Kat","Mgr","Poh","Mgh","Pha"],dayNames:["Somvaar","Mangalvar","Budhvaar","Veervaar","Shukarvaar","Sanicharvaar","Etvaar"],dayNamesShort:["Som","Mangal","Budh","Veer","Shukar","Sanichar","Et"],dayNamesMin:["So","Ma","Bu","Ve","Sh","Sa","Et"],digits:null,dateFormat:"dd-mm-yyyy",firstDay:0,isRTL:!1}},leapYear:function(t){var e=this._validate(t,this.minMonth,this.minDay,n.local.invalidYear||n.regionalOptions[""].invalidYear);return o.leapYear(e.year()+(e.year()<1?1:0)+1469)},weekOfYear:function(t,e,r){var n=this.newDate(t,e,r);return n.add(1-(n.dayOfWeek()||7),"d"),Math.floor((n.dayOfYear()-1)/7)+1},daysInMonth:function(t,e){var r=this._validate(t,e,this.minDay,n.local.invalidMonth);return this.daysPerMonth[r.month()-1]+(12===r.month()&&this.leapYear(r.year())?1:0)},weekDay:function(t,e,r){return(this.dayOfWeek(t,e,r)||7)<6},toJD:function(t,e,r){var i=this._validate(t,e,r,n.local.invalidMonth);(t=i.year())<0&&t++;for(var a=i.day(),s=1;s=this.toJD(e+1,1,1);)e++;for(var r=t-Math.floor(this.toJD(e,1,1)+.5)+1,n=1;r>this.daysInMonth(e,n);)r-=this.daysInMonth(e,n),n++;return this.newDate(e,n,r)}}),n.calendars.nanakshahi=a},{"../main":341,"object-assign":242}],336:[function(t,e,r){var n=t("../main"),i=t("object-assign");function a(t){this.local=this.regionalOptions[t||""]||this.regionalOptions[""]}a.prototype=new 
n.baseCalendar,i(a.prototype,{name:"Nepali",jdEpoch:1700709.5,daysPerMonth:[31,31,32,32,31,30,30,29,30,29,30,30],hasYearZero:!1,minMonth:1,firstMonth:1,minDay:1,daysPerYear:365,regionalOptions:{"":{name:"Nepali",epochs:["BBS","ABS"],monthNames:["Baisakh","Jestha","Ashadh","Shrawan","Bhadra","Ashwin","Kartik","Mangsir","Paush","Mangh","Falgun","Chaitra"],monthNamesShort:["Bai","Je","As","Shra","Bha","Ash","Kar","Mang","Pau","Ma","Fal","Chai"],dayNames:["Aaitabaar","Sombaar","Manglbaar","Budhabaar","Bihibaar","Shukrabaar","Shanibaar"],dayNamesShort:["Aaita","Som","Mangl","Budha","Bihi","Shukra","Shani"],dayNamesMin:["Aai","So","Man","Bu","Bi","Shu","Sha"],digits:null,dateFormat:"dd/mm/yyyy",firstDay:1,isRTL:!1}},leapYear:function(t){return this.daysInYear(t)!==this.daysPerYear},weekOfYear:function(t,e,r){var n=this.newDate(t,e,r);return n.add(-n.dayOfWeek(),"d"),Math.floor((n.dayOfYear()-1)/7)+1},daysInYear:function(t){if(t=this._validate(t,this.minMonth,this.minDay,n.local.invalidYear).year(),void 0===this.NEPALI_CALENDAR_DATA[t])return this.daysPerYear;for(var e=0,r=this.minMonth;r<=12;r++)e+=this.NEPALI_CALENDAR_DATA[t][r];return e},daysInMonth:function(t,e){return t.year&&(e=t.month(),t=t.year()),this._validate(t,e,this.minDay,n.local.invalidMonth),void 0===this.NEPALI_CALENDAR_DATA[t]?this.daysPerMonth[e-1]:this.NEPALI_CALENDAR_DATA[t][e]},weekDay:function(t,e,r){return 6!==this.dayOfWeek(t,e,r)},toJD:function(t,e,r){var i=this._validate(t,e,r,n.local.invalidDate);t=i.year(),e=i.month(),r=i.day();var a=n.instance(),o=0,s=e,l=t;this._createMissingCalendarData(t);var c=t-(s>9||9===s&&r>=this.NEPALI_CALENDAR_DATA[l][0]?56:57);for(9!==e&&(o=r,s--);9!==s;)s<=0&&(s=12,l--),o+=this.NEPALI_CALENDAR_DATA[l][s],s--;return 9===e?(o+=r-this.NEPALI_CALENDAR_DATA[l][0])<0&&(o+=a.daysInYear(c)):o+=this.NEPALI_CALENDAR_DATA[l][9]-this.NEPALI_CALENDAR_DATA[l][0],a.newDate(c,1,1).add(o,"d").toJD()},fromJD:function(t){var 
e=n.instance().fromJD(t),r=e.year(),i=e.dayOfYear(),a=r+56;this._createMissingCalendarData(a);for(var o=9,s=this.NEPALI_CALENDAR_DATA[a][0],l=this.NEPALI_CALENDAR_DATA[a][o]-s+1;i>l;)++o>12&&(o=1,a++),l+=this.NEPALI_CALENDAR_DATA[a][o];var c=this.NEPALI_CALENDAR_DATA[a][o]-(l-i);return this.newDate(a,o,c)},_createMissingCalendarData:function(t){var e=this.daysPerMonth.slice(0);e.unshift(17);for(var r=t-1;r0?474:473))%2820+474+38)%2816<682},weekOfYear:function(t,e,r){var n=this.newDate(t,e,r);return n.add(-(n.dayOfWeek()+1)%7,"d"),Math.floor((n.dayOfYear()-1)/7)+1},daysInMonth:function(t,e){var r=this._validate(t,e,this.minDay,n.local.invalidMonth);return this.daysPerMonth[r.month()-1]+(12===r.month()&&this.leapYear(r.year())?1:0)},weekDay:function(t,e,r){return 5!==this.dayOfWeek(t,e,r)},toJD:function(t,e,r){var i=this._validate(t,e,r,n.local.invalidDate);t=i.year(),e=i.month(),r=i.day();var a=t-(t>=0?474:473),s=474+o(a,2820);return r+(e<=7?31*(e-1):30*(e-1)+6)+Math.floor((682*s-110)/2816)+365*(s-1)+1029983*Math.floor(a/2820)+this.jdEpoch-1},fromJD:function(t){var e=(t=Math.floor(t)+.5)-this.toJD(475,1,1),r=Math.floor(e/1029983),n=o(e,1029983),i=2820;if(1029982!==n){var a=Math.floor(n/366),s=o(n,366);i=Math.floor((2134*a+2816*s+2815)/1028522)+a+1}var l=i+2820*r+474;l=l<=0?l-1:l;var c=t-this.toJD(l,1,1)+1,u=c<=186?Math.ceil(c/31):Math.ceil((c-6)/30),f=t-this.toJD(l,u,1)+1;return this.newDate(l,u,f)}}),n.calendars.persian=a,n.calendars.jalali=a},{"../main":341,"object-assign":242}],338:[function(t,e,r){var n=t("../main"),i=t("object-assign"),a=n.instance();function o(t){this.local=this.regionalOptions[t||""]||this.regionalOptions[""]}o.prototype=new 
n.baseCalendar,i(o.prototype,{name:"Taiwan",jdEpoch:2419402.5,yearsOffset:1911,daysPerMonth:[31,28,31,30,31,30,31,31,30,31,30,31],hasYearZero:!1,minMonth:1,firstMonth:1,minDay:1,regionalOptions:{"":{name:"Taiwan",epochs:["BROC","ROC"],monthNames:["January","February","March","April","May","June","July","August","September","October","November","December"],monthNamesShort:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],dayNames:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],dayNamesShort:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],dayNamesMin:["Su","Mo","Tu","We","Th","Fr","Sa"],digits:null,dateFormat:"yyyy/mm/dd",firstDay:1,isRTL:!1}},leapYear:function(t){var e=this._validate(t,this.minMonth,this.minDay,n.local.invalidYear);t=this._t2gYear(e.year());return a.leapYear(t)},weekOfYear:function(t,e,r){var i=this._validate(t,this.minMonth,this.minDay,n.local.invalidYear);t=this._t2gYear(i.year());return a.weekOfYear(t,i.month(),i.day())},daysInMonth:function(t,e){var r=this._validate(t,e,this.minDay,n.local.invalidMonth);return this.daysPerMonth[r.month()-1]+(2===r.month()&&this.leapYear(r.year())?1:0)},weekDay:function(t,e,r){return(this.dayOfWeek(t,e,r)||7)<6},toJD:function(t,e,r){var i=this._validate(t,e,r,n.local.invalidDate);t=this._t2gYear(i.year());return a.toJD(t,i.month(),i.day())},fromJD:function(t){var e=a.fromJD(t),r=this._g2tYear(e.year());return this.newDate(r,e.month(),e.day())},_t2gYear:function(t){return t+this.yearsOffset+(t>=-this.yearsOffset&&t<=-1?1:0)},_g2tYear:function(t){return t-this.yearsOffset-(t>=1&&t<=this.yearsOffset?1:0)}}),n.calendars.taiwan=o},{"../main":341,"object-assign":242}],339:[function(t,e,r){var n=t("../main"),i=t("object-assign"),a=n.instance();function o(t){this.local=this.regionalOptions[t||""]||this.regionalOptions[""]}o.prototype=new 
n.baseCalendar,i(o.prototype,{name:"Thai",jdEpoch:1523098.5,yearsOffset:543,daysPerMonth:[31,28,31,30,31,30,31,31,30,31,30,31],hasYearZero:!1,minMonth:1,firstMonth:1,minDay:1,regionalOptions:{"":{name:"Thai",epochs:["BBE","BE"],monthNames:["January","February","March","April","May","June","July","August","September","October","November","December"],monthNamesShort:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],dayNames:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],dayNamesShort:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],dayNamesMin:["Su","Mo","Tu","We","Th","Fr","Sa"],digits:null,dateFormat:"dd/mm/yyyy",firstDay:0,isRTL:!1}},leapYear:function(t){var e=this._validate(t,this.minMonth,this.minDay,n.local.invalidYear);t=this._t2gYear(e.year());return a.leapYear(t)},weekOfYear:function(t,e,r){var i=this._validate(t,this.minMonth,this.minDay,n.local.invalidYear);t=this._t2gYear(i.year());return a.weekOfYear(t,i.month(),i.day())},daysInMonth:function(t,e){var r=this._validate(t,e,this.minDay,n.local.invalidMonth);return this.daysPerMonth[r.month()-1]+(2===r.month()&&this.leapYear(r.year())?1:0)},weekDay:function(t,e,r){return(this.dayOfWeek(t,e,r)||7)<6},toJD:function(t,e,r){var i=this._validate(t,e,r,n.local.invalidDate);t=this._t2gYear(i.year());return a.toJD(t,i.month(),i.day())},fromJD:function(t){var e=a.fromJD(t),r=this._g2tYear(e.year());return this.newDate(r,e.month(),e.day())},_t2gYear:function(t){return t-this.yearsOffset-(t>=1&&t<=this.yearsOffset?1:0)},_g2tYear:function(t){return t+this.yearsOffset+(t>=-this.yearsOffset&&t<=-1?1:0)}}),n.calendars.thai=o},{"../main":341,"object-assign":242}],340:[function(t,e,r){var n=t("../main"),i=t("object-assign");function a(t){this.local=this.regionalOptions[t||""]||this.regionalOptions[""]}a.prototype=new n.baseCalendar,i(a.prototype,{name:"UmmAlQura",hasYearZero:!1,minMonth:1,firstMonth:1,minDay:1,regionalOptions:{"":{name:"Umm 
al-Qura",epochs:["BH","AH"],monthNames:["Al-Muharram","Safar","Rabi' al-awwal","Rabi' Al-Thani","Jumada Al-Awwal","Jumada Al-Thani","Rajab","Sha'aban","Ramadan","Shawwal","Dhu al-Qi'dah","Dhu al-Hijjah"],monthNamesShort:["Muh","Saf","Rab1","Rab2","Jum1","Jum2","Raj","Sha'","Ram","Shaw","DhuQ","DhuH"],dayNames:["Yawm al-Ahad","Yawm al-Ithnain","Yawm al-Thal\u0101th\u0101\u2019","Yawm al-Arba\u2018\u0101\u2019","Yawm al-Kham\u012bs","Yawm al-Jum\u2018a","Yawm al-Sabt"],dayNamesMin:["Ah","Ith","Th","Ar","Kh","Ju","Sa"],digits:null,dateFormat:"yyyy/mm/dd",firstDay:6,isRTL:!0}},leapYear:function(t){var e=this._validate(t,this.minMonth,this.minDay,n.local.invalidYear);return 355===this.daysInYear(e.year())},weekOfYear:function(t,e,r){var n=this.newDate(t,e,r);return n.add(-n.dayOfWeek(),"d"),Math.floor((n.dayOfYear()-1)/7)+1},daysInYear:function(t){for(var e=0,r=1;r<=12;r++)e+=this.daysInMonth(t,r);return e},daysInMonth:function(t,e){for(var r=this._validate(t,e,this.minDay,n.local.invalidMonth).toJD()-24e5+.5,i=0,a=0;ar)return o[i]-o[i-1];i++}return 30},weekDay:function(t,e,r){return 5!==this.dayOfWeek(t,e,r)},toJD:function(t,e,r){var i=this._validate(t,e,r,n.local.invalidDate),a=12*(i.year()-1)+i.month()-15292;return i.day()+o[a-1]-1+24e5-.5},fromJD:function(t){for(var e=t-24e5+.5,r=0,n=0;ne);n++)r++;var i=r+15292,a=Math.floor((i-1)/12),s=a+1,l=i-12*a,c=e-o[r-1]+1;return this.newDate(s,l,c)},isValid:function(t,e,r){var i=n.baseCalendar.prototype.isValid.apply(this,arguments);return i&&(i=(t=null!=t.year?t.year:t)>=1276&&t<=1500),i},_validate:function(t,e,r,i){var a=n.baseCalendar.prototype._validate.apply(this,arguments);if(a.year<1276||a.year>1500)throw i.replace(/\{0\}/,this.local.name);return a}}),n.calendars.ummalqura=a;var 
o=[20,50,79,109,138,168,197,227,256,286,315,345,374,404,433,463,492,522,551,581,611,641,670,700,729,759,788,818,847,877,906,936,965,995,1024,1054,1083,1113,1142,1172,1201,1231,1260,1290,1320,1350,1379,1409,1438,1468,1497,1527,1556,1586,1615,1645,1674,1704,1733,1763,1792,1822,1851,1881,1910,1940,1969,1999,2028,2058,2087,2117,2146,2176,2205,2235,2264,2294,2323,2353,2383,2413,2442,2472,2501,2531,2560,2590,2619,2649,2678,2708,2737,2767,2796,2826,2855,2885,2914,2944,2973,3003,3032,3062,3091,3121,3150,3180,3209,3239,3268,3298,3327,3357,3386,3416,3446,3476,3505,3535,3564,3594,3623,3653,3682,3712,3741,3771,3800,3830,3859,3889,3918,3948,3977,4007,4036,4066,4095,4125,4155,4185,4214,4244,4273,4303,4332,4362,4391,4421,4450,4480,4509,4539,4568,4598,4627,4657,4686,4716,4745,4775,4804,4834,4863,4893,4922,4952,4981,5011,5040,5070,5099,5129,5158,5188,5218,5248,5277,5307,5336,5366,5395,5425,5454,5484,5513,5543,5572,5602,5631,5661,5690,5720,5749,5779,5808,5838,5867,5897,5926,5956,5985,6015,6044,6074,6103,6133,6162,6192,6221,6251,6281,6311,6340,6370,6399,6429,6458,6488,6517,6547,6576,6606,6635,6665,6694,6724,6753,6783,6812,6842,6871,6901,6930,6960,6989,7019,7048,7078,7107,7137,7166,7196,7225,7255,7284,7314,7344,7374,7403,7433,7462,7492,7521,7551,7580,7610,7639,7669,7698,7728,7757,7787,7816,7846,7875,7905,7934,7964,7993,8023,8053,8083,8112,8142,8171,8201,8230,8260,8289,8319,8348,8378,8407,8437,8466,8496,8525,8555,8584,8614,8643,8673,8702,8732,8761,8791,8821,8850,8880,8909,8938,8968,8997,9027,9056,9086,9115,9145,9175,9205,9234,9264,9293,9322,9352,9381,9410,9440,9470,9499,9529,9559,9589,9618,9648,9677,9706,9736,9765,9794,9824,9853,9883,9913,9943,9972,10002,10032,10061,10090,10120,10149,10178,10208,10237,10267,10297,10326,10356,10386,10415,10445,10474,10504,10533,10562,10592,10621,10651,10680,10710,10740,10770,10799,10829,10858,10888,10917,10947,10976,11005,11035,11064,11094,11124,11153,11183,11213,11242,11272,11301,11331,11360,11389,11419,11448,11478,11507,11537,11567,11596,11626,11655,11
685,11715,11744,11774,11803,11832,11862,11891,11921,11950,11980,12010,12039,12069,12099,12128,12158,12187,12216,12246,12275,12304,12334,12364,12393,12423,12453,12483,12512,12542,12571,12600,12630,12659,12688,12718,12747,12777,12807,12837,12866,12896,12926,12955,12984,13014,13043,13072,13102,13131,13161,13191,13220,13250,13280,13310,13339,13368,13398,13427,13456,13486,13515,13545,13574,13604,13634,13664,13693,13723,13752,13782,13811,13840,13870,13899,13929,13958,13988,14018,14047,14077,14107,14136,14166,14195,14224,14254,14283,14313,14342,14372,14401,14431,14461,14490,14520,14550,14579,14609,14638,14667,14697,14726,14756,14785,14815,14844,14874,14904,14933,14963,14993,15021,15051,15081,15110,15140,15169,15199,15228,15258,15287,15317,15347,15377,15406,15436,15465,15494,15524,15553,15582,15612,15641,15671,15701,15731,15760,15790,15820,15849,15878,15908,15937,15966,15996,16025,16055,16085,16114,16144,16174,16204,16233,16262,16292,16321,16350,16380,16409,16439,16468,16498,16528,16558,16587,16617,16646,16676,16705,16734,16764,16793,16823,16852,16882,16912,16941,16971,17001,17030,17060,17089,17118,17148,17177,17207,17236,17266,17295,17325,17355,17384,17414,17444,17473,17502,17532,17561,17591,17620,17650,17679,17709,17738,17768,17798,17827,17857,17886,17916,17945,17975,18004,18034,18063,18093,18122,18152,18181,18211,18241,18270,18300,18330,18359,18388,18418,18447,18476,18506,18535,18565,18595,18625,18654,18684,18714,18743,18772,18802,18831,18860,18890,18919,18949,18979,19008,19038,19068,19098,19127,19156,19186,19215,19244,19274,19303,19333,19362,19392,19422,19452,19481,19511,19540,19570,19599,19628,19658,19687,19717,19746,19776,19806,19836,19865,19895,19924,19954,19983,20012,20042,20071,20101,20130,20160,20190,20219,20249,20279,20308,20338,20367,20396,20426,20455,20485,20514,20544,20573,20603,20633,20662,20692,20721,20751,20780,20810,20839,20869,20898,20928,20957,20987,21016,21046,21076,21105,21135,21164,21194,21223,21253,21282,21312,21341,21371,21400,21430,21459,21489,2151
9,21548,21578,21607,21637,21666,21696,21725,21754,21784,21813,21843,21873,21902,21932,21962,21991,22021,22050,22080,22109,22138,22168,22197,22227,22256,22286,22316,22346,22375,22405,22434,22464,22493,22522,22552,22581,22611,22640,22670,22700,22730,22759,22789,22818,22848,22877,22906,22936,22965,22994,23024,23054,23083,23113,23143,23173,23202,23232,23261,23290,23320,23349,23379,23408,23438,23467,23497,23527,23556,23586,23616,23645,23674,23704,23733,23763,23792,23822,23851,23881,23910,23940,23970,23999,24029,24058,24088,24117,24147,24176,24206,24235,24265,24294,24324,24353,24383,24413,24442,24472,24501,24531,24560,24590,24619,24648,24678,24707,24737,24767,24796,24826,24856,24885,24915,24944,24974,25003,25032,25062,25091,25121,25150,25180,25210,25240,25269,25299,25328,25358,25387,25416,25446,25475,25505,25534,25564,25594,25624,25653,25683,25712,25742,25771,25800,25830,25859,25888,25918,25948,25977,26007,26037,26067,26096,26126,26155,26184,26214,26243,26272,26302,26332,26361,26391,26421,26451,26480,26510,26539,26568,26598,26627,26656,26686,26715,26745,26775,26805,26834,26864,26893,26923,26952,26982,27011,27041,27070,27099,27129,27159,27188,27218,27248,27277,27307,27336,27366,27395,27425,27454,27484,27513,27542,27572,27602,27631,27661,27691,27720,27750,27779,27809,27838,27868,27897,27926,27956,27985,28015,28045,28074,28104,28134,28163,28193,28222,28252,28281,28310,28340,28369,28399,28428,28458,28488,28517,28547,28577,28607,28636,28665,28695,28724,28754,28783,28813,28843,28872,28901,28931,28960,28990,29019,29049,29078,29108,29137,29167,29196,29226,29255,29285,29315,29345,29375,29404,29434,29463,29492,29522,29551,29580,29610,29640,29669,29699,29729,29759,29788,29818,29847,29876,29906,29935,29964,29994,30023,30053,30082,30112,30141,30171,30200,30230,30259,30289,30318,30348,30378,30408,30437,30467,30496,30526,30555,30585,30614,30644,30673,30703,30732,30762,30791,30821,30850,30880,30909,30939,30968,30998,31027,31057,31086,31116,31145,31175,31204,31234,31263,31293,31322,31352,
31381,31411,31441,31471,31500,31530,31559,31589,31618,31648,31676,31706,31736,31766,31795,31825,31854,31884,31913,31943,31972,32002,32031,32061,32090,32120,32150,32180,32209,32239,32268,32298,32327,32357,32386,32416,32445,32475,32504,32534,32563,32593,32622,32652,32681,32711,32740,32770,32799,32829,32858,32888,32917,32947,32976,33006,33035,33065,33094,33124,33153,33183,33213,33243,33272,33302,33331,33361,33390,33420,33450,33479,33509,33539,33568,33598,33627,33657,33686,33716,33745,33775,33804,33834,33863,33893,33922,33952,33981,34011,34040,34069,34099,34128,34158,34187,34217,34247,34277,34306,34336,34365,34395,34424,34454,34483,34512,34542,34571,34601,34631,34660,34690,34719,34749,34778,34808,34837,34867,34896,34926,34955,34985,35015,35044,35074,35103,35133,35162,35192,35222,35251,35280,35310,35340,35370,35399,35429,35458,35488,35517,35547,35576,35605,35635,35665,35694,35723,35753,35782,35811,35841,35871,35901,35930,35960,35989,36019,36048,36078,36107,36136,36166,36195,36225,36254,36284,36314,36343,36373,36403,36433,36462,36492,36521,36551,36580,36610,36639,36669,36698,36728,36757,36786,36816,36845,36875,36904,36934,36963,36993,37022,37052,37081,37111,37141,37170,37200,37229,37259,37288,37318,37347,37377,37406,37436,37465,37495,37524,37554,37584,37613,37643,37672,37701,37731,37760,37790,37819,37849,37878,37908,37938,37967,37997,38027,38056,38085,38115,38144,38174,38203,38233,38262,38292,38322,38351,38381,38410,38440,38469,38499,38528,38558,38587,38617,38646,38676,38705,38735,38764,38794,38823,38853,38882,38912,38941,38971,39001,39030,39059,39089,39118,39148,39178,39208,39237,39267,39297,39326,39355,39385,39414,39444,39473,39503,39532,39562,39592,39621,39650,39680,39709,39739,39768,39798,39827,39857,39886,39916,39946,39975,40005,40035,40064,40094,40123,40153,40182,40212,40241,40271,40300,40330,40359,40389,40418,40448,40477,40507,40536,40566,40595,40625,40655,40685,40714,40744,40773,40803,40832,40862,40892,40921,40951,40980,41009,41039,41068,41098,41127,41157,41186,41
216,41245,41275,41304,41334,41364,41393,41422,41452,41481,41511,41540,41570,41599,41629,41658,41688,41718,41748,41777,41807,41836,41865,41894,41924,41953,41983,42012,42042,42072,42102,42131,42161,42190,42220,42249,42279,42308,42337,42367,42397,42426,42456,42485,42515,42545,42574,42604,42633,42662,42692,42721,42751,42780,42810,42839,42869,42899,42929,42958,42988,43017,43046,43076,43105,43135,43164,43194,43223,43253,43283,43312,43342,43371,43401,43430,43460,43489,43519,43548,43578,43607,43637,43666,43696,43726,43755,43785,43814,43844,43873,43903,43932,43962,43991,44021,44050,44080,44109,44139,44169,44198,44228,44258,44287,44317,44346,44375,44405,44434,44464,44493,44523,44553,44582,44612,44641,44671,44700,44730,44759,44788,44818,44847,44877,44906,44936,44966,44996,45025,45055,45084,45114,45143,45172,45202,45231,45261,45290,45320,45350,45380,45409,45439,45468,45498,45527,45556,45586,45615,45644,45674,45704,45733,45763,45793,45823,45852,45882,45911,45940,45970,45999,46028,46058,46088,46117,46147,46177,46206,46236,46265,46295,46324,46354,46383,46413,46442,46472,46501,46531,46560,46590,46620,46649,46679,46708,46738,46767,46797,46826,46856,46885,46915,46944,46974,47003,47033,47063,47092,47122,47151,47181,47210,47240,47269,47298,47328,47357,47387,47417,47446,47476,47506,47535,47565,47594,47624,47653,47682,47712,47741,47771,47800,47830,47860,47890,47919,47949,47978,48008,48037,48066,48096,48125,48155,48184,48214,48244,48273,48303,48333,48362,48392,48421,48450,48480,48509,48538,48568,48598,48627,48657,48687,48717,48746,48776,48805,48834,48864,48893,48922,48952,48982,49011,49041,49071,49100,49130,49160,49189,49218,49248,49277,49306,49336,49365,49395,49425,49455,49484,49514,49543,49573,49602,49632,49661,49690,49720,49749,49779,49809,49838,49868,49898,49927,49957,49986,50016,50045,50075,50104,50133,50163,50192,50222,50252,50281,50311,50340,50370,50400,50429,50459,50488,50518,50547,50576,50606,50635,50665,50694,50724,50754,50784,50813,50843,50872,50902,50931,50960,50990,51019,5104
9,51078,51108,51138,51167,51197,51227,51256,51286,51315,51345,51374,51403,51433,51462,51492,51522,51552,51582,51611,51641,51670,51699,51729,51758,51787,51816,51846,51876,51906,51936,51965,51995,52025,52054,52083,52113,52142,52171,52200,52230,52260,52290,52319,52349,52379,52408,52438,52467,52497,52526,52555,52585,52614,52644,52673,52703,52733,52762,52792,52822,52851,52881,52910,52939,52969,52998,53028,53057,53087,53116,53146,53176,53205,53235,53264,53294,53324,53353,53383,53412,53441,53471,53500,53530,53559,53589,53619,53648,53678,53708,53737,53767,53796,53825,53855,53884,53913,53943,53973,54003,54032,54062,54092,54121,54151,54180,54209,54239,54268,54297,54327,54357,54387,54416,54446,54476,54505,54535,54564,54593,54623,54652,54681,54711,54741,54770,54800,54830,54859,54889,54919,54948,54977,55007,55036,55066,55095,55125,55154,55184,55213,55243,55273,55302,55332,55361,55391,55420,55450,55479,55508,55538,55567,55597,55627,55657,55686,55716,55745,55775,55804,55834,55863,55892,55922,55951,55981,56011,56040,56070,56100,56129,56159,56188,56218,56247,56276,56306,56335,56365,56394,56424,56454,56483,56513,56543,56572,56601,56631,56660,56690,56719,56749,56778,56808,56837,56867,56897,56926,56956,56985,57015,57044,57074,57103,57133,57162,57192,57221,57251,57280,57310,57340,57369,57399,57429,57458,57487,57517,57546,57576,57605,57634,57664,57694,57723,57753,57783,57813,57842,57871,57901,57930,57959,57989,58018,58048,58077,58107,58137,58167,58196,58226,58255,58285,58314,58343,58373,58402,58432,58461,58491,58521,58551,58580,58610,58639,58669,58698,58727,58757,58786,58816,58845,58875,58905,58934,58964,58994,59023,59053,59082,59111,59141,59170,59200,59229,59259,59288,59318,59348,59377,59407,59436,59466,59495,59525,59554,59584,59613,59643,59672,59702,59731,59761,59791,59820,59850,59879,59909,59939,59968,59997,60027,60056,60086,60115,60145,60174,60204,60234,60264,60293,60323,60352,60381,60411,60440,60469,60499,60528,60558,60588,60618,60648,60677,60707,60736,60765,60795,60824,60853,60883,
60912,60942,60972,61002,61031,61061,61090,61120,61149,61179,61208,61237,61267,61296,61326,61356,61385,61415,61445,61474,61504,61533,61563,61592,61621,61651,61680,61710,61739,61769,61799,61828,61858,61888,61917,61947,61976,62006,62035,62064,62094,62123,62153,62182,62212,62242,62271,62301,62331,62360,62390,62419,62448,62478,62507,62537,62566,62596,62625,62655,62685,62715,62744,62774,62803,62832,62862,62891,62921,62950,62980,63009,63039,63069,63099,63128,63157,63187,63216,63246,63275,63305,63334,63363,63393,63423,63453,63482,63512,63541,63571,63600,63630,63659,63689,63718,63747,63777,63807,63836,63866,63895,63925,63955,63984,64014,64043,64073,64102,64131,64161,64190,64220,64249,64279,64309,64339,64368,64398,64427,64457,64486,64515,64545,64574,64603,64633,64663,64692,64722,64752,64782,64811,64841,64870,64899,64929,64958,64987,65017,65047,65076,65106,65136,65166,65195,65225,65254,65283,65313,65342,65371,65401,65431,65460,65490,65520,65549,65579,65608,65638,65667,65697,65726,65755,65785,65815,65844,65874,65903,65933,65963,65992,66022,66051,66081,66110,66140,66169,66199,66228,66258,66287,66317,66346,66376,66405,66435,66465,66494,66524,66553,66583,66612,66641,66671,66700,66730,66760,66789,66819,66849,66878,66908,66937,66967,66996,67025,67055,67084,67114,67143,67173,67203,67233,67262,67292,67321,67351,67380,67409,67439,67468,67497,67527,67557,67587,67617,67646,67676,67705,67735,67764,67793,67823,67852,67882,67911,67941,67971,68e3,68030,68060,68089,68119,68148,68177,68207,68236,68266,68295,68325,68354,68384,68414,68443,68473,68502,68532,68561,68591,68620,68650,68679,68708,68738,68768,68797,68827,68857,68886,68916,68946,68975,69004,69034,69063,69092,69122,69152,69181,69211,69240,69270,69300,69330,69359,69388,69418,69447,69476,69506,69535,69565,69595,69624,69654,69684,69713,69743,69772,69802,69831,69861,69890,69919,69949,69978,70008,70038,70067,70097,70126,70156,70186,70215,70245,70274,70303,70333,70362,70392,70421,70451,70481,70510,70540,70570,70599,70629,70658,70687,70717,707
46,70776,70805,70835,70864,70894,70924,70954,70983,71013,71042,71071,71101,71130,71159,71189,71218,71248,71278,71308,71337,71367,71397,71426,71455,71485,71514,71543,71573,71602,71632,71662,71691,71721,71751,71781,71810,71839,71869,71898,71927,71957,71986,72016,72046,72075,72105,72135,72164,72194,72223,72253,72282,72311,72341,72370,72400,72429,72459,72489,72518,72548,72577,72607,72637,72666,72695,72725,72754,72784,72813,72843,72872,72902,72931,72961,72991,73020,73050,73080,73109,73139,73168,73197,73227,73256,73286,73315,73345,73375,73404,73434,73464,73493,73523,73552,73581,73611,73640,73669,73699,73729,73758,73788,73818,73848,73877,73907,73936,73965,73995,74024,74053,74083,74113,74142,74172,74202,74231,74261,74291,74320,74349,74379,74408,74437,74467,74497,74526,74556,74586,74615,74645,74675,74704,74733,74763,74792,74822,74851,74881,74910,74940,74969,74999,75029,75058,75088,75117,75147,75176,75206,75235,75264,75294,75323,75353,75383,75412,75442,75472,75501,75531,75560,75590,75619,75648,75678,75707,75737,75766,75796,75826,75856,75885,75915,75944,75974,76003,76032,76062,76091,76121,76150,76180,76210,76239,76269,76299,76328,76358,76387,76416,76446,76475,76505,76534,76564,76593,76623,76653,76682,76712,76741,76771,76801,76830,76859,76889,76918,76948,76977,77007,77036,77066,77096,77125,77155,77185,77214,77243,77273,77302,77332,77361,77390,77420,77450,77479,77509,77539,77569,77598,77627,77657,77686,77715,77745,77774,77804,77833,77863,77893,77923,77952,77982,78011,78041,78070,78099,78129,78158,78188,78217,78247,78277,78307,78336,78366,78395,78425,78454,78483,78513,78542,78572,78601,78631,78661,78690,78720,78750,78779,78808,78838,78867,78897,78926,78956,78985,79015,79044,79074,79104,79133,79163,79192,79222,79251,79281,79310,79340,79369,79399,79428,79458,79487,79517,79546,79576,79606,79635,79665,79695,79724,79753,79783,79812,79841,79871,79900,79930,79960,79990]},{"../main":341,"object-assign":242}],341:[function(t,e,r){var n=t("object-assign");function 
i(){this.regionalOptions=[],this.regionalOptions[""]={invalidCalendar:"Calendar {0} not found",invalidDate:"Invalid {0} date",invalidMonth:"Invalid {0} month",invalidYear:"Invalid {0} year",differentCalendars:"Cannot mix {0} and {1} dates"},this.local=this.regionalOptions[""],this.calendars={},this._localCals={}}function a(t,e,r,n){if(this._calendar=t,this._year=e,this._month=r,this._day=n,0===this._calendar._validateLevel&&!this._calendar.isValid(this._year,this._month,this._day))throw(c.local.invalidDate||c.regionalOptions[""].invalidDate).replace(/\{0\}/,this._calendar.local.name)}function o(t,e){return"000000".substring(0,e-(t=""+t).length)+t}function s(){this.shortYearCutoff="+10"}function l(t){this.local=this.regionalOptions[t]||this.regionalOptions[""]}n(i.prototype,{instance:function(t,e){t=(t||"gregorian").toLowerCase(),e=e||"";var r=this._localCals[t+"-"+e];if(!r&&this.calendars[t]&&(r=new this.calendars[t](e),this._localCals[t+"-"+e]=r),!r)throw(this.local.invalidCalendar||this.regionalOptions[""].invalidCalendar).replace(/\{0\}/,t);return r},newDate:function(t,e,r,n,i){return(n=(null!=t&&t.year?t.calendar():"string"==typeof n?this.instance(n,i):n)||this.instance()).newDate(t,e,r)},substituteDigits:function(t){return function(e){return(e+"").replace(/[0-9]/g,(function(e){return t[e]}))}},substituteChineseDigits:function(t,e){return function(r){for(var n="",i=0;r>0;){var a=r%10;n=(0===a?"":t[a]+e[i])+n,i++,r=Math.floor(r/10)}return 0===n.indexOf(t[1]+e[1])&&(n=n.substr(1)),n||t[0]}}}),n(a.prototype,{newDate:function(t,e,r){return this._calendar.newDate(null==t?this:t,e,r)},year:function(t){return 0===arguments.length?this._year:this.set(t,"y")},month:function(t){return 0===arguments.length?this._month:this.set(t,"m")},day:function(t){return 0===arguments.length?this._day:this.set(t,"d")},date:function(t,e,r){if(!this._calendar.isValid(t,e,r))throw(c.local.invalidDate||c.regionalOptions[""].invalidDate).replace(/\{0\}/,this._calendar.local.name);return 
this._year=t,this._month=e,this._day=r,this},leapYear:function(){return this._calendar.leapYear(this)},epoch:function(){return this._calendar.epoch(this)},formatYear:function(){return this._calendar.formatYear(this)},monthOfYear:function(){return this._calendar.monthOfYear(this)},weekOfYear:function(){return this._calendar.weekOfYear(this)},daysInYear:function(){return this._calendar.daysInYear(this)},dayOfYear:function(){return this._calendar.dayOfYear(this)},daysInMonth:function(){return this._calendar.daysInMonth(this)},dayOfWeek:function(){return this._calendar.dayOfWeek(this)},weekDay:function(){return this._calendar.weekDay(this)},extraInfo:function(){return this._calendar.extraInfo(this)},add:function(t,e){return this._calendar.add(this,t,e)},set:function(t,e){return this._calendar.set(this,t,e)},compareTo:function(t){if(this._calendar.name!==t._calendar.name)throw(c.local.differentCalendars||c.regionalOptions[""].differentCalendars).replace(/\{0\}/,this._calendar.local.name).replace(/\{1\}/,t._calendar.local.name);var e=this._year!==t._year?this._year-t._year:this._month!==t._month?this.monthOfYear()-t.monthOfYear():this._day-t._day;return 0===e?0:e<0?-1:1},calendar:function(){return this._calendar},toJD:function(){return this._calendar.toJD(this)},fromJD:function(t){return this._calendar.fromJD(t)},toJSDate:function(){return this._calendar.toJSDate(this)},fromJSDate:function(t){return this._calendar.fromJSDate(t)},toString:function(){return(this.year()<0?"-":"")+o(Math.abs(this.year()),4)+"-"+o(this.month(),2)+"-"+o(this.day(),2)}}),n(s.prototype,{_validateLevel:0,newDate:function(t,e,r){return null==t?this.today():(t.year&&(this._validate(t,e,r,c.local.invalidDate||c.regionalOptions[""].invalidDate),r=t.day(),e=t.month(),t=t.year()),new a(this,t,e,r))},today:function(){return this.fromJSDate(new Date)},epoch:function(t){return 
this._validate(t,this.minMonth,this.minDay,c.local.invalidYear||c.regionalOptions[""].invalidYear).year()<0?this.local.epochs[0]:this.local.epochs[1]},formatYear:function(t){var e=this._validate(t,this.minMonth,this.minDay,c.local.invalidYear||c.regionalOptions[""].invalidYear);return(e.year()<0?"-":"")+o(Math.abs(e.year()),4)},monthsInYear:function(t){return this._validate(t,this.minMonth,this.minDay,c.local.invalidYear||c.regionalOptions[""].invalidYear),12},monthOfYear:function(t,e){var r=this._validate(t,e,this.minDay,c.local.invalidMonth||c.regionalOptions[""].invalidMonth);return(r.month()+this.monthsInYear(r)-this.firstMonth)%this.monthsInYear(r)+this.minMonth},fromMonthOfYear:function(t,e){var r=(e+this.firstMonth-2*this.minMonth)%this.monthsInYear(t)+this.minMonth;return this._validate(t,r,this.minDay,c.local.invalidMonth||c.regionalOptions[""].invalidMonth),r},daysInYear:function(t){var e=this._validate(t,this.minMonth,this.minDay,c.local.invalidYear||c.regionalOptions[""].invalidYear);return this.leapYear(e)?366:365},dayOfYear:function(t,e,r){var n=this._validate(t,e,r,c.local.invalidDate||c.regionalOptions[""].invalidDate);return n.toJD()-this.newDate(n.year(),this.fromMonthOfYear(n.year(),this.minMonth),this.minDay).toJD()+1},daysInWeek:function(){return 7},dayOfWeek:function(t,e,r){var n=this._validate(t,e,r,c.local.invalidDate||c.regionalOptions[""].invalidDate);return(Math.floor(this.toJD(n))+2)%this.daysInWeek()},extraInfo:function(t,e,r){return this._validate(t,e,r,c.local.invalidDate||c.regionalOptions[""].invalidDate),{}},add:function(t,e,r){return this._validate(t,this.minMonth,this.minDay,c.local.invalidDate||c.regionalOptions[""].invalidDate),this._correctAdd(t,this._add(t,e,r),e,r)},_add:function(t,e,r){if(this._validateLevel++,"d"===r||"w"===r){var n=t.toJD()+e*("w"===r?this.daysInWeek():1),i=t.calendar().fromJD(n);return this._validateLevel--,[i.year(),i.month(),i.day()]}try{var 
a=t.year()+("y"===r?e:0),o=t.monthOfYear()+("m"===r?e:0);i=t.day();"y"===r?(t.month()!==this.fromMonthOfYear(a,o)&&(o=this.newDate(a,t.month(),this.minDay).monthOfYear()),o=Math.min(o,this.monthsInYear(a)),i=Math.min(i,this.daysInMonth(a,this.fromMonthOfYear(a,o)))):"m"===r&&(!function(t){for(;oe-1+t.minMonth;)a++,o-=e,e=t.monthsInYear(a)}(this),i=Math.min(i,this.daysInMonth(a,this.fromMonthOfYear(a,o))));var s=[a,this.fromMonthOfYear(a,o),i];return this._validateLevel--,s}catch(t){throw this._validateLevel--,t}},_correctAdd:function(t,e,r,n){if(!(this.hasYearZero||"y"!==n&&"m"!==n||0!==e[0]&&t.year()>0==e[0]>0)){var i={y:[1,1,"y"],m:[1,this.monthsInYear(-1),"m"],w:[this.daysInWeek(),this.daysInYear(-1),"d"],d:[1,this.daysInYear(-1),"d"]}[n],a=r<0?-1:1;e=this._add(t,r*i[0]+a*i[1],i[2])}return t.date(e[0],e[1],e[2])},set:function(t,e,r){this._validate(t,this.minMonth,this.minDay,c.local.invalidDate||c.regionalOptions[""].invalidDate);var n="y"===r?e:t.year(),i="m"===r?e:t.month(),a="d"===r?e:t.day();return"y"!==r&&"m"!==r||(a=Math.min(a,this.daysInMonth(n,i))),t.date(n,i,a)},isValid:function(t,e,r){this._validateLevel++;var n=this.hasYearZero||0!==t;if(n){var i=this.newDate(t,e,this.minDay);n=e>=this.minMonth&&e-this.minMonth=this.minDay&&r-this.minDay13.5?13:1),c=i-(l>2.5?4716:4715);return c<=0&&c--,this.newDate(c,l,s)},toJSDate:function(t,e,r){var n=this._validate(t,e,r,c.local.invalidDate||c.regionalOptions[""].invalidDate),i=new Date(n.year(),n.month()-1,n.day());return i.setHours(0),i.setMinutes(0),i.setSeconds(0),i.setMilliseconds(0),i.setHours(i.getHours()>12?i.getHours()+2:0),i},fromJSDate:function(t){return this.newDate(t.getFullYear(),t.getMonth()+1,t.getDate())}});var c=e.exports=new i;c.cdate=a,c.baseCalendar=s,c.calendars.gregorian=l},{"object-assign":242}],342:[function(t,e,r){var n=t("object-assign"),i=t("./main");n(i.regionalOptions[""],{invalidArguments:"Invalid arguments",invalidFormat:"Cannot format a date from another 
calendar",missingNumberAt:"Missing number at position {0}",unknownNameAt:"Unknown name at position {0}",unexpectedLiteralAt:"Unexpected literal at position {0}",unexpectedText:"Additional text found at end"}),i.local=i.regionalOptions[""],n(i.cdate.prototype,{formatDate:function(t,e){return"string"!=typeof t&&(e=t,t=""),this._calendar.formatDate(t||"",this,e)}}),n(i.baseCalendar.prototype,{UNIX_EPOCH:i.instance().newDate(1970,1,1).toJD(),SECS_PER_DAY:86400,TICKS_EPOCH:i.instance().jdEpoch,TICKS_PER_DAY:864e9,ATOM:"yyyy-mm-dd",COOKIE:"D, dd M yyyy",FULL:"DD, MM d, yyyy",ISO_8601:"yyyy-mm-dd",JULIAN:"J",RFC_822:"D, d M yy",RFC_850:"DD, dd-M-yy",RFC_1036:"D, d M yy",RFC_1123:"D, d M yyyy",RFC_2822:"D, d M yyyy",RSS:"D, d M yy",TICKS:"!",TIMESTAMP:"@",W3C:"yyyy-mm-dd",formatDate:function(t,e,r){if("string"!=typeof t&&(r=e,e=t,t=""),!e)return"";if(e.calendar()!==this)throw i.local.invalidFormat||i.regionalOptions[""].invalidFormat;t=t||this.local.dateFormat;for(var n,a,o,s,l=(r=r||{}).dayNamesShort||this.local.dayNamesShort,c=r.dayNames||this.local.dayNames,u=r.monthNumbers||this.local.monthNumbers,f=r.monthNamesShort||this.local.monthNamesShort,h=r.monthNames||this.local.monthNames,p=(r.calculateWeek||this.local.calculateWeek,function(e,r){for(var n=1;w+n1}),d=function(t,e,r,n){var i=""+e;if(p(t,n))for(;i.length1},x=function(t,r){var n=y(t,r),a=[2,3,n?4:2,n?4:2,10,11,20]["oyYJ@!".indexOf(t)+1],o=new RegExp("^-?\\d{1,"+a+"}"),s=e.substring(A).match(o);if(!s)throw(i.local.missingNumberAt||i.regionalOptions[""].missingNumberAt).replace(/\{0\}/,A);return A+=s[0].length,parseInt(s[0],10)},b=this,_=function(){if("function"==typeof l){y("m");var t=l.call(b,e.substring(A));return A+=t.length,t}return x("m")},w=function(t,r,n,a){for(var o=y(t,a)?n:r,s=0;s-1){p=1,d=m;for(var E=this.daysInMonth(h,p);d>E;E=this.daysInMonth(h,p))p++,d-=E}return f>-1?this.fromJD(f):this.newDate(h,p,d)},determineDate:function(t,e,r,n,i){r&&"object"!=typeof r&&(i=n,n=r,r=null),"string"!=typeof 
n&&(i=n,n="");var a=this;return e=e?e.newDate():null,t=null==t?e:"string"==typeof t?function(t){try{return a.parseDate(n,t,i)}catch(t){}for(var e=((t=t.toLowerCase()).match(/^c/)&&r?r.newDate():null)||a.today(),o=/([+-]?[0-9]+)\s*(d|w|m|y)?/g,s=o.exec(t);s;)e.add(parseInt(s[1],10),s[2]||"d"),s=o.exec(t);return e}(t):"number"==typeof t?isNaN(t)||t===1/0||t===-1/0?e:a.today().add(t,"d"):a.newDate(t)}})},{"./main":341,"object-assign":242}],343:[function(t,e,r){"use strict";e.exports=[{path:"",backoff:0},{path:"M-2.4,-3V3L0.6,0Z",backoff:.6},{path:"M-3.7,-2.5V2.5L1.3,0Z",backoff:1.3},{path:"M-4.45,-3L-1.65,-0.2V0.2L-4.45,3L1.55,0Z",backoff:1.55},{path:"M-2.2,-2.2L-0.2,-0.2V0.2L-2.2,2.2L-1.4,3L1.6,0L-1.4,-3Z",backoff:1.6},{path:"M-4.4,-2.1L-0.6,-0.2V0.2L-4.4,2.1L-4,3L2,0L-4,-3Z",backoff:2},{path:"M2,0A2,2 0 1,1 0,-2A2,2 0 0,1 2,0Z",backoff:0,noRotate:!0},{path:"M2,2V-2H-2V2Z",backoff:0,noRotate:!0}]},{}],344:[function(t,e,r){"use strict";var n=t("./arrow_paths"),i=t("../../plots/font_attributes"),a=t("../../plots/cartesian/constants"),o=t("../../plot_api/plot_template").templatedArray;t("../../constants/axis_placeable_objects");e.exports=o("annotation",{visible:{valType:"boolean",dflt:!0,editType:"calc+arraydraw"},text:{valType:"string",editType:"calc+arraydraw"},textangle:{valType:"angle",dflt:0,editType:"calc+arraydraw"},font:i({editType:"calc+arraydraw",colorEditType:"arraydraw"}),width:{valType:"number",min:1,dflt:null,editType:"calc+arraydraw"},height:{valType:"number",min:1,dflt:null,editType:"calc+arraydraw"},opacity:{valType:"number",min:0,max:1,dflt:1,editType:"arraydraw"},align:{valType:"enumerated",values:["left","center","right"],dflt:"center",editType:"arraydraw"},valign:{valType:"enumerated",values:["top","middle","bottom"],dflt:"middle",editType:"arraydraw"},bgcolor:{valType:"color",dflt:"rgba(0,0,0,0)",editType:"arraydraw"},bordercolor:{valType:"color",dflt:"rgba(0,0,0,0)",editType:"arraydraw"},borderpad:{valType:"number",min:0,dflt:1,editType:"calc+array
draw"},borderwidth:{valType:"number",min:0,dflt:1,editType:"calc+arraydraw"},showarrow:{valType:"boolean",dflt:!0,editType:"calc+arraydraw"},arrowcolor:{valType:"color",editType:"arraydraw"},arrowhead:{valType:"integer",min:0,max:n.length,dflt:1,editType:"arraydraw"},startarrowhead:{valType:"integer",min:0,max:n.length,dflt:1,editType:"arraydraw"},arrowside:{valType:"flaglist",flags:["end","start"],extras:["none"],dflt:"end",editType:"arraydraw"},arrowsize:{valType:"number",min:.3,dflt:1,editType:"calc+arraydraw"},startarrowsize:{valType:"number",min:.3,dflt:1,editType:"calc+arraydraw"},arrowwidth:{valType:"number",min:.1,editType:"calc+arraydraw"},standoff:{valType:"number",min:0,dflt:0,editType:"calc+arraydraw"},startstandoff:{valType:"number",min:0,dflt:0,editType:"calc+arraydraw"},ax:{valType:"any",editType:"calc+arraydraw"},ay:{valType:"any",editType:"calc+arraydraw"},axref:{valType:"enumerated",dflt:"pixel",values:["pixel",a.idRegex.x.toString()],editType:"calc"},ayref:{valType:"enumerated",dflt:"pixel",values:["pixel",a.idRegex.y.toString()],editType:"calc"},xref:{valType:"enumerated",values:["paper",a.idRegex.x.toString()],editType:"calc"},x:{valType:"any",editType:"calc+arraydraw"},xanchor:{valType:"enumerated",values:["auto","left","center","right"],dflt:"auto",editType:"calc+arraydraw"},xshift:{valType:"number",dflt:0,editType:"calc+arraydraw"},yref:{valType:"enumerated",values:["paper",a.idRegex.y.toString()],editType:"calc"},y:{valType:"any",editType:"calc+arraydraw"},yanchor:{valType:"enumerated",values:["auto","top","middle","bottom"],dflt:"auto",editType:"calc+arraydraw"},yshift:{valType:"number",dflt:0,editType:"calc+arraydraw"},clicktoshow:{valType:"enumerated",values:[!1,"onoff","onout"],dflt:!1,editType:"arraydraw"},xclick:{valType:"any",editType:"arraydraw"},yclick:{valType:"any",editType:"arraydraw"},hovertext:{valType:"string",editType:"arraydraw"},hoverlabel:{bgcolor:{valType:"color",editType:"arraydraw"},bordercolor:{valType:"color",editType
:"arraydraw"},font:i({editType:"arraydraw"}),editType:"arraydraw"},captureevents:{valType:"boolean",editType:"arraydraw"},editType:"calc",_deprecated:{ref:{valType:"string",editType:"calc"}}})},{"../../constants/axis_placeable_objects":467,"../../plot_api/plot_template":538,"../../plots/cartesian/constants":556,"../../plots/font_attributes":580,"./arrow_paths":343}],345:[function(t,e,r){"use strict";var n=t("../../lib"),i=t("../../plots/cartesian/axes"),a=t("./draw").draw;function o(t){var e=t._fullLayout;n.filterVisible(e.annotations).forEach((function(e){var r=i.getFromId(t,e.xref),n=i.getFromId(t,e.yref),a=i.getRefType(e.xref),o=i.getRefType(e.yref);e._extremes={},"range"===a&&s(e,r),"range"===o&&s(e,n)}))}function s(t,e){var r,n=e._id,a=n.charAt(0),o=t[a],s=t["a"+a],l=t[a+"ref"],c=t["a"+a+"ref"],u=t["_"+a+"padplus"],f=t["_"+a+"padminus"],h={x:1,y:-1}[a]*t[a+"shift"],p=3*t.arrowsize*t.arrowwidth||0,d=p+h,m=p-h,g=3*t.startarrowsize*t.arrowwidth||0,v=g+h,y=g-h;if(c===l){var x=i.findExtremes(e,[e.r2c(o)],{ppadplus:d,ppadminus:m}),b=i.findExtremes(e,[e.r2c(s)],{ppadplus:Math.max(u,v),ppadminus:Math.max(f,y)});r={min:[x.min[0],b.min[0]],max:[x.max[0],b.max[0]]}}else v=s?v+s:v,y=s?y-s:y,r=i.findExtremes(e,[e.r2c(o)],{ppadplus:Math.max(u,d,v),ppadminus:Math.max(f,m,y)});t._extremes[n]=r}e.exports=function(t){var e=t._fullLayout;if(n.filterVisible(e.annotations).length&&t._fullData.length)return n.syncOrAsync([a,o],t)}},{"../../lib":498,"../../plots/cartesian/axes":549,"./draw":350}],346:[function(t,e,r){"use strict";var n=t("../../lib"),i=t("../../registry"),a=t("../../plot_api/plot_template").arrayEditor;function o(t,e){var r,n,i,a,o,l,c,u=t._fullLayout.annotations,f=[],h=[],p=[],d=(e||[]).length;for(r=0;r0||r.explicitOff.length>0},onClick:function(t,e){var r,s,l=o(t,e),c=l.on,u=l.off.concat(l.explicitOff),f={},h=t._fullLayout.annotations;if(!c.length&&!u.length)return;for(r=0;r2/3?"right":"center"),{center:0,middle:0,left:.5,bottom:-.5,right:-.5,top:.5}[e]}for(var 
W=!1,X=["x","y"],Z=0;Z1)&&(nt===rt?((pt=it.r2fraction(e["a"+et]))<0||pt>1)&&(W=!0):W=!0),J=it._offset+it.r2p(e[et]),$=.5}else{var dt="domain"===ht;"x"===et?(Q=e[et],J=dt?it._offset+it._length*Q:J=T.l+T.w*Q):(Q=1-e[et],J=dt?it._offset+it._length*Q:J=T.t+T.h*Q),$=e.showarrow?.5:Q}if(e.showarrow){ft.head=J;var mt=e["a"+et];if(tt=ot*q(.5,e.xanchor)-st*q(.5,e.yanchor),nt===rt){var gt=l.getRefType(nt);"domain"===gt?("y"===et&&(mt=1-mt),ft.tail=it._offset+it._length*mt):"paper"===gt?"y"===et?(mt=1-mt,ft.tail=T.t+T.h*mt):ft.tail=T.l+T.w*mt:ft.tail=it._offset+it.r2p(mt),K=tt}else ft.tail=J+mt,K=tt+mt;ft.text=ft.tail+tt;var vt=w["x"===et?"width":"height"];if("paper"===rt&&(ft.head=o.constrain(ft.head,1,vt-1)),"pixel"===nt){var yt=-Math.max(ft.tail-3,ft.text),xt=Math.min(ft.tail+3,ft.text)-vt;yt>0?(ft.tail+=yt,ft.text+=yt):xt>0&&(ft.tail-=xt,ft.text-=xt)}ft.tail+=ut,ft.head+=ut}else K=tt=lt*q($,ct),ft.text=J+tt;ft.text+=ut,tt+=ut,K+=ut,e["_"+et+"padplus"]=lt/2+K,e["_"+et+"padminus"]=lt/2-K,e["_"+et+"size"]=lt,e["_"+et+"shift"]=tt}if(W)R.remove();else{var bt=0,_t=0;if("left"!==e.align&&(bt=(A-b)*("center"===e.align?.5:1)),"top"!==e.valign&&(_t=(D-_)*("middle"===e.valign?.5:1)),f)n.select("svg").attr({x:N+bt-1,y:N+_t}).call(u.setClipUrl,U?L:null,t);else{var wt=N+_t-m.top,Tt=N+bt-m.left;G.call(h.positionText,Tt,wt).call(u.setClipUrl,U?L:null,t)}V.select("rect").call(u.setRect,N,N,A,D),j.call(u.setRect,F/2,F/2,B-F,H-F),R.call(u.setTranslate,Math.round(C.x.text-B/2),Math.round(C.y.text-H/2)),O.attr({transform:"rotate("+P+","+C.x.text+","+C.y.text+")"});var kt,At=function(r,n){I.selectAll(".annotation-arrow-g").remove();var l=C.x.head,f=C.y.head,h=C.x.tail+r,p=C.y.tail+n,m=C.x.text+r,b=C.y.text+n,_=o.rotationXYMatrix(P,m,b),w=o.apply2DTransform(_),A=o.apply2DTransform2(_),L=+j.attr("width"),z=+j.attr("height"),D=m-.5*L,F=D+L,B=b-.5*z,N=B+z,U=[[D,B,D,N],[D,N,F,N],[F,N,F,B],[F,B,D,B]].map(A);if(!U.reduce((function(t,e){return 
t^!!o.segmentsIntersect(l,f,l+1e6,f+1e6,e[0],e[1],e[2],e[3])}),!1)){U.forEach((function(t){var e=o.segmentsIntersect(h,p,l,f,t[0],t[1],t[2],t[3]);e&&(h=e.x,p=e.y)}));var V=e.arrowwidth,H=e.arrowcolor,q=e.arrowside,G=I.append("g").style({opacity:c.opacity(H)}).classed("annotation-arrow-g",!0),Y=G.append("path").attr("d","M"+h+","+p+"L"+l+","+f).style("stroke-width",V+"px").call(c.stroke,c.rgb(H));if(g(Y,q,e),k.annotationPosition&&Y.node().parentNode&&!a){var W=l,X=f;if(e.standoff){var Z=Math.sqrt(Math.pow(l-h,2)+Math.pow(f-p,2));W+=e.standoff*(h-l)/Z,X+=e.standoff*(p-f)/Z}var J,K,Q=G.append("path").classed("annotation-arrow",!0).classed("anndrag",!0).classed("cursor-move",!0).attr({d:"M3,3H-3V-3H3ZM0,0L"+(h-W)+","+(p-X),transform:s(W,X)}).style("stroke-width",V+6+"px").call(c.stroke,"rgba(0,0,0,0)").call(c.fill,"rgba(0,0,0,0)");d.init({element:Q.node(),gd:t,prepFn:function(){var t=u.getTranslate(R);J=t.x,K=t.y,v&&v.autorange&&M(v._name+".autorange",!0),x&&x.autorange&&M(x._name+".autorange",!0)},moveFn:function(t,r){var n=w(J,K),i=n[0]+t,a=n[1]+r;R.call(u.setTranslate,i,a),S("x",y(v,t,"x",T,e)),S("y",y(x,r,"y",T,e)),e.axref===e.xref&&S("ax",y(v,t,"ax",T,e)),e.ayref===e.yref&&S("ay",y(x,r,"ay",T,e)),G.attr("transform",s(t,r)),O.attr({transform:"rotate("+P+","+i+","+a+")"})},doneFn:function(){i.call("_guiRelayout",t,E());var e=document.querySelector(".js-notes-box-panel");e&&e.redraw(e.selectedObj)}})}}};if(e.showarrow&&At(0,0),z)d.init({element:R.node(),gd:t,prepFn:function(){kt=O.attr("transform")},moveFn:function(t,r){var n="pointer";if(e.showarrow)e.axref===e.xref?S("ax",y(v,t,"ax",T,e)):S("ax",e.ax+t),e.ayref===e.yref?S("ay",y(x,r,"ay",T.w,e)):S("ay",e.ay+r),At(t,r);else{if(a)return;var i,o;if(v)i=y(v,t,"x",T,e);else{var l=e._xsize/T.w,c=e.x+(e._xshift-e.xshift)/T.w-l/2;i=d.align(c+t/T.w,l,0,1,e.xanchor)}if(x)o=y(x,r,"y",T,e);else{var 
u=e._ysize/T.h,f=e.y-(e._yshift+e.yshift)/T.h-u/2;o=d.align(f-r/T.h,u,0,1,e.yanchor)}S("x",i),S("y",o),v&&x||(n=d.getCursor(v?.5:i,x?.5:o,e.xanchor,e.yanchor))}O.attr({transform:s(t,r)+kt}),p(R,n)},clickFn:function(r,n){e.captureevents&&t.emit("plotly_clickannotation",Y(n))},doneFn:function(){p(R),i.call("_guiRelayout",t,E());var e=document.querySelector(".js-notes-box-panel");e&&e.redraw(e.selectedObj)}})}}}e.exports={draw:function(t){var e=t._fullLayout;e._infolayer.selectAll(".annotation").remove();for(var r=0;r=0,x=e.indexOf("end")>=0,b=d.backoff*g+r.standoff,_=m.backoff*v+r.startstandoff;if("line"===p.nodeName){o={x:+t.attr("x1"),y:+t.attr("y1")},u={x:+t.attr("x2"),y:+t.attr("y2")};var w=o.x-u.x,T=o.y-u.y;if(h=(f=Math.atan2(T,w))+Math.PI,b&&_&&b+_>Math.sqrt(w*w+T*T))return void z();if(b){if(b*b>w*w+T*T)return void z();var k=b*Math.cos(f),A=b*Math.sin(f);u.x+=k,u.y+=A,t.attr({x2:u.x,y2:u.y})}if(_){if(_*_>w*w+T*T)return void z();var M=_*Math.cos(f),S=_*Math.sin(f);o.x-=M,o.y-=S,t.attr({x1:o.x,y1:o.y})}}else if("path"===p.nodeName){var E=p.getTotalLength(),L="";if(E1){c=!0;break}}c?t.fullLayout._infolayer.select(".annotation-"+t.id+'[data-index="'+s+'"]').remove():(l._pdata=i(t.glplot.cameraParams,[e.xaxis.r2l(l.x)*r[0],e.yaxis.r2l(l.y)*r[1],e.zaxis.r2l(l.z)*r[2]]),n(t.graphDiv,l,s,t.id,l._xa,l._ya))}}},{"../../plots/gl3d/project":602,"../annotations/draw":350}],357:[function(t,e,r){"use strict";var n=t("../../registry"),i=t("../../lib");e.exports={moduleType:"component",name:"annotations3d",schema:{subplots:{scene:{annotations:t("./attributes")}}},layoutAttributes:t("./attributes"),handleDefaults:t("./defaults"),includeBasePlot:function(t,e){var r=n.subplotsRegistry.gl3d;if(!r)return;for(var a=r.attrRegex,o=Object.keys(t),s=0;s=0))return t;if(3===o)n[o]>1&&(n[o]=1);else if(n[o]>=1)return t}var s=Math.round(255*n[0])+", "+Math.round(255*n[1])+", "+Math.round(255*n[2]);return a?"rgba("+s+", "+n[3]+")":"rgb("+s+")"}o.tinyRGB=function(t){var 
e=t.toRgb();return"rgb("+Math.round(e.r)+", "+Math.round(e.g)+", "+Math.round(e.b)+")"},o.rgb=function(t){return o.tinyRGB(n(t))},o.opacity=function(t){return t?n(t).getAlpha():0},o.addOpacity=function(t,e){var r=n(t).toRgb();return"rgba("+Math.round(r.r)+", "+Math.round(r.g)+", "+Math.round(r.b)+", "+e+")"},o.combine=function(t,e){var r=n(t).toRgb();if(1===r.a)return n(t).toRgbString();var i=n(e||c).toRgb(),a=1===i.a?i:{r:255*(1-i.a)+i.r*i.a,g:255*(1-i.a)+i.g*i.a,b:255*(1-i.a)+i.b*i.a},o={r:a.r*(1-r.a)+r.r*r.a,g:a.g*(1-r.a)+r.g*r.a,b:a.b*(1-r.a)+r.b*r.a};return n(o).toRgbString()},o.contrast=function(t,e,r){var i=n(t);return 1!==i.getAlpha()&&(i=n(o.combine(t,c))),(i.isDark()?e?i.lighten(e):c:r?i.darken(r):l).toString()},o.stroke=function(t,e){var r=n(e);t.style({stroke:o.tinyRGB(r),"stroke-opacity":r.getAlpha()})},o.fill=function(t,e){var r=n(e);t.style({fill:o.tinyRGB(r),"fill-opacity":r.getAlpha()})},o.clean=function(t){if(t&&"object"==typeof t){var e,r,n,i,s=Object.keys(t);for(e=0;e0?n>=l:n<=l));i++)n>u&&n0?n>=l:n<=l));i++)n>r[0]&&n1){var ct=Math.pow(10,Math.floor(Math.log(lt)/Math.LN10));ot*=ct*c.roundUp(lt/ct,[2,5,10]),(Math.abs(q.start)/q.size+1e-6)%1<2e-6&&(it.tick0=0)}it.dtick=ot}it.domain=o?[rt+I/R.h,rt+K-I/R.h]:[rt+P/R.w,rt+K-P/R.w],it.setScale(),t.attr("transform",u(Math.round(R.l),Math.round(R.t)));var ut,ft=t.select("."+A.cbtitleunshift).attr("transform",u(-Math.round(R.l),-Math.round(R.t))),ht=it.ticklabelposition,pt=it.title.font.size,dt=t.select("."+A.cbaxis),mt=0,gt=0;function vt(n,i){var a={propContainer:it,propName:e._propPrefix+"title",traceIndex:e._traceIndex,_meta:e._meta,placeholder:D._dfltTitle.colorbar,containerGroup:t.select("."+A.cbtitle)},o="h"===n.charAt(0)?n.substr(1):"h"+n;t.selectAll("."+o+",."+o+"-math-group").remove(),m.draw(r,n,f(a,i||{}))}return c.syncOrAsync([a.previousPromises,function(){var 
t,e;(o&&at||!o&&!at)&&("top"===j&&(t=P+R.l+R.w*O,e=I+R.t+R.h*(1-rt-K)+3+.75*pt),"bottom"===j&&(t=P+R.l+R.w*O,e=I+R.t+R.h*(1-rt)-3-.25*pt),"right"===j&&(e=I+R.t+R.h*z+3+.75*pt,t=P+R.l+R.w*rt),vt(it._id+"title",{attributes:{x:t,y:e,"text-anchor":o?"start":"middle"}}))},function(){if(!o&&!at||o&&at){var a,l=t.select("."+A.cbtitle),f=l.select("text"),h=[-M/2,M/2],d=l.select(".h"+it._id+"title-math-group").node(),m=15.6;if(f.node()&&(m=parseInt(f.node().style.fontSize,10)*w),d?(a=p.bBox(d),gt=a.width,(mt=a.height)>m&&(h[1]-=(mt-m)/2)):f.node()&&!f.classed(A.jsPlaceholder)&&(a=p.bBox(f.node()),gt=a.width,mt=a.height),o){if(mt){if(mt+=5,"top"===j)it.domain[1]-=mt/R.h,h[1]*=-1;else{it.domain[0]+=mt/R.h;var v=g.lineCount(f);h[1]+=(1-v)*m}l.attr("transform",u(h[0],h[1])),it.setScale()}}else gt&&("right"===j&&(it.domain[0]+=(gt+pt/2)/R.w),l.attr("transform",u(h[0],h[1])),it.setScale())}t.selectAll("."+A.cbfills+",."+A.cblines).attr("transform",o?u(0,Math.round(R.h*(1-it.domain[1]))):u(Math.round(R.w*it.domain[0]),0)),dt.attr("transform",o?u(0,Math.round(-R.t)):u(Math.round(-R.l),0));var y=t.select("."+A.cbfills).selectAll("rect."+A.cbfill).attr("style","").data(Y);y.enter().append("rect").classed(A.cbfill,!0).style("stroke","none"),y.exit().remove();var x=U.map(it.c2p).map(Math.round).sort((function(t,e){return t-e}));y.each((function(t,a){var s=[0===a?U[0]:(Y[a]+Y[a-1])/2,a===Y.length-1?U[1]:(Y[a]+Y[a+1])/2].map(it.c2p).map(Math.round);o&&(s[1]=c.constrain(s[1]+(s[1]>s[0])?1:-1,x[0],x[1]));var l=n.select(this).attr(o?"x":"y",Q).attr(o?"y":"x",n.min(s)).attr(o?"width":"height",Math.max(X,2)).attr(o?"height":"width",Math.max(n.max(s)-n.min(s),2));if(e._fillgradient)p.gradient(l,r,e._id,o?"vertical":"horizontalreversed",e._fillgradient,"fill");else{var u=H(t).replace("e-","");l.attr("fill",i(u).toHexString())}}));var 
b=t.select("."+A.cblines).selectAll("path."+A.cbline).data(B.color&&B.width?W:[]);b.enter().append("path").classed(A.cbline,!0),b.exit().remove(),b.each((function(t){var e=Q,r=Math.round(it.c2p(t))+B.width/2%1;n.select(this).attr("d","M"+(o?e+","+r:r+","+e)+(o?"h":"v")+X).call(p.lineGroupStyle,B.width,V(t),B.dash)})),dt.selectAll("g."+it._id+"tick,path").remove();var _=Q+X+(M||0)/2-("outside"===e.ticks?1:0),T=s.calcTicks(it),k=s.getTickSigns(it)[2];return s.drawTicks(r,it,{vals:"inside"===it.ticks?s.clipEnds(it,T):T,layer:dt,path:s.makeTickPath(it,_,k),transFn:s.makeTransTickFn(it)}),s.drawLabels(r,it,{vals:T,layer:dt,transFn:s.makeTransTickLabelFn(it),labelFns:s.makeLabelFns(it,_)})},function(){if(o&&!at||!o&&at){var t,i,a=it.position||0,s=it._offset+it._length/2;if("right"===j)i=s,t=R.l+R.w*a+10+pt*(it.showticklabels?1:.5);else if(t=s,"bottom"===j&&(i=R.t+R.h*a+10+(-1===ht.indexOf("inside")?it.tickfont.size:0)+("intside"!==it.ticks&&e.ticklen||0)),"top"===j){var l=N.text.split("
").length;i=R.t+R.h*a+10-X-w*pt*l}vt((o?"h":"v")+it._id+"title",{avoid:{selection:n.select(r).selectAll("g."+it._id+"tick"),side:j,offsetTop:o?0:R.t,offsetLeft:o?R.l:0,maxShift:o?D.width:D.height},attributes:{x:t,y:i,"text-anchor":"middle"},transform:{rotate:o?-90:0,offset:0}})}},a.previousPromises,function(){var n,s=X+M/2;-1===ht.indexOf("inside")&&(n=p.bBox(dt.node()),s+=o?n.width:n.height),ut=ft.select("text");var c=0,f=o&&"top"===j,m=!o&&"right"===j,g=0;if(ut.node()&&!ut.classed(A.jsPlaceholder)){var y,x=ft.select(".h"+it._id+"title-math-group").node();x&&(o&&at||!o&&!at)?(c=(n=p.bBox(x)).width,y=n.height):(c=(n=p.bBox(ft.node())).right-R.l-(o?Q:nt),y=n.bottom-R.t-(o?nt:Q),o||"top"!==j||(s+=n.height,g=n.height)),m&&(ut.attr("transform",u(c/2+pt/2,0)),c*=2),s=Math.max(s,o?c:y)}var b=2*(o?P:I)+s+S+M/2,w=0;!o&&N.text&&"bottom"===C&&z<=0&&(b+=w=b/2,g+=w),D._hColorbarMoveTitle=w,D._hColorbarMoveCBTitle=g;var F=S+M;t.select("."+A.cbbg).attr("x",(o?Q:nt)-F/2-(o?P:0)).attr("y",(o?nt:Q)-(o?J:I+g-w)).attr(o?"width":"height",Math.max(b-w,2)).attr(o?"height":"width",Math.max(J+F,2)).call(d.fill,E).call(d.stroke,e.bordercolor).style("stroke-width",S);var B=m?Math.max(c-10,0):0;if(t.selectAll("."+A.cboutline).attr("x",(o?Q:nt+P)+B).attr("y",(o?nt+I-J:Q)+(f?mt:0)).attr(o?"width":"height",Math.max(X,2)).attr(o?"height":"width",Math.max(J-(o?2*I+mt:2*P+B),2)).call(d.stroke,e.outlinecolor).style({fill:"none","stroke-width":M}),t.attr("transform",u(R.l-(o?$*b:0),R.t-(o?0:(1-tt)*b-g))),!o&&(S||i(E).getAlpha()&&!i.equals(D.paper_bgcolor,E))){var U=dt.selectAll("text"),V=U[0].length,H=t.select("."+A.cbbg).node(),q=p.bBox(H),G=p.getTranslate(t);U.each((function(t,e){var r=V-1;if(0===e||e===r){var n,i=p.bBox(this),a=p.getTranslate(this);if(e===r){var o=i.right+a.x;(n=q.right+G.x+nt-S-2+O-o)>0&&(n=0)}else if(0===e){var s=i.left+a.x;(n=q.left+G.x+nt+S+2-s)<0&&(n=0)}n&&(V<3?this.setAttribute("transform","translate("+n+",0) 
"+this.getAttribute("transform")):this.setAttribute("visibility","hidden"))}}))}var Y={},W=T[L],Z=k[L],K=T[C],et=k[C],rt=b-X;o?("pixels"===h?(Y.y=z,Y.t=J*K,Y.b=J*et):(Y.t=Y.b=0,Y.yt=z+l*K,Y.yb=z-l*et),"pixels"===_?(Y.x=O,Y.l=b*W,Y.r=b*Z):(Y.l=rt*W,Y.r=rt*Z,Y.xl=O-v*W,Y.xr=O+v*Z)):("pixels"===h?(Y.x=O,Y.l=J*W,Y.r=J*Z):(Y.l=Y.r=0,Y.xl=O+l*W,Y.xr=O-l*Z),"pixels"===_?(Y.y=1-z,Y.t=b*K,Y.b=b*et):(Y.t=rt*K,Y.b=rt*et,Y.yt=z-v*K,Y.yb=z+v*et)),a.autoMargin(r,e._id,Y)}],r)}(r,e,t);v&&v.then&&(t._promises||[]).push(v),t._context.edits.colorbarPosition&&function(t,e,r){var n,i,a,s="v"===e.orientation,c=r._fullLayout._size;l.init({element:t.node(),gd:r,prepFn:function(){n=t.attr("transform"),h(t)},moveFn:function(r,o){t.attr("transform",n+u(r,o)),i=l.align((s?e._uFrac:e._vFrac)+r/c.w,s?e._thickFrac:e._lenFrac,0,1,e.xanchor),a=l.align((s?e._vFrac:1-e._uFrac)-o/c.h,s?e._lenFrac:e._thickFrac,0,1,e.yanchor);var f=l.getCursor(i,a,e.xanchor,e.yanchor);h(t,f)},doneFn:function(){if(h(t),void 0!==i&&void 0!==a){var n={};n[e._propPrefix+"x"]=i,n[e._propPrefix+"y"]=a,void 0!==e._traceIndex?o.call("_guiRestyle",r,n,e._traceIndex):o.call("_guiRelayout",r,n)}}})}(r,e,t)})),e.exit().each((function(e){a.autoMargin(t,e._id)})).remove(),e.order()}}},{"../../constants/alignment":466,"../../lib":498,"../../lib/extend":488,"../../lib/setcursor":519,"../../lib/svg_text_utils":524,"../../plots/cartesian/axes":549,"../../plots/cartesian/axis_defaults":551,"../../plots/cartesian/layout_attributes":564,"../../plots/cartesian/position_defaults":567,"../../plots/plots":614,"../../registry":633,"../color":361,"../colorscale/helpers":372,"../dragelement":380,"../drawing":383,"../titles":459,"./constants":363,"@plotly/d3":58,tinycolor2:307}],366:[function(t,e,r){"use strict";var n=t("../../lib");e.exports=function(t){return n.isPlainObject(t.colorbar)}},{"../../lib":498}],367:[function(t,e,r){"use 
strict";e.exports={moduleType:"component",name:"colorbar",attributes:t("./attributes"),supplyDefaults:t("./defaults"),draw:t("./draw").draw,hasColorbar:t("./has_colorbar")}},{"./attributes":362,"./defaults":364,"./draw":365,"./has_colorbar":366}],368:[function(t,e,r){"use strict";var n=t("../colorbar/attributes"),i=t("../../lib/regex").counter,a=t("../../lib/sort_object_keys"),o=t("./scales.js").scales;a(o);function s(t){return"`"+t+"`"}e.exports=function(t,e){t=t||"";var r,a=(e=e||{}).cLetter||"c",l=("onlyIfNumerical"in e?e.onlyIfNumerical:Boolean(t),"noScale"in e?e.noScale:"marker.line"===t),c="showScaleDflt"in e?e.showScaleDflt:"z"===a,u="string"==typeof e.colorscaleDflt?o[e.colorscaleDflt]:null,f=e.editTypeOverride||"",h=t?t+".":"";"colorAttr"in e?(r=e.colorAttr,e.colorAttr):s(h+(r={z:"z",c:"color"}[a]));var p=a+"auto",d=a+"min",m=a+"max",g=a+"mid",v=(s(h+p),s(h+d),s(h+m),{});v[d]=v[m]=void 0;var y={};y[p]=!1;var x={};return"color"===r&&(x.color={valType:"color",arrayOk:!0,editType:f||"style"},e.anim&&(x.color.anim=!0)),x[p]={valType:"boolean",dflt:!0,editType:"calc",impliedEdits:v},x[d]={valType:"number",dflt:null,editType:f||"plot",impliedEdits:y},x[m]={valType:"number",dflt:null,editType:f||"plot",impliedEdits:y},x[g]={valType:"number",dflt:null,editType:"calc",impliedEdits:v},x.colorscale={valType:"colorscale",editType:"calc",dflt:u,impliedEdits:{autocolorscale:!1}},x.autocolorscale={valType:"boolean",dflt:!1!==e.autoColorDflt,editType:"calc",impliedEdits:{colorscale:void 0}},x.reversescale={valType:"boolean",dflt:!1,editType:"plot"},l||(x.showscale={valType:"boolean",dflt:c,editType:"calc"},x.colorbar=n),e.noColorAxis||(x.coloraxis={valType:"subplotid",regex:i("coloraxis"),dflt:null,editType:"calc"}),x}},{"../../lib/regex":515,"../../lib/sort_object_keys":521,"../colorbar/attributes":362,"./scales.js":376}],369:[function(t,e,r){"use strict";var n=t("fast-isnumeric"),i=t("../../lib"),a=t("./helpers").extractOpts;e.exports=function(t,e,r){var 
o,s=t._fullLayout,l=r.vals,c=r.containerStr,u=c?i.nestedProperty(e,c).get():e,f=a(u),h=!1!==f.auto,p=f.min,d=f.max,m=f.mid,g=function(){return i.aggNums(Math.min,null,l)},v=function(){return i.aggNums(Math.max,null,l)};(void 0===p?p=g():h&&(p=u._colorAx&&n(p)?Math.min(p,g()):g()),void 0===d?d=v():h&&(d=u._colorAx&&n(d)?Math.max(d,v()):v()),h&&void 0!==m&&(d-m>m-p?p=m-(d-m):d-m=0?s.colorscale.sequential:s.colorscale.sequentialminus,f._sync("colorscale",o))}},{"../../lib":498,"./helpers":372,"fast-isnumeric":185}],370:[function(t,e,r){"use strict";var n=t("../../lib"),i=t("./helpers").hasColorscale,a=t("./helpers").extractOpts;e.exports=function(t,e){function r(t,e){var r=t["_"+e];void 0!==r&&(t[e]=r)}function o(t,i){var o=i.container?n.nestedProperty(t,i.container).get():t;if(o)if(o.coloraxis)o._colorAx=e[o.coloraxis];else{var s=a(o),l=s.auto;(l||void 0===s.min)&&r(o,i.min),(l||void 0===s.max)&&r(o,i.max),s.autocolorscale&&r(o,"colorscale")}}for(var s=0;s=0;n--,i++){var a=t[n];r[i]=[1-a[0],a[1]]}return r}function d(t,e){e=e||{};for(var r=t.domain,o=t.range,l=o.length,c=new Array(l),u=0;u4/3-s?o:s}},{}],378:[function(t,e,r){"use strict";var n=t("../../lib"),i=[["sw-resize","s-resize","se-resize"],["w-resize","move","e-resize"],["nw-resize","n-resize","ne-resize"]];e.exports=function(t,e,r,a){return t="left"===r?0:"center"===r?1:"right"===r?2:n.constrain(Math.floor(3*t),0,2),e="bottom"===a?0:"middle"===a?1:"top"===a?2:n.constrain(Math.floor(3*e),0,2),i[e][t]}},{"../../lib":498}],379:[function(t,e,r){"use strict";r.selectMode=function(t){return"lasso"===t||"select"===t},r.drawMode=function(t){return"drawclosedpath"===t||"drawopenpath"===t||"drawline"===t||"drawrect"===t||"drawcircle"===t},r.openMode=function(t){return"drawline"===t||"drawopenpath"===t},r.rectMode=function(t){return"select"===t||"drawline"===t||"drawrect"===t||"drawcircle"===t},r.freeMode=function(t){return"lasso"===t||"drawclosedpath"===t||"drawopenpath"===t},r.selectingOrDrawing=function(t){return 
r.freeMode(t)||r.rectMode(t)}},{}],380:[function(t,e,r){"use strict";var n=t("mouse-event-offset"),i=t("has-hover"),a=t("has-passive-events"),o=t("../../lib").removeElement,s=t("../../plots/cartesian/constants"),l=e.exports={};l.align=t("./align"),l.getCursor=t("./cursor");var c=t("./unhover");function u(){var t=document.createElement("div");t.className="dragcover";var e=t.style;return e.position="fixed",e.left=0,e.right=0,e.top=0,e.bottom=0,e.zIndex=999999999,e.background="none",document.body.appendChild(t),t}function f(t){return n(t.changedTouches?t.changedTouches[0]:t,document.body)}l.unhover=c.wrapped,l.unhoverRaw=c.raw,l.init=function(t){var e,r,n,c,h,p,d,m,g=t.gd,v=1,y=g._context.doubleClickDelay,x=t.element;g._mouseDownTime||(g._mouseDownTime=0),x.style.pointerEvents="all",x.onmousedown=_,a?(x._ontouchstart&&x.removeEventListener("touchstart",x._ontouchstart),x._ontouchstart=_,x.addEventListener("touchstart",_,{passive:!1})):x.ontouchstart=_;var b=t.clampFn||function(t,e,r){return Math.abs(t)y&&(v=Math.max(v-1,1)),g._dragged)t.doneFn&&t.doneFn();else if(t.clickFn&&t.clickFn(v,p),!m){var r;try{r=new MouseEvent("click",e)}catch(t){var n=f(e);(r=document.createEvent("MouseEvents")).initMouseEvent("click",e.bubbles,e.cancelable,e.view,e.detail,e.screenX,e.screenY,n[0],n[1],e.ctrlKey,e.altKey,e.shiftKey,e.metaKey,e.button,e.relatedTarget)}d.dispatchEvent(r)}g._dragging=!1,g._dragged=!1}else g._dragged=!1}},l.coverSlip=u},{"../../lib":498,"../../plots/cartesian/constants":556,"./align":377,"./cursor":378,"./unhover":381,"has-hover":223,"has-passive-events":224,"mouse-event-offset":237}],381:[function(t,e,r){"use strict";var n=t("../../lib/events"),i=t("../../lib/throttle"),a=t("../../lib/dom").getGraphDiv,o=t("../fx/constants"),s=e.exports={};s.wrapped=function(t,e,r){(t=a(t))._fullLayout&&i.clear(t._fullLayout._uid+o.HOVERID),s.raw(t,e,r)},s.raw=function(t,e){var 
r=t._fullLayout,i=t._hoverdata;e||(e={}),e.target&&!t._dragged&&!1===n.triggerHandler(t,"plotly_beforehover",e)||(r._hoverlayer.selectAll("g").remove(),r._hoverlayer.selectAll("line").remove(),r._hoverlayer.selectAll("circle").remove(),t._hoverdata=void 0,e.target&&i&&t.emit("plotly_unhover",{event:e,points:i}))}},{"../../lib/dom":486,"../../lib/events":487,"../../lib/throttle":525,"../fx/constants":395}],382:[function(t,e,r){"use strict";r.dash={valType:"string",values:["solid","dot","dash","longdash","dashdot","longdashdot"],dflt:"solid",editType:"style"},r.pattern={shape:{valType:"enumerated",values:["","/","\\","x","-","|","+","."],dflt:"",arrayOk:!0,editType:"style"},fillmode:{valType:"enumerated",values:["replace","overlay"],dflt:"replace",editType:"style"},bgcolor:{valType:"color",arrayOk:!0,editType:"style"},fgcolor:{valType:"color",arrayOk:!0,editType:"style"},fgopacity:{valType:"number",editType:"style",min:0,max:1},size:{valType:"number",min:0,dflt:8,arrayOk:!0,editType:"style"},solidity:{valType:"number",min:0,max:1,dflt:.3,arrayOk:!0,editType:"style"},editType:"style"}},{}],383:[function(t,e,r){"use strict";var n=t("@plotly/d3"),i=t("../../lib"),a=i.numberFormat,o=t("fast-isnumeric"),s=t("tinycolor2"),l=t("../../registry"),c=t("../color"),u=t("../colorscale"),f=i.strTranslate,h=t("../../lib/svg_text_utils"),p=t("../../constants/xmlns_namespaces"),d=t("../../constants/alignment").LINE_SPACING,m=t("../../constants/interactions").DESELECTDIM,g=t("../../traces/scatter/subtypes"),v=t("../../traces/scatter/make_bubble_size_func"),y=t("../../components/fx/helpers").appendArrayPointValue,x=e.exports={};function b(t,e,r){var n=e.fillpattern,i=n&&x.getPatternAttr(n.shape,0,"");if(i){var a=x.getPatternAttr(n.bgcolor,0,null),o=x.getPatternAttr(n.fgcolor,0,null),s=n.fgopacity,l=x.getPatternAttr(n.size,0,8),u=x.getPatternAttr(n.solidity,0,.3),f=e.uid;x.pattern(t,"point",r,f,i,l,u,void 0,n.fillmode,a,o,s)}else 
e.fillcolor&&t.call(c.fill,e.fillcolor)}x.font=function(t,e,r,n){i.isPlainObject(e)&&(n=e.color,r=e.size,e=e.family),e&&t.style("font-family",e),r+1&&t.style("font-size",r+"px"),n&&t.call(c.fill,n)},x.setPosition=function(t,e,r){t.attr("x",e).attr("y",r)},x.setSize=function(t,e,r){t.attr("width",e).attr("height",r)},x.setRect=function(t,e,r,n,i){t.call(x.setPosition,e,r).call(x.setSize,n,i)},x.translatePoint=function(t,e,r,n){var i=r.c2p(t.x),a=n.c2p(t.y);return!!(o(i)&&o(a)&&e.node())&&("text"===e.node().nodeName?e.attr("x",i).attr("y",a):e.attr("transform",f(i,a)),!0)},x.translatePoints=function(t,e,r){t.each((function(t){var i=n.select(this);x.translatePoint(t,i,e,r)}))},x.hideOutsideRangePoint=function(t,e,r,n,i,a){e.attr("display",r.isPtWithinRange(t,i)&&n.isPtWithinRange(t,a)?null:"none")},x.hideOutsideRangePoints=function(t,e){if(e._hasClipOnAxisFalse){var r=e.xaxis,i=e.yaxis;t.each((function(e){var a=e[0].trace,o=a.xcalendar,s=a.ycalendar,c=l.traceIs(a,"bar-like")?".bartext":".point,.textpoint";t.selectAll(c).each((function(t){x.hideOutsideRangePoint(t,n.select(this),r,i,o,s)}))}))}},x.crispRound=function(t,e,r){return e&&o(e)?t._context.staticPlot?e:e<1?1:Math.round(e):r||0},x.singleLineStyle=function(t,e,r,n,i){e.style("fill","none");var a=(((t||[])[0]||{}).trace||{}).line||{},o=r||a.width||0,s=i||a.dash||"";c.stroke(e,n||a.color),x.dashLine(e,s,o)},x.lineGroupStyle=function(t,e,r,i){t.style("fill","none").each((function(t){var a=(((t||[])[0]||{}).trace||{}).line||{},o=e||a.width||0,s=i||a.dash||"";n.select(this).call(c.stroke,r||a.color).call(x.dashLine,s,o)}))},x.dashLine=function(t,e,r){r=+r||0,e=x.dashStyle(e,r),t.style({"stroke-dasharray":e,"stroke-width":r+"px"})},x.dashStyle=function(t,e){e=+e||1;var 
r=Math.max(e,3);return"solid"===t?t="":"dot"===t?t=r+"px,"+r+"px":"dash"===t?t=3*r+"px,"+3*r+"px":"longdash"===t?t=5*r+"px,"+5*r+"px":"dashdot"===t?t=3*r+"px,"+r+"px,"+r+"px,"+r+"px":"longdashdot"===t&&(t=5*r+"px,"+2*r+"px,"+r+"px,"+2*r+"px"),t},x.singleFillStyle=function(t,e){var r=n.select(t.node());b(t,((r.data()[0]||[])[0]||{}).trace||{},e)},x.fillGroupStyle=function(t,e){t.style("stroke-width",0).each((function(t){var r=n.select(this);t[0].trace&&b(r,t[0].trace,e)}))};var _=t("./symbol_defs");x.symbolNames=[],x.symbolFuncs=[],x.symbolNeedLines={},x.symbolNoDot={},x.symbolNoFill={},x.symbolList=[],Object.keys(_).forEach((function(t){var e=_[t],r=e.n;x.symbolList.push(r,String(r),t,r+100,String(r+100),t+"-open"),x.symbolNames[r]=t,x.symbolFuncs[r]=e.f,e.needLine&&(x.symbolNeedLines[r]=!0),e.noDot?x.symbolNoDot[r]=!0:x.symbolList.push(r+200,String(r+200),t+"-dot",r+300,String(r+300),t+"-open-dot"),e.noFill&&(x.symbolNoFill[r]=!0)}));var w=x.symbolNames.length;function T(t,e){var r=t%100;return x.symbolFuncs[r](e)+(t>=200?"M0,0.5L0.5,0L0,-0.5L-0.5,0Z":"")}x.symbolNumber=function(t){if(o(t))t=+t;else if("string"==typeof t){var e=0;t.indexOf("-open")>0&&(e=100,t=t.replace("-open","")),t.indexOf("-dot")>0&&(e+=200,t=t.replace("-dot","")),(t=x.symbolNames.indexOf(t))>=0&&(t+=e)}return t%100>=w||t>=400?0:Math.floor(Math.max(t,0))};var k={x1:1,x2:0,y1:0,y2:0},A={x1:0,x2:0,y1:1,y2:0},M=a("~f"),S={radial:{node:"radialGradient"},radialreversed:{node:"radialGradient",reversed:!0},horizontal:{node:"linearGradient",attrs:k},horizontalreversed:{node:"linearGradient",attrs:k,reversed:!0},vertical:{node:"linearGradient",attrs:A},verticalreversed:{node:"linearGradient",attrs:A,reversed:!0}};x.gradient=function(t,e,r,a,o,l){for(var u=o.length,f=S[a],h=new Array(u),p=0;p=100,e.attr("d",T(u,l))}var f,h,p,d=!1;if(t.so)p=s.outlierwidth,h=s.outliercolor,f=o.outliercolor;else{var m=(s||{}).width;p=(t.mlw+1||m+1||(t.trace?(t.trace.marker.line||{}).width:0)+1)-1||0,h="mlc"in 
t?t.mlcc=n.lineScale(t.mlc):i.isArrayOrTypedArray(s.color)?c.defaultLine:s.color,i.isArrayOrTypedArray(o.color)&&(f=c.defaultLine,d=!0),f="mc"in t?t.mcc=n.markerScale(t.mc):o.color||"rgba(0,0,0,0)",n.selectedColorFn&&(f=n.selectedColorFn(t))}if(t.om)e.call(c.stroke,f).style({"stroke-width":(p||1)+"px",fill:"none"});else{e.style("stroke-width",(t.isBlank?0:p)+"px");var g=o.gradient,v=t.mgt;v?d=!0:v=g&&g.type,i.isArrayOrTypedArray(v)&&(v=v[0],S[v]||(v=0));var y=o.pattern,b=y&&x.getPatternAttr(y.shape,t.i,"");if(v&&"none"!==v){var _=t.mgc;_?d=!0:_=g.color;var w=r.uid;d&&(w+="-"+t.i),x.gradient(e,a,w,v,[[0,_],[1,f]],"fill")}else if(b){var k=x.getPatternAttr(y.bgcolor,t.i,null),A=x.getPatternAttr(y.fgcolor,t.i,null),M=y.fgopacity,E=x.getPatternAttr(y.size,t.i,8),L=x.getPatternAttr(y.solidity,t.i,.3),C=t.mcc||i.isArrayOrTypedArray(y.shape)||i.isArrayOrTypedArray(y.bgcolor)||i.isArrayOrTypedArray(y.size)||i.isArrayOrTypedArray(y.solidity),P=r.uid;C&&(P+="-"+t.i),x.pattern(e,"point",a,P,b,E,L,t.mcc,y.fillmode,k,A,M)}else c.fill(e,f);p&&c.stroke(e,h)}},x.makePointStyleFns=function(t){var e={},r=t.marker;return e.markerScale=x.tryColorscale(r,""),e.lineScale=x.tryColorscale(r,"line"),l.traceIs(t,"symbols")&&(e.ms2mrc=g.isBubble(t)?v(t):function(){return(r.size||6)/2}),t.selectedpoints&&i.extendFlat(e,x.makeSelectedPointStyleFns(t)),e},x.makeSelectedPointStyleFns=function(t){var e={},r=t.selected||{},n=t.unselected||{},a=t.marker||{},o=r.marker||{},s=n.marker||{},c=a.opacity,u=o.opacity,f=s.opacity,h=void 0!==u,p=void 0!==f;(i.isArrayOrTypedArray(c)||h||p)&&(e.selectedOpacityFn=function(t){var e=void 0===t.mo?a.opacity:t.mo;return t.selected?h?u:e:p?f:m*e});var d=a.color,g=o.color,v=s.color;(g||v)&&(e.selectedColorFn=function(t){var e=t.mcc||d;return t.selected?g||e:v||e});var y=a.size,x=o.size,b=s.size,_=void 0!==x,w=void 0!==b;return l.traceIs(t,"symbols")&&(_||w)&&(e.selectedSizeFn=function(t){var e=t.mrc||y/2;return 
t.selected?_?x/2:e:w?b/2:e}),e},x.makeSelectedTextStyleFns=function(t){var e={},r=t.selected||{},n=t.unselected||{},i=t.textfont||{},a=r.textfont||{},o=n.textfont||{},s=i.color,l=a.color,u=o.color;return e.selectedTextColorFn=function(t){var e=t.tc||s;return t.selected?l||e:u||(l?e:c.addOpacity(e,m))},e},x.selectedPointStyle=function(t,e){if(t.size()&&e.selectedpoints){var r=x.makeSelectedPointStyleFns(e),i=e.marker||{},a=[];r.selectedOpacityFn&&a.push((function(t,e){t.style("opacity",r.selectedOpacityFn(e))})),r.selectedColorFn&&a.push((function(t,e){c.fill(t,r.selectedColorFn(e))})),r.selectedSizeFn&&a.push((function(t,e){var n=e.mx||i.symbol||0,a=r.selectedSizeFn(e);t.attr("d",T(x.symbolNumber(n),a)),e.mrc2=a})),a.length&&t.each((function(t){for(var e=n.select(this),r=0;r0?r:0}x.textPointStyle=function(t,e,r){if(t.size()){var a;if(e.selectedpoints){var o=x.makeSelectedTextStyleFns(e);a=o.selectedTextColorFn}var s=e.texttemplate,l=r._fullLayout;t.each((function(t){var o=n.select(this),c=s?i.extractOption(t,e,"txt","texttemplate"):i.extractOption(t,e,"tx","text");if(c||0===c){if(s){var u=e._module.formatLabels,f=u?u(t,e,l):{},p={};y(p,e,t.i);var d=e._meta||{};c=i.texttemplateString(c,f,l._d3locale,p,t,d)}var m=t.tp||e.textposition,g=C(t,e),v=a?a(t):t.tc||e.textfont.color;o.call(x.font,t.tf||e.textfont.family,g,v).text(c).call(h.convertToTspans,r).call(L,m,g,t.mrc)}else o.remove()}))}},x.selectedTextStyle=function(t,e){if(t.size()&&e.selectedpoints){var r=x.makeSelectedTextStyleFns(e);t.each((function(t){var i=n.select(this),a=r.selectedTextColorFn(t),o=t.tp||e.textposition,s=C(t,e);c.fill(i,a);var u=l.traceIs(e,"bar-like");L(i,o,s,t.mrc2||t.mrc,u)}))}};function P(t,e,r,i){var 
a=t[0]-e[0],o=t[1]-e[1],s=r[0]-e[0],l=r[1]-e[1],c=Math.pow(a*a+o*o,.25),u=Math.pow(s*s+l*l,.25),f=(u*u*a-c*c*s)*i,h=(u*u*o-c*c*l)*i,p=3*u*(c+u),d=3*c*(c+u);return[[n.round(e[0]+(p&&f/p),2),n.round(e[1]+(p&&h/p),2)],[n.round(e[0]-(d&&f/d),2),n.round(e[1]-(d&&h/d),2)]]}x.smoothopen=function(t,e){if(t.length<3)return"M"+t.join("L");var r,n="M"+t[0],i=[];for(r=1;r=1e4&&(x.savedBBoxes={},z=0),r&&(x.savedBBoxes[r]=g),z++,i.extendFlat({},g)},x.setClipUrl=function(t,e,r){t.attr("clip-path",R(e,r))},x.getTranslate=function(t){var e=(t[t.attr?"attr":"getAttribute"]("transform")||"").replace(/.*\btranslate\((-?\d*\.?\d*)[^-\d]*(-?\d*\.?\d*)[^\d].*/,(function(t,e,r){return[e,r].join(" ")})).split(" ");return{x:+e[0]||0,y:+e[1]||0}},x.setTranslate=function(t,e,r){var n=t.attr?"attr":"getAttribute",i=t.attr?"attr":"setAttribute",a=t[n]("transform")||"";return e=e||0,r=r||0,a=a.replace(/(\btranslate\(.*?\);?)/,"").trim(),a=(a+=f(e,r)).trim(),t[i]("transform",a),a},x.getScale=function(t){var e=(t[t.attr?"attr":"getAttribute"]("transform")||"").replace(/.*\bscale\((\d*\.?\d*)[^\d]*(\d*\.?\d*)[^\d].*/,(function(t,e,r){return[e,r].join(" ")})).split(" ");return{x:+e[0]||1,y:+e[1]||1}},x.setScale=function(t,e,r){var n=t.attr?"attr":"getAttribute",i=t.attr?"attr":"setAttribute",a=t[n]("transform")||"";return e=e||1,r=r||1,a=a.replace(/(\bscale\(.*?\);?)/,"").trim(),a=(a+="scale("+e+","+r+")").trim(),t[i]("transform",a),a};var F=/\s*sc.*/;x.setPointGroupScale=function(t,e,r){if(e=e||1,r=r||1,t){var n=1===e&&1===r?"":"scale("+e+","+r+")";t.each((function(){var t=(this.getAttribute("transform")||"").replace(F,"");t=(t+=n).trim(),this.setAttribute("transform",t)}))}};var B=/translate\([^)]*\)\s*$/;x.setTextPointsScale=function(t,e,r){t&&t.each((function(){var t,i=n.select(this),a=i.select("text");if(a.node()){var 
o=parseFloat(a.attr("x")||0),s=parseFloat(a.attr("y")||0),l=(i.attr("transform")||"").match(B);t=1===e&&1===r?[]:[f(o,s),"scale("+e+","+r+")",f(-o,-s)],l&&t.push(l),i.attr("transform",t.join(""))}}))}},{"../../components/fx/helpers":397,"../../constants/alignment":466,"../../constants/interactions":473,"../../constants/xmlns_namespaces":475,"../../lib":498,"../../lib/svg_text_utils":524,"../../registry":633,"../../traces/scatter/make_bubble_size_func":939,"../../traces/scatter/subtypes":947,"../color":361,"../colorscale":373,"./symbol_defs":384,"@plotly/d3":58,"fast-isnumeric":185,tinycolor2:307}],384:[function(t,e,r){"use strict";var n=t("@plotly/d3");e.exports={circle:{n:0,f:function(t){var e=n.round(t,2);return"M"+e+",0A"+e+","+e+" 0 1,1 0,-"+e+"A"+e+","+e+" 0 0,1 "+e+",0Z"}},square:{n:1,f:function(t){var e=n.round(t,2);return"M"+e+","+e+"H-"+e+"V-"+e+"H"+e+"Z"}},diamond:{n:2,f:function(t){var e=n.round(1.3*t,2);return"M"+e+",0L0,"+e+"L-"+e+",0L0,-"+e+"Z"}},cross:{n:3,f:function(t){var e=n.round(.4*t,2),r=n.round(1.2*t,2);return"M"+r+","+e+"H"+e+"V"+r+"H-"+e+"V"+e+"H-"+r+"V-"+e+"H-"+e+"V-"+r+"H"+e+"V-"+e+"H"+r+"Z"}},x:{n:4,f:function(t){var e=n.round(.8*t/Math.sqrt(2),2),r="l"+e+","+e,i="l"+e+",-"+e,a="l-"+e+",-"+e,o="l-"+e+","+e;return"M0,"+e+r+i+a+i+a+o+a+o+r+o+r+"Z"}},"triangle-up":{n:5,f:function(t){var e=n.round(2*t/Math.sqrt(3),2);return"M-"+e+","+n.round(t/2,2)+"H"+e+"L0,-"+n.round(t,2)+"Z"}},"triangle-down":{n:6,f:function(t){var e=n.round(2*t/Math.sqrt(3),2);return"M-"+e+",-"+n.round(t/2,2)+"H"+e+"L0,"+n.round(t,2)+"Z"}},"triangle-left":{n:7,f:function(t){var e=n.round(2*t/Math.sqrt(3),2);return"M"+n.round(t/2,2)+",-"+e+"V"+e+"L-"+n.round(t,2)+",0Z"}},"triangle-right":{n:8,f:function(t){var e=n.round(2*t/Math.sqrt(3),2);return"M-"+n.round(t/2,2)+",-"+e+"V"+e+"L"+n.round(t,2)+",0Z"}},"triangle-ne":{n:9,f:function(t){var e=n.round(.6*t,2),r=n.round(1.2*t,2);return"M-"+r+",-"+e+"H"+e+"V"+r+"Z"}},"triangle-se":{n:10,f:function(t){var 
e=n.round(.6*t,2),r=n.round(1.2*t,2);return"M"+e+",-"+r+"V"+e+"H-"+r+"Z"}},"triangle-sw":{n:11,f:function(t){var e=n.round(.6*t,2),r=n.round(1.2*t,2);return"M"+r+","+e+"H-"+e+"V-"+r+"Z"}},"triangle-nw":{n:12,f:function(t){var e=n.round(.6*t,2),r=n.round(1.2*t,2);return"M-"+e+","+r+"V-"+e+"H"+r+"Z"}},pentagon:{n:13,f:function(t){var e=n.round(.951*t,2),r=n.round(.588*t,2),i=n.round(-t,2),a=n.round(-.309*t,2);return"M"+e+","+a+"L"+r+","+n.round(.809*t,2)+"H-"+r+"L-"+e+","+a+"L0,"+i+"Z"}},hexagon:{n:14,f:function(t){var e=n.round(t,2),r=n.round(t/2,2),i=n.round(t*Math.sqrt(3)/2,2);return"M"+i+",-"+r+"V"+r+"L0,"+e+"L-"+i+","+r+"V-"+r+"L0,-"+e+"Z"}},hexagon2:{n:15,f:function(t){var e=n.round(t,2),r=n.round(t/2,2),i=n.round(t*Math.sqrt(3)/2,2);return"M-"+r+","+i+"H"+r+"L"+e+",0L"+r+",-"+i+"H-"+r+"L-"+e+",0Z"}},octagon:{n:16,f:function(t){var e=n.round(.924*t,2),r=n.round(.383*t,2);return"M-"+r+",-"+e+"H"+r+"L"+e+",-"+r+"V"+r+"L"+r+","+e+"H-"+r+"L-"+e+","+r+"V-"+r+"Z"}},star:{n:17,f:function(t){var e=1.4*t,r=n.round(.225*e,2),i=n.round(.951*e,2),a=n.round(.363*e,2),o=n.round(.588*e,2),s=n.round(-e,2),l=n.round(-.309*e,2),c=n.round(.118*e,2),u=n.round(.809*e,2);return"M"+r+","+l+"H"+i+"L"+a+","+c+"L"+o+","+u+"L0,"+n.round(.382*e,2)+"L-"+o+","+u+"L-"+a+","+c+"L-"+i+","+l+"H-"+r+"L0,"+s+"Z"}},hexagram:{n:18,f:function(t){var e=n.round(.66*t,2),r=n.round(.38*t,2),i=n.round(.76*t,2);return"M-"+i+",0l-"+r+",-"+e+"h"+i+"l"+r+",-"+e+"l"+r+","+e+"h"+i+"l-"+r+","+e+"l"+r+","+e+"h-"+i+"l-"+r+","+e+"l-"+r+",-"+e+"h-"+i+"Z"}},"star-triangle-up":{n:19,f:function(t){var e=n.round(t*Math.sqrt(3)*.8,2),r=n.round(.8*t,2),i=n.round(1.6*t,2),a=n.round(4*t,2),o="A "+a+","+a+" 0 0 1 ";return"M-"+e+","+r+o+e+","+r+o+"0,-"+i+o+"-"+e+","+r+"Z"}},"star-triangle-down":{n:20,f:function(t){var e=n.round(t*Math.sqrt(3)*.8,2),r=n.round(.8*t,2),i=n.round(1.6*t,2),a=n.round(4*t,2),o="A "+a+","+a+" 0 0 1 ";return"M"+e+",-"+r+o+"-"+e+",-"+r+o+"0,"+i+o+e+",-"+r+"Z"}},"star-square":{n:21,f:function(t){var 
e=n.round(1.1*t,2),r=n.round(2*t,2),i="A "+r+","+r+" 0 0 1 ";return"M-"+e+",-"+e+i+"-"+e+","+e+i+e+","+e+i+e+",-"+e+i+"-"+e+",-"+e+"Z"}},"star-diamond":{n:22,f:function(t){var e=n.round(1.4*t,2),r=n.round(1.9*t,2),i="A "+r+","+r+" 0 0 1 ";return"M-"+e+",0"+i+"0,"+e+i+e+",0"+i+"0,-"+e+i+"-"+e+",0Z"}},"diamond-tall":{n:23,f:function(t){var e=n.round(.7*t,2),r=n.round(1.4*t,2);return"M0,"+r+"L"+e+",0L0,-"+r+"L-"+e+",0Z"}},"diamond-wide":{n:24,f:function(t){var e=n.round(1.4*t,2),r=n.round(.7*t,2);return"M0,"+r+"L"+e+",0L0,-"+r+"L-"+e+",0Z"}},hourglass:{n:25,f:function(t){var e=n.round(t,2);return"M"+e+","+e+"H-"+e+"L"+e+",-"+e+"H-"+e+"Z"},noDot:!0},bowtie:{n:26,f:function(t){var e=n.round(t,2);return"M"+e+","+e+"V-"+e+"L-"+e+","+e+"V-"+e+"Z"},noDot:!0},"circle-cross":{n:27,f:function(t){var e=n.round(t,2);return"M0,"+e+"V-"+e+"M"+e+",0H-"+e+"M"+e+",0A"+e+","+e+" 0 1,1 0,-"+e+"A"+e+","+e+" 0 0,1 "+e+",0Z"},needLine:!0,noDot:!0},"circle-x":{n:28,f:function(t){var e=n.round(t,2),r=n.round(t/Math.sqrt(2),2);return"M"+r+","+r+"L-"+r+",-"+r+"M"+r+",-"+r+"L-"+r+","+r+"M"+e+",0A"+e+","+e+" 0 1,1 0,-"+e+"A"+e+","+e+" 0 0,1 "+e+",0Z"},needLine:!0,noDot:!0},"square-cross":{n:29,f:function(t){var e=n.round(t,2);return"M0,"+e+"V-"+e+"M"+e+",0H-"+e+"M"+e+","+e+"H-"+e+"V-"+e+"H"+e+"Z"},needLine:!0,noDot:!0},"square-x":{n:30,f:function(t){var e=n.round(t,2);return"M"+e+","+e+"L-"+e+",-"+e+"M"+e+",-"+e+"L-"+e+","+e+"M"+e+","+e+"H-"+e+"V-"+e+"H"+e+"Z"},needLine:!0,noDot:!0},"diamond-cross":{n:31,f:function(t){var e=n.round(1.3*t,2);return"M"+e+",0L0,"+e+"L-"+e+",0L0,-"+e+"ZM0,-"+e+"V"+e+"M-"+e+",0H"+e},needLine:!0,noDot:!0},"diamond-x":{n:32,f:function(t){var e=n.round(1.3*t,2),r=n.round(.65*t,2);return"M"+e+",0L0,"+e+"L-"+e+",0L0,-"+e+"ZM-"+r+",-"+r+"L"+r+","+r+"M-"+r+","+r+"L"+r+",-"+r},needLine:!0,noDot:!0},"cross-thin":{n:33,f:function(t){var e=n.round(1.4*t,2);return"M0,"+e+"V-"+e+"M"+e+",0H-"+e},needLine:!0,noDot:!0,noFill:!0},"x-thin":{n:34,f:function(t){var 
e=n.round(t,2);return"M"+e+","+e+"L-"+e+",-"+e+"M"+e+",-"+e+"L-"+e+","+e},needLine:!0,noDot:!0,noFill:!0},asterisk:{n:35,f:function(t){var e=n.round(1.2*t,2),r=n.round(.85*t,2);return"M0,"+e+"V-"+e+"M"+e+",0H-"+e+"M"+r+","+r+"L-"+r+",-"+r+"M"+r+",-"+r+"L-"+r+","+r},needLine:!0,noDot:!0,noFill:!0},hash:{n:36,f:function(t){var e=n.round(t/2,2),r=n.round(t,2);return"M"+e+","+r+"V-"+r+"m-"+r+",0V"+r+"M"+r+","+e+"H-"+r+"m0,-"+r+"H"+r},needLine:!0,noFill:!0},"y-up":{n:37,f:function(t){var e=n.round(1.2*t,2),r=n.round(1.6*t,2),i=n.round(.8*t,2);return"M-"+e+","+i+"L0,0M"+e+","+i+"L0,0M0,-"+r+"L0,0"},needLine:!0,noDot:!0,noFill:!0},"y-down":{n:38,f:function(t){var e=n.round(1.2*t,2),r=n.round(1.6*t,2),i=n.round(.8*t,2);return"M-"+e+",-"+i+"L0,0M"+e+",-"+i+"L0,0M0,"+r+"L0,0"},needLine:!0,noDot:!0,noFill:!0},"y-left":{n:39,f:function(t){var e=n.round(1.2*t,2),r=n.round(1.6*t,2),i=n.round(.8*t,2);return"M"+i+","+e+"L0,0M"+i+",-"+e+"L0,0M-"+r+",0L0,0"},needLine:!0,noDot:!0,noFill:!0},"y-right":{n:40,f:function(t){var e=n.round(1.2*t,2),r=n.round(1.6*t,2),i=n.round(.8*t,2);return"M-"+i+","+e+"L0,0M-"+i+",-"+e+"L0,0M"+r+",0L0,0"},needLine:!0,noDot:!0,noFill:!0},"line-ew":{n:41,f:function(t){var e=n.round(1.4*t,2);return"M"+e+",0H-"+e},needLine:!0,noDot:!0,noFill:!0},"line-ns":{n:42,f:function(t){var e=n.round(1.4*t,2);return"M0,"+e+"V-"+e},needLine:!0,noDot:!0,noFill:!0},"line-ne":{n:43,f:function(t){var e=n.round(t,2);return"M"+e+",-"+e+"L-"+e+","+e},needLine:!0,noDot:!0,noFill:!0},"line-nw":{n:44,f:function(t){var e=n.round(t,2);return"M"+e+","+e+"L-"+e+",-"+e},needLine:!0,noDot:!0,noFill:!0},"arrow-up":{n:45,f:function(t){var e=n.round(t,2);return"M0,0L-"+e+","+n.round(2*t,2)+"H"+e+"Z"},noDot:!0},"arrow-down":{n:46,f:function(t){var e=n.round(t,2);return"M0,0L-"+e+",-"+n.round(2*t,2)+"H"+e+"Z"},noDot:!0},"arrow-left":{n:47,f:function(t){var e=n.round(2*t,2),r=n.round(t,2);return"M0,0L"+e+",-"+r+"V"+r+"Z"},noDot:!0},"arrow-right":{n:48,f:function(t){var 
e=n.round(2*t,2),r=n.round(t,2);return"M0,0L-"+e+",-"+r+"V"+r+"Z"},noDot:!0},"arrow-bar-up":{n:49,f:function(t){var e=n.round(t,2);return"M-"+e+",0H"+e+"M0,0L-"+e+","+n.round(2*t,2)+"H"+e+"Z"},needLine:!0,noDot:!0},"arrow-bar-down":{n:50,f:function(t){var e=n.round(t,2);return"M-"+e+",0H"+e+"M0,0L-"+e+",-"+n.round(2*t,2)+"H"+e+"Z"},needLine:!0,noDot:!0},"arrow-bar-left":{n:51,f:function(t){var e=n.round(2*t,2),r=n.round(t,2);return"M0,-"+r+"V"+r+"M0,0L"+e+",-"+r+"V"+r+"Z"},needLine:!0,noDot:!0},"arrow-bar-right":{n:52,f:function(t){var e=n.round(2*t,2),r=n.round(t,2);return"M0,-"+r+"V"+r+"M0,0L-"+e+",-"+r+"V"+r+"Z"},needLine:!0,noDot:!0}}},{"@plotly/d3":58}],385:[function(t,e,r){"use strict";e.exports={visible:{valType:"boolean",editType:"calc"},type:{valType:"enumerated",values:["percent","constant","sqrt","data"],editType:"calc"},symmetric:{valType:"boolean",editType:"calc"},array:{valType:"data_array",editType:"calc"},arrayminus:{valType:"data_array",editType:"calc"},value:{valType:"number",min:0,dflt:10,editType:"calc"},valueminus:{valType:"number",min:0,dflt:10,editType:"calc"},traceref:{valType:"integer",min:0,dflt:0,editType:"style"},tracerefminus:{valType:"integer",min:0,dflt:0,editType:"style"},copy_ystyle:{valType:"boolean",editType:"plot"},copy_zstyle:{valType:"boolean",editType:"style"},color:{valType:"color",editType:"style"},thickness:{valType:"number",min:0,dflt:2,editType:"style"},width:{valType:"number",min:0,editType:"plot"},editType:"calc",_deprecated:{opacity:{valType:"number",editType:"style"}}}},{}],386:[function(t,e,r){"use strict";var n=t("fast-isnumeric"),i=t("../../registry"),a=t("../../plots/cartesian/axes"),o=t("../../lib"),s=t("./compute_error");function l(t,e,r,i){var l=e["error_"+i]||{},c=[];if(l.visible&&-1!==["linear","log"].indexOf(r.type)){for(var u=s(l),f=0;f0;e.each((function(e){var f,h=e[0].trace,p=h.error_x||{},d=h.error_y||{};h.ids&&(f=function(t){return t.id});var 
m=o.hasMarkers(h)&&h.marker.maxdisplayed>0;d.visible||p.visible||(e=[]);var g=n.select(this).selectAll("g.errorbar").data(e,f);if(g.exit().remove(),e.length){p.visible||g.selectAll("path.xerror").remove(),d.visible||g.selectAll("path.yerror").remove(),g.style("opacity",1);var v=g.enter().append("g").classed("errorbar",!0);u&&v.style("opacity",0).transition().duration(s.duration).style("opacity",1),a.setClipUrl(g,r.layerClipId,t),g.each((function(t){var e=n.select(this),r=function(t,e,r){var n={x:e.c2p(t.x),y:r.c2p(t.y)};void 0!==t.yh&&(n.yh=r.c2p(t.yh),n.ys=r.c2p(t.ys),i(n.ys)||(n.noYS=!0,n.ys=r.c2p(t.ys,!0)));void 0!==t.xh&&(n.xh=e.c2p(t.xh),n.xs=e.c2p(t.xs),i(n.xs)||(n.noXS=!0,n.xs=e.c2p(t.xs,!0)));return n}(t,l,c);if(!m||t.vis){var a,o=e.select("path.yerror");if(d.visible&&i(r.x)&&i(r.yh)&&i(r.ys)){var f=d.width;a="M"+(r.x-f)+","+r.yh+"h"+2*f+"m-"+f+",0V"+r.ys,r.noYS||(a+="m-"+f+",0h"+2*f),!o.size()?o=e.append("path").style("vector-effect","non-scaling-stroke").classed("yerror",!0):u&&(o=o.transition().duration(s.duration).ease(s.easing)),o.attr("d",a)}else o.remove();var h=e.select("path.xerror");if(p.visible&&i(r.y)&&i(r.xh)&&i(r.xs)){var g=(p.copy_ystyle?d:p).width;a="M"+r.xh+","+(r.y-g)+"v"+2*g+"m0,-"+g+"H"+r.xs,r.noXS||(a+="m0,-"+g+"v"+2*g),!h.size()?h=e.append("path").style("vector-effect","non-scaling-stroke").classed("xerror",!0):u&&(h=h.transition().duration(s.duration).ease(s.easing)),h.attr("d",a)}else h.remove()}}))}}))}},{"../../traces/scatter/subtypes":947,"../drawing":383,"@plotly/d3":58,"fast-isnumeric":185}],391:[function(t,e,r){"use strict";var n=t("@plotly/d3"),i=t("../color");e.exports=function(t){t.each((function(t){var e=t[0].trace,r=e.error_y||{},a=e.error_x||{},o=n.select(this);o.selectAll("path.yerror").style("stroke-width",r.thickness+"px").call(i.stroke,r.color),a.copy_ystyle&&(a=r),o.selectAll("path.xerror").style("stroke-width",a.thickness+"px").call(i.stroke,a.color)}))}},{"../color":361,"@plotly/d3":58}],392:[function(t,e,r){"use 
strict";var n=t("../../plots/font_attributes"),i=t("./layout_attributes").hoverlabel,a=t("../../lib/extend").extendFlat;e.exports={hoverlabel:{bgcolor:a({},i.bgcolor,{arrayOk:!0}),bordercolor:a({},i.bordercolor,{arrayOk:!0}),font:n({arrayOk:!0,editType:"none"}),align:a({},i.align,{arrayOk:!0}),namelength:a({},i.namelength,{arrayOk:!0}),editType:"none"}}},{"../../lib/extend":488,"../../plots/font_attributes":580,"./layout_attributes":402}],393:[function(t,e,r){"use strict";var n=t("../../lib"),i=t("../../registry");function a(t,e,r,i){i=i||n.identity,Array.isArray(t)&&(e[0][r]=i(t))}e.exports=function(t){var e=t.calcdata,r=t._fullLayout;function o(t){return function(e){return n.coerceHoverinfo({hoverinfo:e},{_module:t._module},r)}}for(var s=0;s=0&&r.index_[0]._length||ot<0||ot>w[0]._length)return d.unhoverRaw(t,e)}if(e.pointerX=at+_[0]._offset,e.pointerY=ot+w[0]._offset,V="xval"in e?v.flat(l,e.xval):v.p2c(_,at),H="yval"in e?v.flat(l,e.yval):v.p2c(w,ot),!i(V[0])||!i(H[0]))return o.warn("Fx.hover failed",e,t),d.unhoverRaw(t,e)}var ct=1/0;function ut(t,r){for(G=0;Gtt&&(et.splice(0,tt),ct=et[0].distance),y&&0!==B&&0===et.length){$.distance=B,$.index=!1;var f=W._module.hoverPoints($,K,Q,"closest",{hoverLayer:u._hoverlayer});if(f&&(f=f.filter((function(t){return t.spikeDistance<=B}))),f&&f.length){var h,d=f.filter((function(t){return t.xa.showspikes&&"hovered data"!==t.xa.spikesnap}));if(d.length){var m=d[0];i(m.x0)&&i(m.y0)&&(h=ht(m),(!nt.vLinePoint||nt.vLinePoint.spikeDistance>h.spikeDistance)&&(nt.vLinePoint=h))}var g=f.filter((function(t){return t.ya.showspikes&&"hovered data"!==t.ya.spikesnap}));if(g.length){var x=g[0];i(x.x0)&&i(x.y0)&&(h=ht(x),(!nt.hLinePoint||nt.hLinePoint.spikeDistance>h.spikeDistance)&&(nt.hLinePoint=h))}}}}}function ft(t,e,r){for(var n,i=null,a=1/0,o=0;o0&&Math.abs(t.distance)Tt-1;Mt--)Ct(et[Mt]);et=St,gt()}var 
Pt=t._hoverdata,It=[],Ot=j(t),zt=U(t);for(q=0;q1||et.length>1)||"closest"===P&&it&&et.length>1,Wt=p.combine(u.plot_bgcolor||p.background,u.paper_bgcolor),Xt=I(et,{gd:t,hovermode:P,rotateLabels:Yt,bgColor:Wt,container:u._hoverlayer,outerContainer:u._paper.node(),commonLabelOpts:u.hoverlabel,hoverdistance:u.hoverdistance});v.isUnifiedHover(P)||(!function(t,e,r){var n,i,a,o,s,l,c,u=0,f=1,h=t.size(),p=new Array(h),d=0;function m(t){var e=t[0],r=t[t.length-1];if(i=e.pmin-e.pos-e.dp+e.size,a=r.pos+r.dp+r.size-e.pmax,i>.01){for(s=t.length-1;s>=0;s--)t[s].dp+=i;n=!1}if(!(a<.01)){if(i<-.01){for(s=t.length-1;s>=0;s--)t[s].dp-=a;n=!1}if(n){var c=0;for(o=0;oe.pmax&&c++;for(o=t.length-1;o>=0&&!(c<=0);o--)(l=t[o]).pos>e.pmax-1&&(l.del=!0,c--);for(o=0;o=0;s--)t[s].dp-=a;for(o=t.length-1;o>=0&&!(c<=0);o--)(l=t[o]).pos+l.dp+l.size>e.pmax&&(l.del=!0,c--)}}}t.each((function(t){var n=t[e],i="x"===n._id.charAt(0),a=n.range;0===d&&a&&a[0]>a[1]!==i&&(f=-1),p[d++]=[{datum:t,traceIndex:t.trace.index,dp:0,pos:t.pos,posref:t.posref,size:t.by*(i?T:1)/2,pmin:0,pmax:i?r.width:r.height}]})),p.sort((function(t,e){return t[0].posref-e[0].posref||f*(e[0].traceIndex-t[0].traceIndex)}));for(;!n&&u<=h;){for(u++,n=!0,o=0;o.01&&y.pmin===x.pmin&&y.pmax===x.pmax){for(s=v.length-1;s>=0;s--)v[s].dp+=i;for(g.push.apply(g,v),p.splice(o+1,1),c=0,s=g.length-1;s>=0;s--)c+=g[s].dp;for(a=c/g.length,s=g.length-1;s>=0;s--)g[s].dp-=a;n=!1}else o++}p.forEach(m)}for(o=p.length-1;o>=0;o--){var b=p[o];for(s=b.length-1;s>=0;s--){var _=b[s],w=_.datum;w.offset=_.dp,w.del=_.del}}}(Xt,Yt?"xa":"ya",u),z(Xt,Yt,u._invScaleX,u._invScaleY));if(s&&s.tagName){var Zt=g.getComponentMethod("annotations","hasClickToShow")(t,It);f(n.select(s),Zt?"pointer":"")}if(!s||a||!function(t,e,r){if(!r||r.length!==t._hoverdata.length)return!0;for(var n=r.length-1;n>=0;n--){var 
i=r[n],a=t._hoverdata[n];if(i.curveNumber!==a.curveNumber||String(i.pointNumber)!==String(a.pointNumber)||String(i.pointNumbers)!==String(a.pointNumbers))return!0}return!1}(t,0,Pt))return;Pt&&t.emit("plotly_unhover",{event:e,points:Pt});t.emit("plotly_hover",{event:e,points:t._hoverdata,xaxes:_,yaxes:w,xvals:V,yvals:H})}(t,e,r,a,s)}))},r.loneHover=function(t,e){var r=!0;Array.isArray(t)||(r=!1,t=[t]);var i=e.gd,a=j(i),o=U(i),s=I(t.map((function(t){var r=t._x0||t.x0||t.x||0,n=t._x1||t.x1||t.x||0,s=t._y0||t.y0||t.y||0,l=t._y1||t.y1||t.y||0,c=t.eventData;if(c){var u=Math.min(r,n),f=Math.max(r,n),h=Math.min(s,l),d=Math.max(s,l),m=t.trace;if(g.traceIs(m,"gl3d")){var v=i._fullLayout[m.scene]._scene.container,y=v.offsetLeft,x=v.offsetTop;u+=y,f+=y,h+=x,d+=x}c.bbox={x0:u+o,x1:f+o,y0:h+a,y1:d+a},e.inOut_bbox&&e.inOut_bbox.push(c.bbox)}else c=!1;return{color:t.color||p.defaultLine,x0:t.x0||t.x||0,x1:t.x1||t.x||0,y0:t.y0||t.y||0,y1:t.y1||t.y||0,xLabel:t.xLabel,yLabel:t.yLabel,zLabel:t.zLabel,text:t.text,name:t.name,idealAlign:t.idealAlign,borderColor:t.borderColor,fontFamily:t.fontFamily,fontSize:t.fontSize,fontColor:t.fontColor,nameLength:t.nameLength,textAlign:t.textAlign,trace:t.trace||{index:0,hoverinfo:""},xa:{_offset:0},ya:{_offset:0},index:0,hovertemplate:t.hovertemplate||!1,hovertemplateLabels:t.hovertemplateLabels||!1,eventData:c}})),{gd:i,hovermode:"closest",rotateLabels:!1,bgColor:e.bgColor||p.background,container:n.select(e.container),outerContainer:e.outerContainer||e.container}),l=0,c=0;return s.sort((function(t,e){return t.y0-e.y0})).each((function(t,r){var n=t.y0-t.by/2;t.offset=n-5([\s\S]*)<\/extra>/;function I(t,e){var r=e.gd,i=r._fullLayout,a=e.hovermode,c=e.rotateLabels,f=e.bgColor,d=e.container,m=e.outerContainer,w=e.commonLabelOpts||{};if(0===t.length)return[[]];var T=e.fontFamily||y.HOVERFONT,k=e.fontSize||y.HOVERFONTSIZE,A=t[0],E=A.xa,L=A.ya,P=a.charAt(0),I=A[P+"Label"],z=V(r,m),D=z.top,R=z.width,F=z.height,B=void 
0!==I&&A.distance<=e.hoverdistance&&("x"===a||"y"===a);if(B){var N,j,U=!0;for(N=0;Ni.width-b?(g=i.width-b,e.attr("d","M"+(b-M)+",0L"+b+","+x+M+"v"+x+(2*S+y.height)+"H-"+b+"V"+x+M+"H"+(b-2*M)+"Z")):e.attr("d","M0,0L"+M+","+x+M+"H"+(S+y.width/2)+"v"+x+(2*S+y.height)+"H-"+(S+y.width/2)+"V"+x+M+"H-"+M+"Z")}else{var _,C,P;"right"===L.side?(_="start",C=1,P="",g=E._offset+E._length):(_="end",C=-1,P="-",g=E._offset),v=L._offset+(A.y0+A.y1)/2,l.attr("text-anchor",_),e.attr("d","M0,0L"+P+M+","+M+"V"+(S+y.height/2)+"h"+P+(2*S+y.width)+"V-"+(S+y.height/2)+"H"+P+M+"V-"+M+"Z");var O,z=y.height/2,R=D-y.top-z,F="clip"+i._uid+"commonlabel"+L._id;if(g=0?at:ot+ct=0?ot:vt+ct=0?nt:it+ut=0?it:yt+ut=0,"top"!==t.idealAlign&&G||!Y?G?(z+=j/2,t.anchor="start"):t.anchor="middle":(z-=j/2,t.anchor="end");else if(t.pos=z,G=P+N/2+W<=R,Y=P-N/2-W>=0,"left"!==t.idealAlign&&G||!Y)if(G)P+=N/2,t.anchor="start";else{t.anchor="middle";var X=W/2,Z=P+X-R,J=P-X;Z>0&&(P-=Z),J<0&&(P+=-J)}else P-=N/2,t.anchor="end";w.attr("text-anchor",t.anchor),E&&A.attr("text-anchor",t.anchor),e.attr("transform",s(P,z)+(c?l(_):""))})),xt}function O(t,e,r,n,i,a){var s="",l="";void 0!==t.nameOverride&&(t.name=t.nameOverride),t.name&&(t.trace._meta&&(t.name=o.templateString(t.name,t.trace._meta)),s=B(t.name,t.nameLength));var c=r.charAt(0),u="x"===c?"y":"x";void 0!==t.zLabel?(void 0!==t.xLabel&&(l+="x: "+t.xLabel+"
"),void 0!==t.yLabel&&(l+="y: "+t.yLabel+"
"),"choropleth"!==t.trace.type&&"choroplethmapbox"!==t.trace.type&&(l+=(l?"z: ":"")+t.zLabel)):e&&t[c+"Label"]===i?l=t[u+"Label"]||"":void 0===t.xLabel?void 0!==t.yLabel&&"scattercarpet"!==t.trace.type&&(l=t.yLabel):l=void 0===t.yLabel?t.xLabel:"("+t.xLabel+", "+t.yLabel+")",!t.text&&0!==t.text||Array.isArray(t.text)||(l+=(l?"
":"")+t.text),void 0!==t.extraText&&(l+=(l?"
":"")+t.extraText),a&&""===l&&!t.hovertemplate&&(""===s&&a.remove(),l=s);var f=t.hovertemplate||!1;if(f){var h=t.hovertemplateLabels||t;t[c+"Label"]!==i&&(h[c+"other"]=h[c+"Val"],h[c+"otherLabel"]=h[c+"Label"]),l=(l=o.hovertemplateString(f,h,n._d3locale,t.eventData[0]||{},t.trace._meta)).replace(P,(function(e,r){return s=B(r,t.nameLength),""}))}return[l,s]}function z(t,e,r,i){var a=function(t){return t*r},o=function(t){return t*i};t.each((function(t){var r=n.select(this);if(t.del)return r.remove();var i=r.select("text.nums"),s=t.anchor,l="end"===s?-1:1,c={start:1,end:-1,middle:0}[s],f=c*(M+S),p=f+c*(t.txwidth+S),d=0,m=t.offset,g="middle"===s;g&&(f-=t.tx2width/2,p+=t.txwidth/2+S),e&&(m*=-A,d=t.offset*k),r.select("path").attr("d",g?"M-"+a(t.bx/2+t.tx2width/2)+","+o(m-t.by/2)+"h"+a(t.bx)+"v"+o(t.by)+"h-"+a(t.bx)+"Z":"M0,0L"+a(l*M+d)+","+o(M+m)+"v"+o(t.by/2-M)+"h"+a(l*t.bx)+"v-"+o(t.by)+"H"+a(l*M+d)+"V"+o(m-M)+"Z");var v=d+f,y=m+t.ty0-t.by/2+S,x=t.textAlign||"auto";"auto"!==x&&("left"===x&&"start"!==s?(i.attr("text-anchor","start"),v=g?-t.bx/2-t.tx2width/2+S:-t.bx-S):"right"===x&&"end"!==s&&(i.attr("text-anchor","end"),v=g?t.bx/2-t.tx2width/2-S:t.bx+S)),i.call(u.positionText,a(v),o(y)),t.tx2width&&(r.select("text.name").call(u.positionText,a(p+c*S+d),o(m+t.ty0-t.by/2+S)),r.select("rect").call(h.setRect,a(p+(c-1)*t.tx2width/2+d),o(m-t.by/2-1),a(t.tx2width),o(t.by+2)))}))}function D(t,e){var r=t.index,n=t.trace||{},a=t.cd[0],s=t.cd[r]||{};function l(t){return t||i(t)&&0===t}var c=Array.isArray(r)?function(t,e){var i=o.castOption(a,r,t);return l(i)?i:o.extractOption({},n,"",e)}:function(t,e){return o.extractOption(s,n,t,e)};function u(e,r,n){var 
i=c(r,n);l(i)&&(t[e]=i)}if(u("hoverinfo","hi","hoverinfo"),u("bgcolor","hbg","hoverlabel.bgcolor"),u("borderColor","hbc","hoverlabel.bordercolor"),u("fontFamily","htf","hoverlabel.font.family"),u("fontSize","hts","hoverlabel.font.size"),u("fontColor","htc","hoverlabel.font.color"),u("nameLength","hnl","hoverlabel.namelength"),u("textAlign","hta","hoverlabel.align"),t.posref="y"===e||"closest"===e&&"h"===n.orientation?t.xa._offset+(t.x0+t.x1)/2:t.ya._offset+(t.y0+t.y1)/2,t.x0=o.constrain(t.x0,0,t.xa._length),t.x1=o.constrain(t.x1,0,t.xa._length),t.y0=o.constrain(t.y0,0,t.ya._length),t.y1=o.constrain(t.y1,0,t.ya._length),void 0!==t.xLabelVal&&(t.xLabel="xLabel"in t?t.xLabel:m.hoverLabelText(t.xa,t.xLabelVal,n.xhoverformat),t.xVal=t.xa.c2d(t.xLabelVal)),void 0!==t.yLabelVal&&(t.yLabel="yLabel"in t?t.yLabel:m.hoverLabelText(t.ya,t.yLabelVal,n.yhoverformat),t.yVal=t.ya.c2d(t.yLabelVal)),void 0!==t.zLabelVal&&void 0===t.zLabel&&(t.zLabel=String(t.zLabelVal)),!(isNaN(t.xerr)||"log"===t.xa.type&&t.xerr<=0)){var f=m.tickText(t.xa,t.xa.c2l(t.xerr),"hover").text;void 0!==t.xerrneg?t.xLabel+=" +"+f+" / -"+m.tickText(t.xa,t.xa.c2l(t.xerrneg),"hover").text:t.xLabel+=" \xb1 "+f,"x"===e&&(t.distance+=1)}if(!(isNaN(t.yerr)||"log"===t.ya.type&&t.yerr<=0)){var h=m.tickText(t.ya,t.ya.c2l(t.yerr),"hover").text;void 0!==t.yerrneg?t.yLabel+=" +"+h+" / -"+m.tickText(t.ya,t.ya.c2l(t.yerrneg),"hover").text:t.yLabel+=" \xb1 "+h,"y"===e&&(t.distance+=1)}var p=t.hoverinfo||t.trace.hoverinfo;return p&&"all"!==p&&(-1===(p=Array.isArray(p)?p:p.split("+")).indexOf("x")&&(t.xLabel=void 0),-1===p.indexOf("y")&&(t.yLabel=void 0),-1===p.indexOf("z")&&(t.zLabel=void 0),-1===p.indexOf("text")&&(t.text=void 0),-1===p.indexOf("name")&&(t.name=void 0)),t}function R(t,e,r){var n,i,o=r.container,s=r.fullLayout,l=s._size,c=r.event,u=!!e.hLinePoint,f=!!e.vLinePoint;if(o.selectAll(".spikeline").remove(),f||u){var d=p.combine(s.plot_bgcolor,s.paper_bgcolor);if(u){var 
g,v,y=e.hLinePoint;n=y&&y.xa,"cursor"===(i=y&&y.ya).spikesnap?(g=c.pointerX,v=c.pointerY):(g=n._offset+y.x,v=i._offset+y.y);var x,b,_=a.readability(y.color,d)<1.5?p.contrast(d):y.color,w=i.spikemode,T=i.spikethickness,k=i.spikecolor||_,A=m.getPxPosition(t,i);if(-1!==w.indexOf("toaxis")||-1!==w.indexOf("across")){if(-1!==w.indexOf("toaxis")&&(x=A,b=g),-1!==w.indexOf("across")){var M=i._counterDomainMin,S=i._counterDomainMax;"free"===i.anchor&&(M=Math.min(M,i.position),S=Math.max(S,i.position)),x=l.l+M*l.w,b=l.l+S*l.w}o.insert("line",":first-child").attr({x1:x,x2:b,y1:v,y2:v,"stroke-width":T,stroke:k,"stroke-dasharray":h.dashStyle(i.spikedash,T)}).classed("spikeline",!0).classed("crisp",!0),o.insert("line",":first-child").attr({x1:x,x2:b,y1:v,y2:v,"stroke-width":T+2,stroke:d}).classed("spikeline",!0).classed("crisp",!0)}-1!==w.indexOf("marker")&&o.insert("circle",":first-child").attr({cx:A+("right"!==i.side?T:-T),cy:v,r:T,fill:k}).classed("spikeline",!0)}if(f){var E,L,C=e.vLinePoint;n=C&&C.xa,i=C&&C.ya,"cursor"===n.spikesnap?(E=c.pointerX,L=c.pointerY):(E=n._offset+C.x,L=i._offset+C.y);var P,I,O=a.readability(C.color,d)<1.5?p.contrast(d):C.color,z=n.spikemode,D=n.spikethickness,R=n.spikecolor||O,F=m.getPxPosition(t,n);if(-1!==z.indexOf("toaxis")||-1!==z.indexOf("across")){if(-1!==z.indexOf("toaxis")&&(P=F,I=L),-1!==z.indexOf("across")){var B=n._counterDomainMin,N=n._counterDomainMax;"free"===n.anchor&&(B=Math.min(B,n.position),N=Math.max(N,n.position)),P=l.t+(1-N)*l.h,I=l.t+(1-B)*l.h}o.insert("line",":first-child").attr({x1:E,x2:E,y1:P,y2:I,"stroke-width":D,stroke:R,"stroke-dasharray":h.dashStyle(n.spikedash,D)}).classed("spikeline",!0).classed("crisp",!0),o.insert("line",":first-child").attr({x1:E,x2:E,y1:P,y2:I,"stroke-width":D+2,stroke:d}).classed("spikeline",!0).classed("crisp",!0)}-1!==z.indexOf("marker")&&o.insert("circle",":first-child").attr({cx:E,cy:F-("top"!==n.side?D:-D),r:D,fill:R}).classed("spikeline",!0)}}}function 
F(t,e){return!e||(e.vLinePoint!==t._spikepoints.vLinePoint||e.hLinePoint!==t._spikepoints.hLinePoint)}function B(t,e){return u.plainText(t||"",{len:e,allowedTags:["br","sub","sup","b","i","em"]})}function N(t,e,r){var n=e[t+"a"],i=e[t+"Val"],a=e.cd[0];if("category"===n.type)i=n._categoriesMap[i];else if("date"===n.type){var o=e.trace[t+"periodalignment"];if(o){var s=e.cd[e.index],l=s[t+"Start"];void 0===l&&(l=s[t]);var c=s[t+"End"];void 0===c&&(c=s[t]);var u=c-l;"end"===o?i+=u:"middle"===o&&(i+=u/2)}i=n.d2c(i)}return a&&a.t&&a.t.posLetter===n._id&&("group"!==r.boxmode&&"group"!==r.violinmode||(i+=a.t.dPos)),i}function j(t){return t.offsetTop+t.clientTop}function U(t){return t.offsetLeft+t.clientLeft}function V(t,e){var r=t._fullLayout,n=e.getBoundingClientRect(),i=n.x,a=n.y,s=i+n.width,l=a+n.height,c=o.apply3DTransform(r._invTransform)(i,a),u=o.apply3DTransform(r._invTransform)(s,l),f=c[0],h=c[1],p=u[0],d=u[1];return{x:f,y:h,width:p-f,height:d-h,top:Math.min(h,d),left:Math.min(f,p),right:Math.max(f,p),bottom:Math.max(h,d)}}},{"../../lib":498,"../../lib/events":487,"../../lib/override_cursor":509,"../../lib/svg_text_utils":524,"../../plots/cartesian/axes":549,"../../registry":633,"../color":361,"../dragelement":380,"../drawing":383,"../legend/defaults":413,"../legend/draw":414,"./constants":395,"./helpers":397,"@plotly/d3":58,"fast-isnumeric":185,tinycolor2:307}],399:[function(t,e,r){"use strict";var n=t("../../lib"),i=t("../color"),a=t("./helpers").isUnifiedHover;e.exports=function(t,e,r,o){o=o||{};var s=e.legend;function 
l(t){o.font[t]||(o.font[t]=s?e.legend.font[t]:e.font[t])}e&&a(e.hovermode)&&(o.font||(o.font={}),l("size"),l("family"),l("color"),s?(o.bgcolor||(o.bgcolor=i.combine(e.legend.bgcolor,e.paper_bgcolor)),o.bordercolor||(o.bordercolor=e.legend.bordercolor)):o.bgcolor||(o.bgcolor=e.paper_bgcolor)),r("hoverlabel.bgcolor",o.bgcolor),r("hoverlabel.bordercolor",o.bordercolor),r("hoverlabel.namelength",o.namelength),n.coerceFont(r,"hoverlabel.font",o.font),r("hoverlabel.align",o.align)}},{"../../lib":498,"../color":361,"./helpers":397}],400:[function(t,e,r){"use strict";var n=t("../../lib"),i=t("./layout_attributes");e.exports=function(t,e){function r(r,a){return void 0!==e[r]?e[r]:n.coerce(t,e,i,r,a)}return r("clickmode"),r("hovermode")}},{"../../lib":498,"./layout_attributes":402}],401:[function(t,e,r){"use strict";var n=t("@plotly/d3"),i=t("../../lib"),a=t("../dragelement"),o=t("./helpers"),s=t("./layout_attributes"),l=t("./hover");e.exports={moduleType:"component",name:"fx",constants:t("./constants"),schema:{layout:s},attributes:t("./attributes"),layoutAttributes:s,supplyLayoutGlobalDefaults:t("./layout_global_defaults"),supplyDefaults:t("./defaults"),supplyLayoutDefaults:t("./layout_defaults"),calc:t("./calc"),getDistanceFunction:o.getDistanceFunction,getClosest:o.getClosest,inbox:o.inbox,quadrature:o.quadrature,appendArrayPointValue:o.appendArrayPointValue,castHoverOption:function(t,e,r){return i.castOption(t,e,"hoverlabel."+r)},castHoverinfo:function(t,e,r){return i.castOption(t,r,"hoverinfo",(function(r){return i.coerceHoverinfo({hoverinfo:r},{_module:t._module},e)}))},hover:l.hover,unhover:a.unhover,loneHover:l.loneHover,loneUnhover:function(t){var 
e=i.isD3Selection(t)?t:n.select(t);e.selectAll("g.hovertext").remove(),e.selectAll(".spikeline").remove()},click:t("./click")}},{"../../lib":498,"../dragelement":380,"./attributes":392,"./calc":393,"./click":394,"./constants":395,"./defaults":396,"./helpers":397,"./hover":398,"./layout_attributes":402,"./layout_defaults":403,"./layout_global_defaults":404,"@plotly/d3":58}],402:[function(t,e,r){"use strict";var n=t("./constants"),i=t("../../plots/font_attributes"),a=i({editType:"none"});a.family.dflt=n.HOVERFONT,a.size.dflt=n.HOVERFONTSIZE,e.exports={clickmode:{valType:"flaglist",flags:["event","select"],dflt:"event",editType:"plot",extras:["none"]},dragmode:{valType:"enumerated",values:["zoom","pan","select","lasso","drawclosedpath","drawopenpath","drawline","drawrect","drawcircle","orbit","turntable",!1],dflt:"zoom",editType:"modebar"},hovermode:{valType:"enumerated",values:["x","y","closest",!1,"x unified","y unified"],dflt:"closest",editType:"modebar"},hoverdistance:{valType:"integer",min:-1,dflt:20,editType:"none"},spikedistance:{valType:"integer",min:-1,dflt:-1,editType:"none"},hoverlabel:{bgcolor:{valType:"color",editType:"none"},bordercolor:{valType:"color",editType:"none"},font:a,grouptitlefont:i({editType:"none"}),align:{valType:"enumerated",values:["left","right","auto"],dflt:"auto",editType:"none"},namelength:{valType:"integer",min:-1,dflt:15,editType:"none"},editType:"none"},selectdirection:{valType:"enumerated",values:["h","v","d","any"],dflt:"any",editType:"none"}}},{"../../plots/font_attributes":580,"./constants":395}],403:[function(t,e,r){"use strict";var n=t("../../lib"),i=t("./layout_attributes"),a=t("./hovermode_defaults"),o=t("./hoverlabel_defaults");e.exports=function(t,e){function r(r,a){return n.coerce(t,e,i,r,a)}a(t,e)&&(r("hoverdistance"),r("spikedistance")),"select"===r("dragmode")&&r("selectdirection");var 
s=e._has("mapbox"),l=e._has("geo"),c=e._basePlotModules.length;"zoom"===e.dragmode&&((s||l)&&1===c||s&&l&&2===c)&&(e.dragmode="pan"),o(t,e,r),n.coerceFont(r,"hoverlabel.grouptitlefont",e.hoverlabel.font)}},{"../../lib":498,"./hoverlabel_defaults":399,"./hovermode_defaults":400,"./layout_attributes":402}],404:[function(t,e,r){"use strict";var n=t("../../lib"),i=t("./hoverlabel_defaults"),a=t("./layout_attributes");e.exports=function(t,e){i(t,e,(function(r,i){return n.coerce(t,e,a,r,i)}))}},{"../../lib":498,"./hoverlabel_defaults":399,"./layout_attributes":402}],405:[function(t,e,r){"use strict";var n=t("../../lib"),i=t("../../lib/regex").counter,a=t("../../plots/domain").attributes,o=t("../../plots/cartesian/constants").idRegex,s=t("../../plot_api/plot_template"),l={rows:{valType:"integer",min:1,editType:"plot"},roworder:{valType:"enumerated",values:["top to bottom","bottom to top"],dflt:"top to bottom",editType:"plot"},columns:{valType:"integer",min:1,editType:"plot"},subplots:{valType:"info_array",freeLength:!0,dimensions:2,items:{valType:"enumerated",values:[i("xy").toString(),""],editType:"plot"},editType:"plot"},xaxes:{valType:"info_array",freeLength:!0,items:{valType:"enumerated",values:[o.x.toString(),""],editType:"plot"},editType:"plot"},yaxes:{valType:"info_array",freeLength:!0,items:{valType:"enumerated",values:[o.y.toString(),""],editType:"plot"},editType:"plot"},pattern:{valType:"enumerated",values:["independent","coupled"],dflt:"coupled",editType:"plot"},xgap:{valType:"number",min:0,max:1,editType:"plot"},ygap:{valType:"number",min:0,max:1,editType:"plot"},domain:a({name:"grid",editType:"plot",noGridCell:!0},{}),xside:{valType:"enumerated",values:["bottom","bottom plot","top plot","top"],dflt:"bottom plot",editType:"plot"},yside:{valType:"enumerated",values:["left","left plot","right plot","right"],dflt:"left plot",editType:"plot"},editType:"plot"};function c(t,e,r){var n=e[r+"axes"],i=Object.keys((t._splomAxes||{})[r]||{});return 
Array.isArray(n)?n:i.length?i:void 0}function u(t,e,r,n,i,a){var o=e(t+"gap",r),s=e("domain."+t);e(t+"side",n);for(var l=new Array(i),c=s[0],u=(s[1]-c)/(i-o),f=u*(1-o),h=0;h1){if(!h&&!p&&!d)"independent"===k("pattern")&&(h=!0);g._hasSubplotGrid=h;var x,b,_="top to bottom"===k("roworder"),w=h?.2:.1,T=h?.3:.1;m&&e._splomGridDflt&&(x=e._splomGridDflt.xside,b=e._splomGridDflt.yside),g._domains={x:u("x",k,w,x,y),y:u("y",k,T,b,v,_)}}else delete e.grid}function k(t,e){return n.coerce(r,g,l,t,e)}},contentDefaults:function(t,e){var r=e.grid;if(r&&r._domains){var n,i,a,o,s,l,u,h=t.grid||{},p=e._subplots,d=r._hasSubplotGrid,m=r.rows,g=r.columns,v="independent"===r.pattern,y=r._axisMap={};if(d){var x=h.subplots||[];l=r.subplots=new Array(m);var b=1;for(n=0;n1);if(!1===_&&(e.legend=void 0),(!1!==_||f.uirevision)&&(p("uirevision",e.uirevision),!1!==_)){p("bgcolor",e.paper_bgcolor),p("bordercolor"),p("borderwidth");var w,T,k,A=i.coerceFont(p,"font",e.font),M="h"===p("orientation");if(M?(w=0,n.getComponentMethod("rangeslider","isVisible")(t.xaxis)?(T=1.1,k="bottom"):(T=-.1,k="top")):(w=1.02,T=1,k="auto"),p("traceorder",x),c.isGrouped(e.legend)&&p("tracegroupgap"),p("itemsizing"),p("itemwidth"),p("itemclick"),p("itemdoubleclick"),p("groupclick"),p("x",w),p("xanchor"),p("y",T),p("yanchor",k),p("valign"),i.noneOrAll(f,h,["x","y"]),p("title.text")){p("title.side",M?"left":"top");var S=i.extendFlat({},A,{size:i.bigFont(A.size)});i.coerceFont(p,"title.font",S)}}}},{"../../lib":498,"../../plot_api/plot_template":538,"../../plots/attributes":545,"../../plots/layout_attributes":605,"../../registry":633,"./attributes":411,"./helpers":417}],414:[function(t,e,r){"use strict";var 
n=t("@plotly/d3"),i=t("../../lib"),a=t("../../plots/plots"),o=t("../../registry"),s=t("../../lib/events"),l=t("../dragelement"),c=t("../drawing"),u=t("../color"),f=t("../../lib/svg_text_utils"),h=t("./handle_click"),p=t("./constants"),d=t("../../constants/alignment"),m=d.LINE_SPACING,g=d.FROM_TL,v=d.FROM_BR,y=t("./get_legend_data"),x=t("./style"),b=t("./helpers");function _(t,e,r,n,i){var a=r.data()[0][0].trace,l={event:i,node:r.node(),curveNumber:a.index,expandedIndex:a._expandedIndex,data:t.data,layout:t.layout,frames:t._transitionData._frames,config:t._context,fullData:t._fullData,fullLayout:t._fullLayout};if(a._group&&(l.group=a._group),o.traceIs(a,"pie-like")&&(l.label=r.datum()[0].label),!1!==s.triggerHandler(t,"plotly_legendclick",l))if(1===n)e._clickTimeout=setTimeout((function(){t._fullLayout&&h(r,t,n)}),t._context.doubleClickDelay);else if(2===n){e._clickTimeout&&clearTimeout(e._clickTimeout),t._legendMouseDownTime=0,!1!==s.triggerHandler(t,"plotly_legenddoubleclick",l)&&h(r,t,n)}}function w(t,e,r){var n,a,s=t.data()[0][0],l=s.trace,u=o.traceIs(l,"pie-like"),h=!r._inHover&&e._context.edits.legendText&&!u,d=r._maxNameLength;s.groupTitle?(n=s.groupTitle.text,a=s.groupTitle.font):(a=r.font,r.entries?n=s.text:(n=u?s.label:l.name,l._meta&&(n=i.templateString(n,l._meta))));var m=i.ensureSingle(t,"text","legendtext");m.attr("text-anchor","start").call(c.font,a).text(h?T(n,d):n);var g=r.itemwidth+2*p.itemGap;f.positionText(m,g,0),h?m.call(f.makeEditable,{gd:e,text:n}).call(A,t,e,r).on("edit",(function(n){this.text(T(n,d)).call(A,t,e,r);var a=s.trace._fullInput||{},c={};if(o.hasTransform(a,"groupby")){var u=o.getTransformIndices(a,"groupby"),f=u[u.length-1],h=i.keyedContainer(a,"transforms["+f+"].styles","target","value.name");h.set(s.trace._group,n),c=h.constructUpdate()}else c.name=n;return o.call("_guiRestyle",e,c,l.index)})):A(m,t,e,r)}function T(t,e){var r=Math.max(4,e);if(t&&t.trim().length>=r/2)return t;for(var n=r-(t=t||"").length;n>0;n--)t+=" ";return 
t}function k(t,e){var r,a=e._context.doubleClickDelay,o=1,s=i.ensureSingle(t,"rect","legendtoggle",(function(t){e._context.staticPlot||t.style("cursor","pointer").attr("pointer-events","all"),t.call(u.fill,"rgba(0,0,0,0)")}));e._context.staticPlot||(s.on("mousedown",(function(){(r=(new Date).getTime())-e._legendMouseDownTimea&&(o=Math.max(o-1,1)),_(e,r,t,o,n.event)}})))}function A(t,e,r,n,i){n._inHover&&t.attr("data-notex",!0),f.convertToTspans(t,r,(function(){!function(t,e,r,n){var i=t.data()[0][0];if(!r._inHover&&i&&!i.trace.showlegend)return void t.remove();var a=t.select("g[class*=math-group]"),o=a.node();r||(r=e._fullLayout.legend);var s,l=r.borderwidth;s=1===n?r.title.font:i.groupTitle?i.groupTitle.font:r.font;var u,h,d=s.size*m;if(o){var g=c.bBox(o);u=g.height,h=g.width,1===n?c.setTranslate(a,l,l+.75*u):c.setTranslate(a,0,.25*u)}else{var v=t.select(1===n?".legendtitletext":".legendtext"),y=f.lineCount(v),x=v.node();if(u=d*y,h=x?c.bBox(x).width:0,1===n)"left"===r.title.side&&(h+=2*p.itemGap),f.positionText(v,l+p.titlePad,l+d);else{var b=2*p.itemGap+r.itemwidth;i.groupTitle&&(b=p.itemGap,h-=r.itemwidth),f.positionText(v,b,-d*((y-1)/2-.3))}}1===n?(r._titleWidth=h,r._titleHeight=u):(i.lineHeight=d,i.height=Math.max(u,16)+3,i.width=h)}(e,r,n,i)}))}function M(t){return i.isRightAnchor(t)?"right":i.isCenterAnchor(t)?"center":"left"}function S(t){return i.isBottomAnchor(t)?"bottom":i.isMiddleAnchor(t)?"middle":"top"}e.exports=function(t,e){return e||(e=t._fullLayout.legend||{}),function(t,e){var r,s,f=t._fullLayout,h="legend"+f._uid,d=e._inHover;d?(r=e.layer,h+="-hover"):r=f._infolayer;if(!r)return;t._legendMouseDownTime||(t._legendMouseDownTime=0);if(d){if(!e.entries)return;s=y(e.entries,e)}else{if(!t.calcdata)return;s=f.showlegend&&y(t.calcdata,e)}var m=f.hiddenlabels||[];if(!(d||f.showlegend&&s.length))return r.selectAll(".legend").remove(),f._topdefs.select("#"+h).remove(),a.autoMargin(t,"legend");var 
T=i.ensureSingle(r,"g","legend",(function(t){d||t.attr("pointer-events","all")})),E=i.ensureSingleById(f._topdefs,"clipPath",h,(function(t){t.append("rect")})),L=i.ensureSingle(T,"rect","bg",(function(t){t.attr("shape-rendering","crispEdges")}));L.call(u.stroke,e.bordercolor).call(u.fill,e.bgcolor).style("stroke-width",e.borderwidth+"px");var C=i.ensureSingle(T,"g","scrollbox"),P=e.title;if(e._titleWidth=0,e._titleHeight=0,P.text){var I=i.ensureSingle(C,"text","legendtitletext");I.attr("text-anchor","start").call(c.font,P.font).text(P.text),A(I,C,t,e,1)}else C.selectAll(".legendtitletext").remove();var O=i.ensureSingle(T,"rect","scrollbar",(function(t){t.attr(p.scrollBarEnterAttrs).call(u.fill,p.scrollBarColor)})),z=C.selectAll("g.groups").data(s);z.enter().append("g").attr("class","groups"),z.exit().remove();var D=z.selectAll("g.traces").data(i.identity);D.enter().append("g").attr("class","traces"),D.exit().remove(),D.style("opacity",(function(t){var e=t[0].trace;return o.traceIs(e,"pie-like")?-1!==m.indexOf(t[0].label)?.5:1:"legendonly"===e.visible?.5:1})).each((function(){n.select(this).call(w,t,e)})).call(x,t,e).each((function(){d||n.select(this).call(k,t)})),i.syncOrAsync([a.previousPromises,function(){return function(t,e,r,i){var a=t._fullLayout;i||(i=a.legend);var o=a._size,s=b.isVertical(i),l=b.isGrouped(i),u=i.borderwidth,f=2*u,h=p.itemGap,d=i.itemwidth+2*h,m=2*(u+h),g=S(i),v=i.y<0||0===i.y&&"top"===g,y=i.y>1||1===i.y&&"bottom"===g,x=i.tracegroupgap;i._maxHeight=Math.max(v||y?a.height/2:o.h,30);var _=0;i._width=0,i._height=0;var w=function(t){var e=0,r=0,n=t.title.side;n&&(-1!==n.indexOf("left")&&(e=t._titleWidth),-1!==n.indexOf("top")&&(r=t._titleHeight));return[e,r]}(i);if(s)r.each((function(t){var 
e=t[0].height;c.setTranslate(this,u+w[0],u+w[1]+i._height+e/2+h),i._height+=e,i._width=Math.max(i._width,t[0].width)})),_=d+i._width,i._width+=h+d+f,i._height+=m,l&&(e.each((function(t,e){c.setTranslate(this,0,e*i.tracegroupgap)})),i._height+=(i._lgroupsLength-1)*i.tracegroupgap);else{var T=M(i),k=i.x<0||0===i.x&&"right"===T,A=i.x>1||1===i.x&&"left"===T,E=y||v,L=a.width/2;i._maxWidth=Math.max(k?E&&"left"===T?o.l+o.w:L:A?E&&"right"===T?o.r+o.w:L:o.w,2*d);var C=0,P=0;r.each((function(t){var e=t[0].width+d;C=Math.max(C,e),P+=e})),_=null;var I=0;if(l){var O=0,z=0,D=0;e.each((function(){var t=0,e=0;n.select(this).selectAll("g.traces").each((function(r){var n=r[0].width,i=r[0].height;c.setTranslate(this,w[0],w[1]+u+h+i/2+e),e+=i,t=Math.max(t,d+n)}));var r=t+h;z>0&&r+u+z>i._maxWidth?(I=Math.max(I,z),z=0,D+=O+x,O=e):O=Math.max(O,e),c.setTranslate(this,z,D),z+=r})),i._width=Math.max(I,z)+u,i._height=D+O+m}else{var R=r.size(),F=P+f+(R-1)*h=i._maxWidth&&(I=Math.max(I,U),N=0,j+=B,i._height+=B,B=0),c.setTranslate(this,w[0]+u+N,w[1]+u+j+e/2+h),U=N+r+h,N+=n,B=Math.max(B,e)})),F?(i._width=N+f,i._height=B+m):(i._width=Math.max(I,U)+f,i._height+=B+m)}}i._width=Math.ceil(Math.max(i._width+w[0],i._titleWidth+2*(u+p.titlePad))),i._height=Math.ceil(Math.max(i._height+w[1],i._titleHeight+2*(u+p.itemGap))),i._effHeight=Math.min(i._height,i._maxHeight);var V=t._context.edits,H=V.legendText||V.legendPosition;r.each((function(t){var e=n.select(this).select(".legendtoggle"),r=t[0].height,i=H?d:_||d+t[0].width;s||(i+=h/2),c.setRect(e,0,-r/2,i,r)}))}(t,z,D,e)},function(){var s,u,m,y,x=f._size,b=e.borderwidth;if(!d){if(function(t){var e=t._fullLayout.legend,r=M(e),n=S(e);return a.autoMargin(t,"legend",{x:e.x,y:e.y,l:e._width*g[r],r:e._width*v[r],b:e._effHeight*v[n],t:e._effHeight*g[n]})}(t))return;var w=x.l+x.w*e.x-g[M(e)]*e._width,k=x.t+x.h*(1-e.y)-g[S(e)]*e._effHeight;if(f.margin.autoexpand){var 
A=w,P=k;w=i.constrain(w,0,f.width-e._width),k=i.constrain(k,0,f.height-e._effHeight),w!==A&&i.log("Constrain legend.x to make legend fit inside graph"),k!==P&&i.log("Constrain legend.y to make legend fit inside graph")}c.setTranslate(T,w,k)}if(O.on(".drag",null),T.on("wheel",null),d||e._height<=e._maxHeight||t._context.staticPlot){var I=e._effHeight;d&&(I=e._height),L.attr({width:e._width-b,height:I-b,x:b/2,y:b/2}),c.setTranslate(C,0,0),E.select("rect").attr({width:e._width-2*b,height:I-2*b,x:b,y:b}),c.setClipUrl(C,h,t),c.setRect(O,0,0,0,0),delete e._scrollY}else{var z,D,R,F=Math.max(p.scrollBarMinHeight,e._effHeight*e._effHeight/e._height),B=e._effHeight-F-2*p.scrollBarMargin,N=e._height-e._effHeight,j=B/N,U=Math.min(e._scrollY||0,N);L.attr({width:e._width-2*b+p.scrollBarWidth+p.scrollBarMargin,height:e._effHeight-b,x:b/2,y:b/2}),E.select("rect").attr({width:e._width-2*b+p.scrollBarWidth+p.scrollBarMargin,height:e._effHeight-2*b,x:b,y:b+U}),c.setClipUrl(C,h,t),q(U,F,j),T.on("wheel",(function(){q(U=i.constrain(e._scrollY+n.event.deltaY/B*N,0,N),F,j),0!==U&&U!==N&&n.event.preventDefault()}));var V=n.behavior.drag().on("dragstart",(function(){var t=n.event.sourceEvent;z="touchstart"===t.type?t.changedTouches[0].clientY:t.clientY,R=U})).on("drag",(function(){var t=n.event.sourceEvent;2===t.buttons||t.ctrlKey||(D="touchmove"===t.type?t.changedTouches[0].clientY:t.clientY,q(U=function(t,e,r){var n=(r-e)/j+t;return i.constrain(n,0,N)}(R,z,D),F,j))}));O.call(V);var H=n.behavior.drag().on("dragstart",(function(){var t=n.event.sourceEvent;"touchstart"===t.type&&(z=t.changedTouches[0].clientY,R=U)})).on("drag",(function(){var t=n.event.sourceEvent;"touchmove"===t.type&&(D=t.changedTouches[0].clientY,q(U=function(t,e,r){var n=(e-r)/j+t;return i.constrain(n,0,N)}(R,z,D),F,j))}));C.call(H)}function 
q(r,n,i){e._scrollY=t._fullLayout.legend._scrollY=r,c.setTranslate(C,0,-r),c.setRect(O,e._width,p.scrollBarMargin+r*i,p.scrollBarWidth,n),E.select("rect").attr("y",b+r)}t._context.edits.legendPosition&&(T.classed("cursor-move",!0),l.init({element:T.node(),gd:t,prepFn:function(){var t=c.getTranslate(T);m=t.x,y=t.y},moveFn:function(t,r){var n=m+t,i=y+r;c.setTranslate(T,n,i),s=l.align(n,0,x.l,x.l+x.w,e.xanchor),u=l.align(i,0,x.t+x.h,x.t,e.yanchor)},doneFn:function(){void 0!==s&&void 0!==u&&o.call("_guiRelayout",t,{"legend.x":s,"legend.y":u})},clickFn:function(e,n){var i=r.selectAll("g.traces").filter((function(){var t=this.getBoundingClientRect();return n.clientX>=t.left&&n.clientX<=t.right&&n.clientY>=t.top&&n.clientY<=t.bottom}));i.size()>0&&_(t,T,i,e,n)}}))}],t)}(t,e)}},{"../../constants/alignment":466,"../../lib":498,"../../lib/events":487,"../../lib/svg_text_utils":524,"../../plots/plots":614,"../../registry":633,"../color":361,"../dragelement":380,"../drawing":383,"./constants":412,"./get_legend_data":415,"./handle_click":416,"./helpers":417,"./style":419,"@plotly/d3":58}],415:[function(t,e,r){"use strict";var n=t("../../registry"),i=t("./helpers");e.exports=function(t,e){var r,a,o=e._inHover,s=i.isGrouped(e),l=i.isReversed(e),c={},u=[],f=!1,h={},p=0,d=0;function m(t,r){if(""!==t&&i.isGrouped(e))-1===u.indexOf(t)?(u.push(t),f=!0,c[t]=[r]):c[t].push(r);else{var n="~~i"+p;u.push(n),c[n]=[r],p++}}for(r=0;rA&&(k=A)}w[r][0]._groupMinRank=k,w[r][0]._preGroupSort=r}var M=function(t,e){return t.trace.legendrank-e.trace.legendrank||t._preSort-e._preSort};for(w.forEach((function(t,e){t[0]._preGroupSort=e})),w.sort((function(t,e){return t[0]._groupMinRank-e[0]._groupMinRank||t[0]._preGroupSort-e[0]._preGroupSort})),r=0;rr?r:t}e.exports=function(t,e,r){var v=e._fullLayout;r||(r=v.legend);var y="constant"===r.itemsizing,x=r.itemwidth,b=(x+2*p.itemGap)/2,_=o(b,0),w=function(t,e,r,n){var i;if(t+1)i=t;else{if(!(e&&e.width>0))return 0;i=e.width}return y?n:Math.min(i,r)};function 
T(t,a,o){var u=t[0].trace,f=u.marker||{},h=f.line||{},p=o?u.visible&&u.type===o:i.traceIs(u,"bar"),d=n.select(a).select("g.legendpoints").selectAll("path.legend"+o).data(p?[t]:[]);d.enter().append("path").classed("legend"+o,!0).attr("d","M6,6H-6V-6H6Z").attr("transform",_),d.exit().remove(),d.each((function(t){var i=n.select(this),a=t[0],o=w(a.mlw,f.line,5,2);i.style("stroke-width",o+"px");var p=a.mcc;if(!r._inHover&&"mc"in a){var d=c(f),m=d.mid;void 0===m&&(m=(d.max+d.min)/2),p=s.tryColorscale(f,"")(m)}var v=p||a.mc||f.color,y=f.pattern,x=y&&s.getPatternAttr(y.shape,0,"");if(x){var b=s.getPatternAttr(y.bgcolor,0,null),_=s.getPatternAttr(y.fgcolor,0,null),T=y.fgopacity,k=g(y.size,8,10),A=g(y.solidity,.5,1),M="legend-"+u.uid;i.call(s.pattern,"legend",e,M,x,k,A,p,y.fillmode,b,_,T)}else i.call(l.fill,v);o&&l.stroke(i,a.mlc||h.color)}))}function k(t,e,r){var o=t[0],s=o.trace,l=r?s.visible&&s.type===r:i.traceIs(s,r),c=n.select(e).select("g.legendpoints").selectAll("path.legend"+r).data(l?[t]:[]);if(c.enter().append("path").classed("legend"+r,!0).attr("d","M6,6H-6V-6H6Z").attr("transform",_),c.exit().remove(),c.size()){var u=(s.marker||{}).line,p=w(h(u.width,o.pts),u,5,2),d=a.minExtend(s,{marker:{line:{width:p}}});d.marker.line.color=u.color;var m=a.minExtend(o,{trace:d});f(c,m,d)}}t.each((function(t){var e=n.select(this),i=a.ensureSingle(e,"g","layers");i.style("opacity",t[0].trace.opacity);var s=r.valign,l=t[0].lineHeight,c=t[0].height;if("middle"!==s&&l&&c){var u={top:1,bottom:-1}[s]*(.5*(l-c+3));i.attr("transform",o(0,u))}else i.attr("transform",null);i.selectAll("g.legendfill").data([t]).enter().append("g").classed("legendfill",!0),i.selectAll("g.legendlines").data([t]).enter().append("g").classed("legendlines",!0);var f=i.selectAll("g.legendsymbols").data([t]);f.enter().append("g").classed("legendsymbols",!0),f.selectAll("g.legendpoints").data([t]).enter().append("g").classed("legendpoints",!0)})).each((function(t){var 
r,i=t[0].trace,o=[];if(i.visible)switch(i.type){case"histogram2d":case"heatmap":o=[["M-15,-2V4H15V-2Z"]],r=!0;break;case"choropleth":case"choroplethmapbox":o=[["M-6,-6V6H6V-6Z"]],r=!0;break;case"densitymapbox":o=[["M-6,0 a6,6 0 1,0 12,0 a 6,6 0 1,0 -12,0"]],r="radial";break;case"cone":o=[["M-6,2 A2,2 0 0,0 -6,6 V6L6,4Z"],["M-6,-6 A2,2 0 0,0 -6,-2 L6,-4Z"],["M-6,-2 A2,2 0 0,0 -6,2 L6,0Z"]],r=!1;break;case"streamtube":o=[["M-6,2 A2,2 0 0,0 -6,6 H6 A2,2 0 0,1 6,2 Z"],["M-6,-6 A2,2 0 0,0 -6,-2 H6 A2,2 0 0,1 6,-6 Z"],["M-6,-2 A2,2 0 0,0 -6,2 H6 A2,2 0 0,1 6,-2 Z"]],r=!1;break;case"surface":o=[["M-6,-6 A2,3 0 0,0 -6,0 H6 A2,3 0 0,1 6,-6 Z"],["M-6,1 A2,3 0 0,1 -6,6 H6 A2,3 0 0,0 6,0 Z"]],r=!0;break;case"mesh3d":o=[["M-6,6H0L-6,-6Z"],["M6,6H0L6,-6Z"],["M-6,-6H6L0,6Z"]],r=!1;break;case"volume":o=[["M-6,6H0L-6,-6Z"],["M6,6H0L6,-6Z"],["M-6,-6H6L0,6Z"]],r=!0;break;case"isosurface":o=[["M-6,6H0L-6,-6Z"],["M6,6H0L6,-6Z"],["M-6,-6 A12,24 0 0,0 6,-6 L0,6Z"]],r=!1}var u=n.select(this).select("g.legendpoints").selectAll("path.legend3dandfriends").data(o);u.enter().append("path").classed("legend3dandfriends",!0).attr("transform",_).style("stroke-miterlimit",1),u.exit().remove(),u.each((function(t,o){var u,f=n.select(this),h=c(i),p=h.colorscale,m=h.reversescale;if(p){if(!r){var g=p.length;u=0===o?p[m?g-1:0][1]:1===o?p[m?0:g-1][1]:p[Math.floor((g-1)/2)][1]}}else{var v=i.vertexcolor||i.facecolor||i.color;u=a.isArrayOrTypedArray(v)?v[o]||v[0]:v}f.attr("d",t[0]),u?f.call(l.fill,u):f.call((function(t){if(t.size()){var n="legendfill-"+i.uid;s.gradient(t,e,n,d(m,"radial"===r),p,"fill")}}))}))})).each((function(t){var e=t[0].trace,r="waterfall"===e.type;if(t[0]._distinct&&r){var i=t[0].trace[t[0].dir].marker;return t[0].mc=i.color,t[0].mlw=i.line.width,t[0].mlc=i.line.color,T(t,this,"waterfall")}var a=[];e.visible&&r&&(a=t[0].hasTotals?[["increasing","M-6,-6V6H0Z"],["totals","M6,6H0L-6,-6H-0Z"],["decreasing","M6,6V-6H0Z"]]:[["increasing","M-6,-6V6H6Z"],["decreasing","M6,6V-6H-6Z"]]);var 
o=n.select(this).select("g.legendpoints").selectAll("path.legendwaterfall").data(a);o.enter().append("path").classed("legendwaterfall",!0).attr("transform",_).style("stroke-miterlimit",1),o.exit().remove(),o.each((function(t){var r=n.select(this),i=e[t[0]].marker,a=w(void 0,i.line,5,2);r.attr("d",t[1]).style("stroke-width",a+"px").call(l.fill,i.color),a&&r.call(l.stroke,i.line.color)}))})).each((function(t){T(t,this,"funnel")})).each((function(t){T(t,this)})).each((function(t){var r=t[0].trace,o=n.select(this).select("g.legendpoints").selectAll("path.legendbox").data(r.visible&&i.traceIs(r,"box-violin")?[t]:[]);o.enter().append("path").classed("legendbox",!0).attr("d","M6,6H-6V-6H6Z").attr("transform",_),o.exit().remove(),o.each((function(){var t=n.select(this);if("all"!==r.boxpoints&&"all"!==r.points||0!==l.opacity(r.fillcolor)||0!==l.opacity((r.line||{}).color)){var i=w(void 0,r.line,5,2);t.style("stroke-width",i+"px").call(l.fill,r.fillcolor),i&&l.stroke(t,r.line.color)}else{var c=a.minExtend(r,{marker:{size:y?12:a.constrain(r.marker.size,2,16),sizeref:1,sizemin:1,sizemode:"diameter"}});o.call(s.pointStyle,c,e)}}))})).each((function(t){k(t,this,"funnelarea")})).each((function(t){k(t,this,"pie")})).each((function(t){var r,i,o=m(t),l=o.showFill,f=o.showLine,h=o.showGradientLine,p=o.showGradientFill,g=o.anyFill,v=o.anyLine,y=t[0],b=y.trace,_=c(b),T=_.colorscale,k=_.reversescale,A=u.hasMarkers(b)||!g?"M5,0":v?"M5,-2":"M5,-3",M=n.select(this),S=M.select(".legendfill").selectAll("path").data(l||p?[t]:[]);if(S.enter().append("path").classed("js-fill",!0),S.exit().remove(),S.attr("d",A+"h"+x+"v6h-"+x+"z").call((function(t){if(t.size())if(l)s.fillGroupStyle(t,e);else{var r="legendfill-"+b.uid;s.gradient(t,e,r,d(k),T,"fill")}})),f||h){var E=w(void 0,b.line,10,5);i=a.minExtend(b,{line:{width:E}}),r=[a.minExtend(y,{trace:i})]}var 
L=M.select(".legendlines").selectAll("path").data(f||h?[r]:[]);L.enter().append("path").classed("js-line",!0),L.exit().remove(),L.attr("d",A+(h?"l"+x+",0.0001":"h"+x)).call(f?s.lineGroupStyle:function(t){if(t.size()){var r="legendline-"+b.uid;s.lineGroupStyle(t),s.gradient(t,e,r,d(k),T,"stroke")}})})).each((function(t){var r,i,o=m(t),l=o.anyFill,c=o.anyLine,f=o.showLine,h=o.showMarker,p=t[0],d=p.trace,g=!h&&!c&&!l&&u.hasText(d);function v(t,e,r,n){var i=a.nestedProperty(d,t).get(),o=a.isArrayOrTypedArray(i)&&e?e(i):i;if(y&&o&&void 0!==n&&(o=n),r){if(or[1])return r[1]}return o}function x(t){return p._distinct&&p.index&&t[p.index]?t[p.index]:t[0]}if(h||g||f){var b={},w={};if(h){b.mc=v("marker.color",x),b.mx=v("marker.symbol",x),b.mo=v("marker.opacity",a.mean,[.2,1]),b.mlc=v("marker.line.color",x),b.mlw=v("marker.line.width",a.mean,[0,5],2),w.marker={sizeref:1,sizemin:1,sizemode:"diameter"};var T=v("marker.size",a.mean,[2,16],12);b.ms=T,w.marker.size=T}f&&(w.line={width:v("line.width",x,[0,10],5)}),g&&(b.tx="Aa",b.tp=v("textposition",x),b.ts=10,b.tc=v("textfont.color",x),b.tf=v("textfont.family",x)),r=[a.minExtend(p,b)],(i=a.minExtend(d,w)).selectedpoints=null,i.texttemplate=null}var k=n.select(this).select("g.legendpoints"),A=k.selectAll("path.scatterpts").data(h?r:[]);A.enter().insert("path",":first-child").classed("scatterpts",!0).attr("transform",_),A.exit().remove(),A.call(s.pointStyle,i,e),h&&(r[0].mrc=3);var M=k.selectAll("g.pointtext").data(g?r:[]);M.enter().append("g").classed("pointtext",!0).append("text").attr("transform",_),M.exit().remove(),M.selectAll("text").call(s.textPointStyle,i,e)})).each((function(t){var e=t[0].trace,r=n.select(this).select("g.legendpoints").selectAll("path.legendcandle").data(e.visible&&"candlestick"===e.type?[t,t]:[]);r.enter().append("path").classed("legendcandle",!0).attr("d",(function(t,e){return 
e?"M-15,0H-8M-8,6V-6H8Z":"M15,0H8M8,-6V6H-8Z"})).attr("transform",_).style("stroke-miterlimit",1),r.exit().remove(),r.each((function(t,r){var i=n.select(this),a=e[r?"increasing":"decreasing"],o=w(void 0,a.line,5,2);i.style("stroke-width",o+"px").call(l.fill,a.fillcolor),o&&l.stroke(i,a.line.color)}))})).each((function(t){var e=t[0].trace,r=n.select(this).select("g.legendpoints").selectAll("path.legendohlc").data(e.visible&&"ohlc"===e.type?[t,t]:[]);r.enter().append("path").classed("legendohlc",!0).attr("d",(function(t,e){return e?"M-15,0H0M-8,-6V0":"M15,0H0M8,6V0"})).attr("transform",_).style("stroke-miterlimit",1),r.exit().remove(),r.each((function(t,r){var i=n.select(this),a=e[r?"increasing":"decreasing"],o=w(void 0,a.line,5,2);i.style("fill","none").call(s.dashLine,a.line.dash,o),o&&l.stroke(i,a.line.color)}))}))}},{"../../lib":498,"../../registry":633,"../../traces/pie/helpers":901,"../../traces/pie/style_one":907,"../../traces/scatter/subtypes":947,"../color":361,"../colorscale/helpers":372,"../drawing":383,"./constants":412,"@plotly/d3":58}],420:[function(t,e,r){"use strict";t("./constants");e.exports={editType:"modebar",orientation:{valType:"enumerated",values:["v","h"],dflt:"h",editType:"modebar"},bgcolor:{valType:"color",editType:"modebar"},color:{valType:"color",editType:"modebar"},activecolor:{valType:"color",editType:"modebar"},uirevision:{valType:"any",editType:"none"},add:{valType:"string",arrayOk:!0,dflt:"",editType:"modebar"},remove:{valType:"string",arrayOk:!0,dflt:"",editType:"modebar"}}},{"./constants":422}],421:[function(t,e,r){"use strict";var n=t("../../registry"),i=t("../../plots/plots"),a=t("../../plots/cartesian/axis_ids"),o=t("../../fonts/ploticon"),s=t("../shapes/draw").eraseActiveShape,l=t("../../lib"),c=l._,u=e.exports={};function f(t,e){var r,i,o=e.currentTarget,s=o.getAttribute("data-attr"),l=o.getAttribute("data-val")||!0,c=t._fullLayout,u={},f=a.list(t,null,!0),h=c._cartesianSpikesEnabled;if("zoom"===s){var 
p,d="in"===l?.5:2,m=(1+d)/2,g=(1-d)/2;for(i=0;i1?(I=["toggleHover"],O=["resetViews"]):v?(P=["zoomInGeo","zoomOutGeo"],I=["hoverClosestGeo"],O=["resetGeo"]):g?(I=["hoverClosest3d"],O=["resetCameraDefault3d","resetCameraLastSave3d"]):w?(P=["zoomInMapbox","zoomOutMapbox"],I=["toggleHover"],O=["resetViewMapbox"]):b?I=["hoverClosestGl2d"]:y?I=["hoverClosestPie"]:A?(I=["hoverClosestCartesian","hoverCompareCartesian"],O=["resetViewSankey"]):I=["toggleHover"];m&&(I=["toggleSpikelines","hoverClosestCartesian","hoverCompareCartesian"]);(function(t){for(var e=0;e0)){var m=function(t,e,r){for(var n=r.filter((function(r){return e[r].anchor===t._id})),i=0,a=0;a=n.max)e=F[r+1];else if(t=n.pmax)e=F[r+1];else if(t0?h+c:c;return{ppad:c,ppadplus:u?d:m,ppadminus:u?m:d}}return{ppad:c}}function u(t,e,r,n,i){var s="category"===t.type||"multicategory"===t.type?t.r2c:t.d2c;if(void 0!==e)return[s(e),s(r)];if(n){var l,c,u,f,h=1/0,p=-1/0,d=n.match(a.segmentRE);for("date"===t.type&&(s=o.decodeDate(s)),l=0;lp&&(p=f)));return p>=h?[h,p]:void 0}}e.exports=function(t){var e=t._fullLayout,r=n.filterVisible(e.shapes);if(r.length&&t._fullData.length)for(var o=0;oy?(k=f,E="y0",A=y,L="y1"):(k=y,E="y1",A=f,L="y0");Z(n),Q(s,r),function(t,e,r){var n=e.xref,i=e.yref,o=a.getFromId(r,n),s=a.getFromId(r,i),l="";"paper"===n||o.autorange||(l+=n);"paper"===i||s.autorange||(l+=i);u.setClipUrl(t,l?"clip"+r._fullLayout._uid+l:null,r)}(e,r,t),X.moveFn="move"===O?J:K,X.altKey=n.altKey},doneFn:function(){if(v(t))return;p(e),$(s),b(e,t,r),n.call("_guiRelayout",t,l.getUpdateObj())},clickFn:function(){if(v(t))return;$(s)}};function Z(r){if(v(t))O=null;else if(R)O="path"===r.target.tagName?"move":"start-point"===r.target.attributes["data-line-point"].value?"resize-over-start-point":"resize-over-end-point";else{var n=X.element.getBoundingClientRect(),i=n.right-n.left,a=n.bottom-n.top,o=r.clientX-n.left,s=r.clientY-n.top,l=!F&&i>10&&a>10&&!r.shiftKey?h.getCursor(o/i,1-s/a):"move";p(e,l),O=l.split("-")[0]}}function 
J(n,i){if("path"===r.type){var a=function(t){return t},o=a,l=a;z?B("xanchor",r.xanchor=G(x+n)):(o=function(t){return G(H(t)+n)},N&&"date"===N.type&&(o=m.encodeDate(o))),D?B("yanchor",r.yanchor=Y(T+i)):(l=function(t){return Y(q(t)+i)},U&&"date"===U.type&&(l=m.encodeDate(l))),B("path",r.path=w(I,o,l))}else z?B("xanchor",r.xanchor=G(x+n)):(B("x0",r.x0=G(c+n)),B("x1",r.x1=G(g+n))),D?B("yanchor",r.yanchor=Y(T+i)):(B("y0",r.y0=Y(f+i)),B("y1",r.y1=Y(y+i)));e.attr("d",_(t,r)),Q(s,r)}function K(n,i){if(F){var a=function(t){return t},o=a,l=a;z?B("xanchor",r.xanchor=G(x+n)):(o=function(t){return G(H(t)+n)},N&&"date"===N.type&&(o=m.encodeDate(o))),D?B("yanchor",r.yanchor=Y(T+i)):(l=function(t){return Y(q(t)+i)},U&&"date"===U.type&&(l=m.encodeDate(l))),B("path",r.path=w(I,o,l))}else if(R){if("resize-over-start-point"===O){var u=c+n,h=D?f-i:f+i;B("x0",r.x0=z?u:G(u)),B("y0",r.y0=D?h:Y(h))}else if("resize-over-end-point"===O){var p=g+n,d=D?y-i:y+i;B("x1",r.x1=z?p:G(p)),B("y1",r.y1=D?d:Y(d))}}else{var v=function(t){return-1!==O.indexOf(t)},b=v("n"),j=v("s"),V=v("w"),W=v("e"),X=b?k+i:k,Z=j?A+i:A,J=V?M+n:M,K=W?S+n:S;D&&(b&&(X=k-i),j&&(Z=A-i)),(!D&&Z-X>10||D&&X-Z>10)&&(B(E,r[E]=D?X:Y(X)),B(L,r[L]=D?Z:Y(Z))),K-J>10&&(B(C,r[C]=z?J:G(J)),B(P,r[P]=z?K:G(K)))}e.attr("d",_(t,r)),Q(s,r)}function Q(t,e){(z||D)&&function(){var r="path"!==e.type,n=t.selectAll(".visual-cue").data([0]);n.enter().append("path").attr({fill:"#fff","fill-rule":"evenodd",stroke:"#000","stroke-width":1}).classed("visual-cue",!0);var a=H(z?e.xanchor:i.midRange(r?[e.x0,e.x1]:m.extractPathCoords(e.path,d.paramIsX))),o=q(D?e.yanchor:i.midRange(r?[e.y0,e.y1]:m.extractPathCoords(e.path,d.paramIsY)));if(a=m.roundPositionForSharpStrokeRendering(a,1),o=m.roundPositionForSharpStrokeRendering(o,1),z&&D){var s="M"+(a-1-1)+","+(o-1-1)+"h-8v2h8 v8h2v-8 h8v-2h-8 v-8h-2 Z";n.attr("d",s)}else if(z){var l="M"+(a-1-1)+","+(o-9-1)+"v18 h2 v-18 Z";n.attr("d",l)}else{var c="M"+(a-9-1)+","+(o-1-1)+"h18 v2 h-18 Z";n.attr("d",c)}}()}function 
$(t){t.selectAll(".visual-cue").remove()}h.init(X),W.node().onmousemove=Z}(t,z,l,e,r,O):!0===l.editable&&z.style("pointer-events",P||c.opacity(S)*M<=.5?"stroke":"all");z.node().addEventListener("click",(function(){return function(t,e){if(!y(t))return;var r=+e.node().getAttribute("data-index");if(r>=0){if(r===t._fullLayout._activeShapeIndex)return void T(t);t._fullLayout._activeShapeIndex=r,t._fullLayout._deactivateShape=T,g(t)}}(t,z)}))}}function b(t,e,r){var n=(r.xref+r.yref).replace(/paper/g,"").replace(/[xyz][1-9]* *domain/g,"");u.setClipUrl(t,n?"clip"+e._fullLayout._uid+n:null,e)}function _(t,e){var r,n,o,s,l,c,u,f,h=e.type,p=a.getRefType(e.xref),g=a.getRefType(e.yref),v=a.getFromId(t,e.xref),y=a.getFromId(t,e.yref),x=t._fullLayout._size;if(v?"domain"===p?n=function(t){return v._offset+v._length*t}:(r=m.shapePositionToRange(v),n=function(t){return v._offset+v.r2p(r(t,!0))}):n=function(t){return x.l+x.w*t},y?"domain"===g?s=function(t){return y._offset+y._length*(1-t)}:(o=m.shapePositionToRange(y),s=function(t){return y._offset+y.r2p(o(t,!0))}):s=function(t){return x.t+x.h*(1-t)},"path"===h)return v&&"date"===v.type&&(n=m.decodeDate(n)),y&&"date"===y.type&&(s=m.decodeDate(s)),function(t,e,r){var n=t.path,a=t.xsizemode,o=t.ysizemode,s=t.xanchor,l=t.yanchor;return n.replace(d.segmentRE,(function(t){var n=0,c=t.charAt(0),u=d.paramIsX[c],f=d.paramIsY[c],h=d.numParams[c],p=t.substr(1).replace(d.paramRE,(function(t){return u[n]?t="pixel"===a?e(s)+Number(t):e(t):f[n]&&(t="pixel"===o?r(l)-Number(t):r(t)),++n>h&&(t="X"),t}));return n>h&&(p=p.replace(/[\s,]*X.*/,""),i.log("Ignoring extra params in segment "+t)),c+p}))}(e,n,s);if("pixel"===e.xsizemode){var b=n(e.xanchor);l=b+e.x0,c=b+e.x1}else l=n(e.x0),c=n(e.x1);if("pixel"===e.ysizemode){var _=s(e.yanchor);u=_-e.y0,f=_-e.y1}else u=s(e.y0),f=s(e.y1);if("line"===h)return"M"+l+","+u+"L"+c+","+f;if("rect"===h)return"M"+l+","+u+"H"+c+"V"+f+"H"+l+"Z";var 
w=(l+c)/2,T=(u+f)/2,k=Math.abs(w-l),A=Math.abs(T-u),M="A"+k+","+A,S=w+k+","+T;return"M"+S+M+" 0 1,1 "+(w+","+(T-A))+M+" 0 0,1 "+S+"Z"}function w(t,e,r){return t.replace(d.segmentRE,(function(t){var n=0,i=t.charAt(0),a=d.paramIsX[i],o=d.paramIsY[i],s=d.numParams[i];return i+t.substr(1).replace(d.paramRE,(function(t){return n>=s||(a[n]?t=e(t):o[n]&&(t=r(t)),n++),t}))}))}function T(t){y(t)&&(t._fullLayout._activeShapeIndex>=0&&(l(t),delete t._fullLayout._activeShapeIndex,g(t)))}e.exports={draw:g,drawOne:x,eraseActiveShape:function(t){if(!y(t))return;l(t);var e=t._fullLayout._activeShapeIndex,r=(t.layout||{}).shapes||[];if(e=0&&f(v),r.attr("d",m(e)),A&&!h)&&(k=function(t,e){for(var r=0;r1&&(2!==t.length||"Z"!==t[1][0])&&(0===T&&(t[0][0]="M"),e[w]=t,y(),x())}}()}}function I(t,r){!function(t,r){if(e.length)for(var n=0;n0&&l0&&(s=s.transition().duration(e.transition.duration).ease(e.transition.easing)),s.attr("transform",l(o-.5*f.gripWidth,e._dims.currentValueTotalHeight))}}function E(t,e){var r=t._dims;return r.inputAreaStart+f.stepInset+(r.inputAreaLength-2*f.stepInset)*Math.min(1,Math.max(0,e))}function L(t,e){var r=t._dims;return Math.min(1,Math.max(0,(e-f.stepInset-r.inputAreaStart)/(r.inputAreaLength-2*f.stepInset-2*r.inputAreaStart)))}function C(t,e,r){var n=r._dims,i=s.ensureSingle(t,"rect",f.railTouchRectClass,(function(n){n.call(A,e,t,r).style("pointer-events","all")}));i.attr({width:n.inputAreaLength,height:Math.max(n.inputAreaWidth,f.tickOffset+r.ticklen+n.labelHeight)}).call(a.fill,r.bgcolor).attr("opacity",0),o.setTranslate(i,0,n.currentValueTotalHeight)}function P(t,e){var 
r=e._dims,n=r.inputAreaLength-2*f.railInset,i=s.ensureSingle(t,"rect",f.railRectClass);i.attr({width:n,height:f.railWidth,rx:f.railRadius,ry:f.railRadius,"shape-rendering":"crispEdges"}).call(a.stroke,e.bordercolor).call(a.fill,e.bgcolor).style("stroke-width",e.borderwidth+"px"),o.setTranslate(i,f.railInset,.5*(r.inputAreaWidth-f.railWidth)+r.currentValueTotalHeight)}e.exports=function(t){var e=t._fullLayout,r=function(t,e){for(var r=t[f.name],n=[],i=0;i0?[0]:[]);function s(e){e._commandObserver&&(e._commandObserver.remove(),delete e._commandObserver),i.autoMargin(t,g(e))}if(a.enter().append("g").classed(f.containerClassName,!0).style("cursor","ew-resize"),a.exit().each((function(){n.select(this).selectAll("g."+f.groupClassName).each(s)})).remove(),0!==r.length){var l=a.selectAll("g."+f.groupClassName).data(r,v);l.enter().append("g").classed(f.groupClassName,!0),l.exit().each(s).remove();for(var c=0;c0||h<0){var v={left:[-d,0],right:[d,0],top:[0,-d],bottom:[0,d]}[b.side];e.attr("transform",l(v[0],v[1]))}}}return R.call(F),O&&(E?R.on(".opacity",null):(A=0,M=!0,R.text(y).on("mouseover.opacity",(function(){n.select(this).transition().duration(h.SHOW_PLACEHOLDER).style("opacity",1)})).on("mouseout.opacity",(function(){n.select(this).transition().duration(h.HIDE_PLACEHOLDER).style("opacity",0)}))),R.call(f.makeEditable,{gd:t}).on("edit",(function(e){void 0!==x?o.call("_guiRestyle",t,v,e,x):o.call("_guiRelayout",t,v,e)})).on("cancel",(function(){this.text(this.attr("data-unformatted")).call(F)})).on("input",(function(t){this.text(t||" ").call(f.positionText,_.x,_.y)}))),R.classed("js-placeholder",M),T}}},{"../../constants/alignment":466,"../../constants/interactions":473,"../../lib":498,"../../lib/svg_text_utils":524,"../../plots/plots":614,"../../registry":633,"../color":361,"../drawing":383,"@plotly/d3":58,"fast-isnumeric":185}],460:[function(t,e,r){"use strict";var 
n=t("../../plots/font_attributes"),i=t("../color/attributes"),a=t("../../lib/extend").extendFlat,o=t("../../plot_api/edit_types").overrideAll,s=t("../../plots/pad_attributes"),l=t("../../plot_api/plot_template").templatedArray,c=l("button",{visible:{valType:"boolean"},method:{valType:"enumerated",values:["restyle","relayout","animate","update","skip"],dflt:"restyle"},args:{valType:"info_array",freeLength:!0,items:[{valType:"any"},{valType:"any"},{valType:"any"}]},args2:{valType:"info_array",freeLength:!0,items:[{valType:"any"},{valType:"any"},{valType:"any"}]},label:{valType:"string",dflt:""},execute:{valType:"boolean",dflt:!0}});e.exports=o(l("updatemenu",{_arrayAttrRegexps:[/^updatemenus\[(0|[1-9][0-9]+)\]\.buttons/],visible:{valType:"boolean"},type:{valType:"enumerated",values:["dropdown","buttons"],dflt:"dropdown"},direction:{valType:"enumerated",values:["left","right","up","down"],dflt:"down"},active:{valType:"integer",min:-1,dflt:0},showactive:{valType:"boolean",dflt:!0},buttons:c,x:{valType:"number",min:-2,max:3,dflt:-.05},xanchor:{valType:"enumerated",values:["auto","left","center","right"],dflt:"right"},y:{valType:"number",min:-2,max:3,dflt:1},yanchor:{valType:"enumerated",values:["auto","top","middle","bottom"],dflt:"top"},pad:a(s({editType:"arraydraw"}),{}),font:n({}),bgcolor:{valType:"color"},bordercolor:{valType:"color",dflt:i.borderLine},borderwidth:{valType:"number",min:0,dflt:1,editType:"arraydraw"}}),"arraydraw","from-root")},{"../../lib/extend":488,"../../plot_api/edit_types":531,"../../plot_api/plot_template":538,"../../plots/font_attributes":580,"../../plots/pad_attributes":613,"../color/attributes":360}],461:[function(t,e,r){"use 
strict";e.exports={name:"updatemenus",containerClassName:"updatemenu-container",headerGroupClassName:"updatemenu-header-group",headerClassName:"updatemenu-header",headerArrowClassName:"updatemenu-header-arrow",dropdownButtonGroupClassName:"updatemenu-dropdown-button-group",dropdownButtonClassName:"updatemenu-dropdown-button",buttonClassName:"updatemenu-button",itemRectClassName:"updatemenu-item-rect",itemTextClassName:"updatemenu-item-text",menuIndexAttrName:"updatemenu-active-index",autoMarginIdRoot:"updatemenu-",blankHeaderOpts:{label:" "},minWidth:30,minHeight:30,textPadX:24,arrowPadX:16,rx:2,ry:2,textOffsetX:12,textOffsetY:3,arrowOffsetX:4,gapButtonHeader:5,gapButton:2,activeColor:"#F4FAFF",hoverColor:"#F4FAFF",arrowSymbol:{left:"\u25c4",right:"\u25ba",up:"\u25b2",down:"\u25bc"}}},{}],462:[function(t,e,r){"use strict";var n=t("../../lib"),i=t("../../plots/array_container_defaults"),a=t("./attributes"),o=t("./constants").name,s=a.buttons;function l(t,e,r){function o(r,i){return n.coerce(t,e,a,r,i)}o("visible",i(t,e,{name:"buttons",handleItemDefaults:c}).length>0)&&(o("active"),o("direction"),o("type"),o("showactive"),o("x"),o("y"),n.noneOrAll(t,e,["x","y"]),o("xanchor"),o("yanchor"),o("pad.t"),o("pad.r"),o("pad.b"),o("pad.l"),n.coerceFont(o,"font",r.font),o("bgcolor",r.paper_bgcolor),o("bordercolor"),o("borderwidth"))}function c(t,e){function r(r,i){return n.coerce(t,e,s,r,i)}r("visible","skip"===t.method||Array.isArray(t.args))&&(r("method"),r("args"),r("args2"),r("label"),r("execute"))}e.exports=function(t,e){i(t,e,{name:o,handleItemDefaults:l})}},{"../../lib":498,"../../plots/array_container_defaults":544,"./attributes":460,"./constants":461}],463:[function(t,e,r){"use strict";var n=t("@plotly/d3"),i=t("../../plots/plots"),a=t("../color"),o=t("../drawing"),s=t("../../lib"),l=t("../../lib/svg_text_utils"),c=t("../../plot_api/plot_template").arrayEditor,u=t("../../constants/alignment").LINE_SPACING,f=t("./constants"),h=t("./scrollbox");function p(t){return 
t._index}function d(t,e){return+t.attr(f.menuIndexAttrName)===e._index}function m(t,e,r,n,i,a,o,s){e.active=o,c(t.layout,f.name,e).applyUpdate("active",o),"buttons"===e.type?v(t,n,null,null,e):"dropdown"===e.type&&(i.attr(f.menuIndexAttrName,"-1"),g(t,n,i,a,e),s||v(t,n,i,a,e))}function g(t,e,r,n,i){var a=s.ensureSingle(e,"g",f.headerClassName,(function(t){t.style("pointer-events","all")})),l=i._dims,c=i.active,u=i.buttons[c]||f.blankHeaderOpts,h={y:i.pad.t,yPad:0,x:i.pad.l,xPad:0,index:0},p={width:l.headerWidth,height:l.headerHeight};a.call(y,i,u,t).call(M,i,h,p),s.ensureSingle(e,"text",f.headerArrowClassName,(function(t){t.attr("text-anchor","end").call(o.font,i.font).text(f.arrowSymbol[i.direction])})).attr({x:l.headerWidth-f.arrowOffsetX+i.pad.l,y:l.headerHeight/2+f.textOffsetY+i.pad.t}),a.on("click",(function(){r.call(S,String(d(r,i)?-1:i._index)),v(t,e,r,n,i)})),a.on("mouseover",(function(){a.call(w)})),a.on("mouseout",(function(){a.call(T,i)})),o.setTranslate(e,l.lx,l.ly)}function v(t,e,r,a,o){r||(r=e).attr("pointer-events","all");var l=function(t){return-1==+t.attr(f.menuIndexAttrName)}(r)&&"buttons"!==o.type?[]:o.buttons,c="dropdown"===o.type?f.dropdownButtonClassName:f.buttonClassName,u=r.selectAll("g."+c).data(s.filterVisible(l)),h=u.enter().append("g").classed(c,!0),p=u.exit();"dropdown"===o.type?(h.attr("opacity","0").transition().attr("opacity","1"),p.transition().attr("opacity","0").remove()):p.remove();var d=0,g=0,v=o._dims,x=-1!==["up","down"].indexOf(o.direction);"dropdown"===o.type&&(x?g=v.headerHeight+f.gapButtonHeader:d=v.headerWidth+f.gapButtonHeader),"dropdown"===o.type&&"up"===o.direction&&(g=-f.gapButtonHeader+f.gapButton-v.openHeight),"dropdown"===o.type&&"left"===o.direction&&(d=-f.gapButtonHeader+f.gapButton-v.openWidth);var b={x:v.lx+d+o.pad.l,y:v.ly+g+o.pad.t,yPad:f.gapButton,xPad:f.gapButton,index:0},k={l:b.x+o.borderwidth,t:b.y+o.borderwidth};u.each((function(s,l){var 
c=n.select(this);c.call(y,o,s,t).call(M,o,b),c.on("click",(function(){n.event.defaultPrevented||(s.execute&&(s.args2&&o.active===l?(m(t,o,0,e,r,a,-1),i.executeAPICommand(t,s.method,s.args2)):(m(t,o,0,e,r,a,l),i.executeAPICommand(t,s.method,s.args))),t.emit("plotly_buttonclicked",{menu:o,button:s,active:o.active}))})),c.on("mouseover",(function(){c.call(w)})),c.on("mouseout",(function(){c.call(T,o),u.call(_,o)}))})),u.call(_,o),x?(k.w=Math.max(v.openWidth,v.headerWidth),k.h=b.y-k.t):(k.w=b.x-k.l,k.h=Math.max(v.openHeight,v.headerHeight)),k.direction=o.direction,a&&(u.size()?function(t,e,r,n,i,a){var o,s,l,c=i.direction,u="up"===c||"down"===c,h=i._dims,p=i.active;if(u)for(s=0,l=0;l0?[0]:[]);if(o.enter().append("g").classed(f.containerClassName,!0).style("cursor","pointer"),o.exit().each((function(){n.select(this).selectAll("g."+f.headerGroupClassName).each(a)})).remove(),0!==r.length){var l=o.selectAll("g."+f.headerGroupClassName).data(r,p);l.enter().append("g").classed(f.headerGroupClassName,!0);for(var c=s.ensureSingle(o,"g",f.dropdownButtonGroupClassName,(function(t){t.style("pointer-events","all")})),u=0;uw,A=s.barLength+2*s.barPad,M=s.barWidth+2*s.barPad,S=d,E=g+v;E+M>c&&(E=c-M);var L=this.container.selectAll("rect.scrollbar-horizontal").data(k?[0]:[]);L.exit().on(".drag",null).remove(),L.enter().append("rect").classed("scrollbar-horizontal",!0).call(i.fill,s.barColor),k?(this.hbar=L.attr({rx:s.barRadius,ry:s.barRadius,x:S,y:E,width:A,height:M}),this._hbarXMin=S+A/2,this._hbarTranslateMax=w-A):(delete this.hbar,delete this._hbarXMin,delete this._hbarTranslateMax);var C=v>T,P=s.barWidth+2*s.barPad,I=s.barLength+2*s.barPad,O=d+m,z=g;O+P>l&&(O=l-P);var 
D=this.container.selectAll("rect.scrollbar-vertical").data(C?[0]:[]);D.exit().on(".drag",null).remove(),D.enter().append("rect").classed("scrollbar-vertical",!0).call(i.fill,s.barColor),C?(this.vbar=D.attr({rx:s.barRadius,ry:s.barRadius,x:O,y:z,width:P,height:I}),this._vbarYMin=z+I/2,this._vbarTranslateMax=T-I):(delete this.vbar,delete this._vbarYMin,delete this._vbarTranslateMax);var R=this.id,F=u-.5,B=C?f+P+.5:f+.5,N=h-.5,j=k?p+M+.5:p+.5,U=o._topdefs.selectAll("#"+R).data(k||C?[0]:[]);if(U.exit().remove(),U.enter().append("clipPath").attr("id",R).append("rect"),k||C?(this._clipRect=U.select("rect").attr({x:Math.floor(F),y:Math.floor(N),width:Math.ceil(B)-Math.floor(F),height:Math.ceil(j)-Math.floor(N)}),this.container.call(a.setClipUrl,R,this.gd),this.bg.attr({x:d,y:g,width:m,height:v})):(this.bg.attr({width:0,height:0}),this.container.on("wheel",null).on(".drag",null).call(a.setClipUrl,null),delete this._clipRect),k||C){var V=n.behavior.drag().on("dragstart",(function(){n.event.sourceEvent.preventDefault()})).on("drag",this._onBoxDrag.bind(this));this.container.on("wheel",null).on("wheel",this._onBoxWheel.bind(this)).on(".drag",null).call(V);var H=n.behavior.drag().on("dragstart",(function(){n.event.sourceEvent.preventDefault(),n.event.sourceEvent.stopPropagation()})).on("drag",this._onBarDrag.bind(this));k&&this.hbar.on(".drag",null).call(H),C&&this.vbar.on(".drag",null).call(H)}this.setTranslate(e,r)},s.prototype.disable=function(){(this.hbar||this.vbar)&&(this.bg.attr({width:0,height:0}),this.container.on("wheel",null).on(".drag",null).call(a.setClipUrl,null),delete this._clipRect),this.hbar&&(this.hbar.on(".drag",null),this.hbar.remove(),delete this.hbar,delete this._hbarXMin,delete this._hbarTranslateMax),this.vbar&&(this.vbar.on(".drag",null),this.vbar.remove(),delete this.vbar,delete this._vbarYMin,delete this._vbarTranslateMax)},s.prototype._onBoxDrag=function(){var 
t=this.translateX,e=this.translateY;this.hbar&&(t-=n.event.dx),this.vbar&&(e-=n.event.dy),this.setTranslate(t,e)},s.prototype._onBoxWheel=function(){var t=this.translateX,e=this.translateY;this.hbar&&(t+=n.event.deltaY),this.vbar&&(e+=n.event.deltaY),this.setTranslate(t,e)},s.prototype._onBarDrag=function(){var t=this.translateX,e=this.translateY;if(this.hbar){var r=t+this._hbarXMin,i=r+this._hbarTranslateMax;t=(o.constrain(n.event.x,r,i)-r)/(i-r)*(this.position.w-this._box.w)}if(this.vbar){var a=e+this._vbarYMin,s=a+this._vbarTranslateMax;e=(o.constrain(n.event.y,a,s)-a)/(s-a)*(this.position.h-this._box.h)}this.setTranslate(t,e)},s.prototype.setTranslate=function(t,e){var r=this.position.w-this._box.w,n=this.position.h-this._box.h;if(t=o.constrain(t||0,0,r),e=o.constrain(e||0,0,n),this.translateX=t,this.translateY=e,this.container.call(a.setTranslate,this._box.l-this.position.l-t,this._box.t-this.position.t-e),this._clipRect&&this._clipRect.attr({x:Math.floor(this.position.l+t-.5),y:Math.floor(this.position.t+e-.5)}),this.hbar){var i=t/r;this.hbar.call(a.setTranslate,t+i*this._hbarTranslateMax,e)}if(this.vbar){var s=e/n;this.vbar.call(a.setTranslate,t,e+s*this._vbarTranslateMax)}}},{"../../lib":498,"../color":361,"../drawing":383,"@plotly/d3":58}],466:[function(t,e,r){"use strict";e.exports={FROM_BL:{left:0,center:.5,right:1,bottom:0,middle:.5,top:1},FROM_TL:{left:0,center:.5,right:1,bottom:1,middle:.5,top:0},FROM_BR:{left:1,center:.5,right:0,bottom:0,middle:.5,top:1},LINE_SPACING:1.3,CAP_SHIFT:.7,MID_SHIFT:.35,OPPOSITE_SIDE:{left:"right",right:"left",top:"bottom",bottom:"top"}}},{}],467:[function(t,e,r){"use strict";e.exports={axisRefDescription:function(t,e,r){return["If set to a",t,"axis id (e.g. *"+t+"* or","*"+t+"2*), the `"+t+"` position refers to a",t,"coordinate. If set to *paper*, the `"+t+"`","position refers to the distance from the",e,"of the plotting","area in normalized coordinates where *0* (*1*) corresponds to the",e,"("+r+"). 
If set to a",t,"axis ID followed by","*domain* (separated by a space), the position behaves like for","*paper*, but refers to the distance in fractions of the domain","length from the",e,"of the domain of that axis: e.g.,","*"+t+"2 domain* refers to the domain of the second",t," axis and a",t,"position of 0.5 refers to the","point between the",e,"and the",r,"of the domain of the","second",t,"axis."].join(" ")}}},{}],468:[function(t,e,r){"use strict";e.exports={INCREASING:{COLOR:"#3D9970",SYMBOL:"\u25b2"},DECREASING:{COLOR:"#FF4136",SYMBOL:"\u25bc"}}},{}],469:[function(t,e,r){"use strict";e.exports={FORMAT_LINK:"https://github.com/d3/d3-format/tree/v1.4.5#d3-format",DATE_FORMAT_LINK:"https://github.com/d3/d3-time-format/tree/v2.2.3#locale_format"}},{}],470:[function(t,e,r){"use strict";e.exports={COMPARISON_OPS:["=","!=","<",">=",">","<="],COMPARISON_OPS2:["=","<",">=",">","<="],INTERVAL_OPS:["[]","()","[)","(]","][",")(","](",")["],SET_OPS:["{}","}{"],CONSTRAINT_REDUCTION:{"=":"=","<":"<","<=":"<",">":">",">=":">","[]":"[]","()":"[]","[)":"[]","(]":"[]","][":"][",")(":"][","](":"][",")[":"]["}}},{}],471:[function(t,e,r){"use strict";e.exports={solid:[[],0],dot:[[.5,1],200],dash:[[.5,1],50],longdash:[[.5,1],10],dashdot:[[.5,.625,.875,1],50],longdashdot:[[.5,.7,.8,1],10]}},{}],472:[function(t,e,r){"use strict";e.exports={circle:"\u25cf","circle-open":"\u25cb",square:"\u25a0","square-open":"\u25a1",diamond:"\u25c6","diamond-open":"\u25c7",cross:"+",x:"\u274c"}},{}],473:[function(t,e,r){"use strict";e.exports={SHOW_PLACEHOLDER:100,HIDE_PLACEHOLDER:1e3,DESELECTDIM:.2}},{}],474:[function(t,e,r){"use strict";e.exports={BADNUM:void 
0,FP_SAFE:1e-4*Number.MAX_VALUE,ONEMAXYEAR:316224e5,ONEAVGYEAR:315576e5,ONEMINYEAR:31536e6,ONEMAXQUARTER:79488e5,ONEAVGQUARTER:78894e5,ONEMINQUARTER:76896e5,ONEMAXMONTH:26784e5,ONEAVGMONTH:26298e5,ONEMINMONTH:24192e5,ONEWEEK:6048e5,ONEDAY:864e5,ONEHOUR:36e5,ONEMIN:6e4,ONESEC:1e3,EPOCHJD:2440587.5,ALMOST_EQUAL:.999999,LOG_CLIP:10,MINUS_SIGN:"\u2212"}},{}],475:[function(t,e,r){"use strict";r.xmlns="http://www.w3.org/2000/xmlns/",r.svg="http://www.w3.org/2000/svg",r.xlink="http://www.w3.org/1999/xlink",r.svgAttrs={xmlns:r.svg,"xmlns:xlink":r.xlink}},{}],476:[function(t,e,r){"use strict";r.version=t("./version").version,t("native-promise-only"),t("../build/plotcss");for(var n=t("./registry"),i=r.register=n.register,a=t("./plot_api"),o=Object.keys(a),s=0;splotly-logomark"}}},{}],478:[function(t,e,r){"use strict";r.isLeftAnchor=function(t){return"left"===t.xanchor||"auto"===t.xanchor&&t.x<=1/3},r.isCenterAnchor=function(t){return"center"===t.xanchor||"auto"===t.xanchor&&t.x>1/3&&t.x<2/3},r.isRightAnchor=function(t){return"right"===t.xanchor||"auto"===t.xanchor&&t.x>=2/3},r.isTopAnchor=function(t){return"top"===t.yanchor||"auto"===t.yanchor&&t.y>=2/3},r.isMiddleAnchor=function(t){return"middle"===t.yanchor||"auto"===t.yanchor&&t.y>1/3&&t.y<2/3},r.isBottomAnchor=function(t){return"bottom"===t.yanchor||"auto"===t.yanchor&&t.y<=1/3}},{}],479:[function(t,e,r){"use strict";var n=t("./mod"),i=n.mod,a=n.modHalf,o=Math.PI,s=2*o;function l(t){return Math.abs(t[1]-t[0])>s-1e-14}function c(t,e){return a(e-t,s)}function u(t,e){if(l(e))return!0;var r,n;e[0](n=i(n,s))&&(n+=s);var a=i(t,s),o=a+s;return a>=r&&a<=n||o>=r&&o<=n}function f(t,e,r,n,i,a,c){i=i||0,a=a||0;var u,f,h,p,d,m=l([r,n]);function g(t,e){return[t*Math.cos(e)+i,a-t*Math.sin(e)]}m?(u=0,f=o,h=s):r=i&&t<=a);var i,a},pathArc:function(t,e,r,n,i){return f(null,t,e,r,n,i,0)},pathSector:function(t,e,r,n,i){return f(null,t,e,r,n,i,1)},pathAnnulus:function(t,e,r,n,i,a){return 
f(t,e,r,n,i,a,1)}}},{"./mod":505}],480:[function(t,e,r){"use strict";var n=Array.isArray,i="undefined"!=typeof ArrayBuffer&&ArrayBuffer.isView?ArrayBuffer:{isView:function(){return!1}},a="undefined"==typeof DataView?function(){}:DataView;function o(t){return i.isView(t)&&!(t instanceof a)}function s(t){return n(t)||o(t)}function l(t,e,r){if(s(t)){if(s(t[0])){for(var n=r,i=0;ii.max?e.set(r):e.set(+t)}},integer:{coerceFunction:function(t,e,r,i){t%1||!n(t)||void 0!==i.min&&ti.max?e.set(r):e.set(+t)}},string:{coerceFunction:function(t,e,r,n){if("string"!=typeof t){var i="number"==typeof t;!0!==n.strict&&i?e.set(String(t)):e.set(r)}else n.noBlank&&!t?e.set(r):e.set(t)}},color:{coerceFunction:function(t,e,r){i(t).isValid()?e.set(t):e.set(r)}},colorlist:{coerceFunction:function(t,e,r){Array.isArray(t)&&t.length&&t.every((function(t){return i(t).isValid()}))?e.set(t):e.set(r)}},colorscale:{coerceFunction:function(t,e,r){e.set(o.get(t,r))}},angle:{coerceFunction:function(t,e,r){"auto"===t?e.set("auto"):n(t)?e.set(f(+t,360)):e.set(r)}},subplotid:{coerceFunction:function(t,e,r,n){var i=n.regex||u(r);"string"==typeof t&&i.test(t)?e.set(t):e.set(r)},validateFunction:function(t,e){var r=e.dflt;return t===r||"string"==typeof t&&!!u(r).test(t)}},flaglist:{coerceFunction:function(t,e,r,n){if("string"==typeof t)if(-1===(n.extras||[]).indexOf(t)){for(var i=t.split("+"),a=0;a=n&&t<=i?t:u}if("string"!=typeof t&&"number"!=typeof t)return u;t=String(t);var c=_(e),v=t.charAt(0);!c||"G"!==v&&"g"!==v||(t=t.substr(1),e="");var w=c&&"chinese"===e.substr(0,7),T=t.match(w?x:y);if(!T)return u;var k=T[1],A=T[3]||"1",M=Number(T[5]||1),S=Number(T[7]||0),E=Number(T[9]||0),L=Number(T[11]||0);if(c){if(2===k.length)return u;var C;k=Number(k);try{var P=g.getComponentMethod("calendars","getCal")(e);if(w){var I="i"===A.charAt(A.length-1);A=parseInt(A,10),C=P.newDate(k,P.toMonthIndex(k,A,I),M)}else C=P.newDate(k,Number(A),M)}catch(t){return u}return 
C?(C.toJD()-m)*f+S*h+E*p+L*d:u}k=2===k.length?(Number(k)+2e3-b)%100+b:Number(k),A-=1;var O=new Date(Date.UTC(2e3,A,M,S,E));return O.setUTCFullYear(k),O.getUTCMonth()!==A||O.getUTCDate()!==M?u:O.getTime()+L*d},n=r.MIN_MS=r.dateTime2ms("-9999"),i=r.MAX_MS=r.dateTime2ms("9999-12-31 23:59:59.9999"),r.isDateTime=function(t,e){return r.dateTime2ms(t,e)!==u};var T=90*f,k=3*h,A=5*p;function M(t,e,r,n,i){if((e||r||n||i)&&(t+=" "+w(e,2)+":"+w(r,2),(n||i)&&(t+=":"+w(n,2),i))){for(var a=4;i%10==0;)a-=1,i/=10;t+="."+w(i,a)}return t}r.ms2DateTime=function(t,e,r){if("number"!=typeof t||!(t>=n&&t<=i))return u;e||(e=0);var a,o,s,c,y,x,b=Math.floor(10*l(t+.05,1)),w=Math.round(t-b/10);if(_(r)){var S=Math.floor(w/f)+m,E=Math.floor(l(t,f));try{a=g.getComponentMethod("calendars","getCal")(r).fromJD(S).formatDate("yyyy-mm-dd")}catch(t){a=v("G%Y-%m-%d")(new Date(w))}if("-"===a.charAt(0))for(;a.length<11;)a="-0"+a.substr(1);else for(;a.length<10;)a="0"+a;o=e=n+f&&t<=i-f))return u;var e=Math.floor(10*l(t+.05,1)),r=new Date(Math.round(t-e/10));return M(a("%Y-%m-%d")(r),r.getHours(),r.getMinutes(),r.getSeconds(),10*r.getUTCMilliseconds()+e)},r.cleanDate=function(t,e,n){if(t===u)return e;if(r.isJSDate(t)||"number"==typeof t&&isFinite(t)){if(_(n))return s.error("JS Dates and milliseconds are incompatible with world calendars",t),e;if(!(t=r.ms2DateTimeLocal(+t))&&void 0!==e)return e}else if(!r.isDateTime(t,n))return s.error("unrecognized date",t),e;return t};var S=/%\d?f/g,E=/%h/g,L={1:"1",2:"1",3:"2",4:"2"};function C(t,e,r,n){t=t.replace(S,(function(t){var r=Math.min(+t.charAt(1)||6,6);return(e/1e3%1+2).toFixed(r).substr(2).replace(/0+$/,"")||"0"}));var i=new Date(Math.floor(e+.05));if(t=t.replace(E,(function(){return L[r("%q")(i)]})),_(n))try{t=g.getComponentMethod("calendars","worldCalFmt")(t,e,n)}catch(t){return"Invalid"}return r(t)(i)}var P=[59,59.9,59.99,59.999,59.9999];r.formatDate=function(t,e,r,n,i,a){if(i=_(i)&&i,!e)if("y"===r)e=a.year;else if("m"===r)e=a.month;else{if("d"!==r)return 
function(t,e){var r=l(t+.05,f),n=w(Math.floor(r/h),2)+":"+w(l(Math.floor(r/p),60),2);if("M"!==e){o(e)||(e=0);var i=(100+Math.min(l(t/d,60),P[e])).toFixed(e).substr(1);e>0&&(i=i.replace(/0+$/,"").replace(/[\.]$/,"")),n+=":"+i}return n}(t,r)+"\n"+C(a.dayMonthYear,t,n,i);e=a.dayMonth+"\n"+a.year}return C(e,t,n,i)};var I=3*f;r.incrementMonth=function(t,e,r){r=_(r)&&r;var n=l(t,f);if(t=Math.round(t-n),r)try{var i=Math.round(t/f)+m,a=g.getComponentMethod("calendars","getCal")(r),o=a.fromJD(i);return e%12?a.add(o,e,"m"):a.add(o,e/12,"y"),(o.toJD()-m)*f+n}catch(e){s.error("invalid ms "+t+" in calendar "+r)}var c=new Date(t+I);return c.setUTCMonth(c.getUTCMonth()+e)+n-I},r.findExactDates=function(t,e){for(var r,n,i=0,a=0,s=0,l=0,c=_(e)&&g.getComponentMethod("calendars","getCal")(e),u=0;u0&&t[e+1][0]<0)return e;return null}switch(e="RUS"===s||"FJI"===s?function(t){var e;if(null===c(t))e=t;else for(e=new Array(t.length),i=0;ie?r[n++]=[t[i][0]+360,t[i][1]]:i===e?(r[n++]=t[i],r[n++]=[t[i][0],-90]):r[n++]=t[i];var a=h.tester(r);a.pts.pop(),l.push(a)}:function(t){l.push(h.tester(t))},a.type){case"MultiPolygon":for(r=0;ri&&(i=c,e=l)}else e=r;return o.default(e).geometry.coordinates}(u),n.fIn=t,n.fOut=u,s.push(u)}else c.log(["Location",n.loc,"does not have a valid GeoJSON geometry.","Traces with locationmode *geojson-id* only support","*Polygon* and *MultiPolygon* geometries."].join(" "))}delete i[r]}switch(r.type){case"FeatureCollection":var h=r.features;for(n=0;n100?(clearInterval(a),n("Unexpected error while fetching from "+t)):void i++}),50)}))}for(var o=0;o0&&(r.push(i),i=[])}return i.length>0&&r.push(i),r},r.makeLine=function(t){return 1===t.length?{type:"LineString",coordinates:t[0]}:{type:"MultiLineString",coordinates:t}},r.makePolygon=function(t){if(1===t.length)return{type:"Polygon",coordinates:t};for(var e=new Array(t.length),r=0;r1||m<0||m>1?null:{x:t+l*m,y:e+f*m}}function l(t,e,r,n,i){var a=n*t+i*e;if(a<0)return n*n+i*i;if(a>r){var o=n-t,s=i-e;return o*o+s*s}var 
l=n*e-i*t;return l*l/r}r.segmentsIntersect=s,r.segmentDistance=function(t,e,r,n,i,a,o,c){if(s(t,e,r,n,i,a,o,c))return 0;var u=r-t,f=n-e,h=o-i,p=c-a,d=u*u+f*f,m=h*h+p*p,g=Math.min(l(u,f,d,i-t,a-e),l(u,f,d,o-t,c-e),l(h,p,m,t-i,e-a),l(h,p,m,r-i,n-a));return Math.sqrt(g)},r.getTextLocation=function(t,e,r,s){if(t===i&&s===a||(n={},i=t,a=s),n[r])return n[r];var l=t.getPointAtLength(o(r-s/2,e)),c=t.getPointAtLength(o(r+s/2,e)),u=Math.atan((c.y-l.y)/(c.x-l.x)),f=t.getPointAtLength(o(r,e)),h={x:(4*f.x+l.x+c.x)/6,y:(4*f.y+l.y+c.y)/6,theta:u};return n[r]=h,h},r.clearLocationCache=function(){i=null},r.getVisibleSegment=function(t,e,r){var n,i,a=e.left,o=e.right,s=e.top,l=e.bottom,c=0,u=t.getTotalLength(),f=u;function h(e){var r=t.getPointAtLength(e);0===e?n=r:e===u&&(i=r);var c=r.xo?r.x-o:0,f=r.yl?r.y-l:0;return Math.sqrt(c*c+f*f)}for(var p=h(c);p;){if((c+=p+r)>f)return;p=h(c)}for(p=h(f);p;){if(c>(f-=p+r))return;p=h(f)}return{min:c,max:f,len:f-c,total:u,isClosed:0===c&&f===u&&Math.abs(n.x-i.x)<.1&&Math.abs(n.y-i.y)<.1}},r.findPointOnPath=function(t,e,r,n){for(var i,a,o,s=(n=n||{}).pathLength||t.getTotalLength(),l=n.tolerance||.001,c=n.iterationLimit||30,u=t.getPointAtLength(0)[r]>t.getPointAtLength(s)[r]?-1:1,f=0,h=0,p=s;f0?p=i:h=i,f++}return a}},{"./mod":505}],494:[function(t,e,r){"use strict";var n=t("fast-isnumeric"),i=t("tinycolor2"),a=t("color-normalize"),o=t("../components/colorscale"),s=t("../components/color/attributes").defaultLine,l=t("./array").isArrayOrTypedArray,c=a(s);function u(t,e){var r=t;return r[3]*=e,r}function f(t){if(n(t))return c;var e=a(t);return e.length?e:c}function h(t){return n(t)?t:1}e.exports={formatColor:function(t,e,r){var n,i,s,p,d,m=t.color,g=l(m),v=l(e),y=o.extractOpts(t),x=[];if(n=void 0!==y.colorscale?o.makeColorScaleFuncFromTrace(t):f,i=g?function(t,e){return void 0===t[e]?c:a(n(t[e]))}:f,s=v?function(t,e){return void 0===t[e]?1:h(t[e])}:h,g||v)for(var b=0;b1?(r*t+r*e)/r:t+e,i=String(n).length;if(i>16){var 
a=String(e).length;if(i>=String(t).length+a){var o=parseFloat(n).toPrecision(12);-1===o.indexOf("e+")&&(n=+o)}}return n}},{}],498:[function(t,e,r){"use strict";var n=t("@plotly/d3"),i=t("d3-time-format").utcFormat,a=t("d3-format").format,o=t("fast-isnumeric"),s=t("../constants/numerical"),l=s.FP_SAFE,c=-l,u=s.BADNUM,f=e.exports={};f.adjustFormat=function(t){return!t||/^\d[.]\df/.test(t)||/[.]\d%/.test(t)?t:"0.f"===t?"~f":/^\d%/.test(t)?"~%":/^\ds/.test(t)?"~s":!/^[~,.0$]/.test(t)&&/[&fps]/.test(t)?"~"+t:t};var h={};f.warnBadFormat=function(t){var e=String(t);h[e]||(h[e]=1,f.warn('encountered bad format: "'+e+'"'))},f.noFormat=function(t){return String(t)},f.numberFormat=function(t){var e;try{e=a(f.adjustFormat(t))}catch(e){return f.warnBadFormat(t),f.noFormat}return e},f.nestedProperty=t("./nested_property"),f.keyedContainer=t("./keyed_container"),f.relativeAttr=t("./relative_attr"),f.isPlainObject=t("./is_plain_object"),f.toLogRange=t("./to_log_range"),f.relinkPrivateKeys=t("./relink_private");var p=t("./array");f.isTypedArray=p.isTypedArray,f.isArrayOrTypedArray=p.isArrayOrTypedArray,f.isArray1D=p.isArray1D,f.ensureArray=p.ensureArray,f.concat=p.concat,f.maxRowLength=p.maxRowLength,f.minRowLength=p.minRowLength;var d=t("./mod");f.mod=d.mod,f.modHalf=d.modHalf;var m=t("./coerce");f.valObjectMeta=m.valObjectMeta,f.coerce=m.coerce,f.coerce2=m.coerce2,f.coerceFont=m.coerceFont,f.coercePattern=m.coercePattern,f.coerceHoverinfo=m.coerceHoverinfo,f.coerceSelectionMarkerOpacity=m.coerceSelectionMarkerOpacity,f.validate=m.validate;var g=t("./dates");f.dateTime2ms=g.dateTime2ms,f.isDateTime=g.isDateTime,f.ms2DateTime=g.ms2DateTime,f.ms2DateTimeLocal=g.ms2DateTimeLocal,f.cleanDate=g.cleanDate,f.isJSDate=g.isJSDate,f.formatDate=g.formatDate,f.incrementMonth=g.incrementMonth,f.dateTick0=g.dateTick0,f.dfltRange=g.dfltRange,f.findExactDates=g.findExactDates,f.MIN_MS=g.MIN_MS,f.MAX_MS=g.MAX_MS;var 
v=t("./search");f.findBin=v.findBin,f.sorterAsc=v.sorterAsc,f.sorterDes=v.sorterDes,f.distinctVals=v.distinctVals,f.roundUp=v.roundUp,f.sort=v.sort,f.findIndexOfMin=v.findIndexOfMin,f.sortObjectKeys=t("./sort_object_keys");var y=t("./stats");f.aggNums=y.aggNums,f.len=y.len,f.mean=y.mean,f.median=y.median,f.midRange=y.midRange,f.variance=y.variance,f.stdev=y.stdev,f.interp=y.interp;var x=t("./matrix");f.init2dArray=x.init2dArray,f.transposeRagged=x.transposeRagged,f.dot=x.dot,f.translationMatrix=x.translationMatrix,f.rotationMatrix=x.rotationMatrix,f.rotationXYMatrix=x.rotationXYMatrix,f.apply3DTransform=x.apply3DTransform,f.apply2DTransform=x.apply2DTransform,f.apply2DTransform2=x.apply2DTransform2,f.convertCssMatrix=x.convertCssMatrix,f.inverseTransformMatrix=x.inverseTransformMatrix;var b=t("./angles");f.deg2rad=b.deg2rad,f.rad2deg=b.rad2deg,f.angleDelta=b.angleDelta,f.angleDist=b.angleDist,f.isFullCircle=b.isFullCircle,f.isAngleInsideSector=b.isAngleInsideSector,f.isPtInsideSector=b.isPtInsideSector,f.pathArc=b.pathArc,f.pathSector=b.pathSector,f.pathAnnulus=b.pathAnnulus;var _=t("./anchor_utils");f.isLeftAnchor=_.isLeftAnchor,f.isCenterAnchor=_.isCenterAnchor,f.isRightAnchor=_.isRightAnchor,f.isTopAnchor=_.isTopAnchor,f.isMiddleAnchor=_.isMiddleAnchor,f.isBottomAnchor=_.isBottomAnchor;var w=t("./geometry2d");f.segmentsIntersect=w.segmentsIntersect,f.segmentDistance=w.segmentDistance,f.getTextLocation=w.getTextLocation,f.clearLocationCache=w.clearLocationCache,f.getVisibleSegment=w.getVisibleSegment,f.findPointOnPath=w.findPointOnPath;var T=t("./extend");f.extendFlat=T.extendFlat,f.extendDeep=T.extendDeep,f.extendDeepAll=T.extendDeepAll,f.extendDeepNoArrays=T.extendDeepNoArrays;var k=t("./loggers");f.log=k.log,f.warn=k.warn,f.error=k.error;var A=t("./regex");f.counterRegex=A.counter;var M=t("./throttle");f.throttle=M.throttle,f.throttleDone=M.done,f.clearThrottle=M.clear;var S=t("./dom");function E(t){var e={};for(var r in t)for(var 
n=t[r],i=0;il||t=e)&&(o(t)&&t>=0&&t%1==0)},f.noop=t("./noop"),f.identity=t("./identity"),f.repeat=function(t,e){for(var r=new Array(e),n=0;nr?Math.max(r,Math.min(e,t)):Math.max(e,Math.min(r,t))},f.bBoxIntersect=function(t,e,r){return r=r||0,t.left<=e.right+r&&e.left<=t.right+r&&t.top<=e.bottom+r&&e.top<=t.bottom+r},f.simpleMap=function(t,e,r,n,i){for(var a=t.length,o=new Array(a),s=0;s=Math.pow(2,r)?i>10?(f.warn("randstr failed uniqueness"),l):t(e,r,n,(i||0)+1):l},f.OptionControl=function(t,e){t||(t={}),e||(e="opt");var r={optionList:[],_newoption:function(n){n[e]=t,r[n.name]=n,r.optionList.push(n)}};return r["_"+e]=t,r},f.smooth=function(t,e){if((e=Math.round(e)||0)<2)return t;var r,n,i,a,o=t.length,s=2*o,l=2*e-1,c=new Array(l),u=new Array(o);for(r=0;r=s&&(i-=s*Math.floor(i/s)),i<0?i=-1-i:i>=o&&(i=s-1-i),a+=t[i]*c[n];u[r]=a}return u},f.syncOrAsync=function(t,e,r){var n;function i(){return f.syncOrAsync(t,e,r)}for(;t.length;)if((n=(0,t.splice(0,1)[0])(e))&&n.then)return n.then(i);return r&&r(e)},f.stripTrailingSlash=function(t){return"/"===t.substr(-1)?t.substr(0,t.length-1):t},f.noneOrAll=function(t,e,r){if(t){var n,i=!1,a=!0;for(n=0;n0?e:0}))},f.fillArray=function(t,e,r,n){if(n=n||f.identity,f.isArrayOrTypedArray(t))for(var i=0;i1?i+o[1]:"";if(a&&(o.length>1||s.length>4||r))for(;n.test(s);)s=s.replace(n,"$1"+a+"$2");return s+l},f.TEMPLATE_STRING_REGEX=/%{([^\s%{}:]*)([:|\|][^}]*)?}/g;var z=/^\w*$/;f.templateString=function(t,e){var r={};return t.replace(f.TEMPLATE_STRING_REGEX,(function(t,n){var i;return z.test(n)?i=e[n]:(r[n]=r[n]||f.nestedProperty(e,n).get,i=r[n]()),f.isValidTextValue(i)?i:""}))};var D={max:10,count:0,name:"hovertemplate"};f.hovertemplateString=function(){return B.apply(D,arguments)};var R={max:10,count:0,name:"texttemplate"};f.texttemplateString=function(){return B.apply(R,arguments)};var F=/^[:|\|]/;function B(t,e,r){var n=this,a=arguments;e||(e={});var o={};return t.replace(f.TEMPLATE_STRING_REGEX,(function(t,s,l){var 
c,u,h,p="_xother"===s||"_yother"===s,d="_xother_"===s||"_yother_"===s,m="xother_"===s||"yother_"===s,g="xother"===s||"yother"===s||p||m||d,v=s;if((p||d)&&(v=v.substring(1)),(m||d)&&(v=v.substring(0,v.length-1)),g){if(void 0===(c=e[v]))return""}else for(h=3;h=48&&o<=57,c=s>=48&&s<=57;if(l&&(n=10*n+o-48),c&&(i=10*i+s-48),!l||!c){if(n!==i)return n-i;if(o!==s)return o-s}}return i-n};var N=2e9;f.seedPseudoRandom=function(){N=2e9},f.pseudoRandom=function(){var t=N;return N=(69069*N+1)%4294967296,Math.abs(N-t)<429496729?f.pseudoRandom():N/4294967296},f.fillText=function(t,e,r){var n=Array.isArray(r)?function(t){r.push(t)}:function(t){r.text=t},i=f.extractOption(t,e,"htx","hovertext");if(f.isValidTextValue(i))return n(i);var a=f.extractOption(t,e,"tx","text");return f.isValidTextValue(a)?n(a):void 0},f.isValidTextValue=function(t){return t||0===t},f.formatPercent=function(t,e){e=e||0;for(var r=(Math.round(100*t*Math.pow(10,e))*Math.pow(.1,e)).toFixed(e)+"%",n=0;n1&&(c=1):c=0,f.strTranslate(i-c*(r+o),a-c*(n+s))+f.strScale(c)+(l?"rotate("+l+(e?"":" "+r+" "+n)+")":"")},f.ensureUniformFontSize=function(t,e){var r=f.extendFlat({},e);return r.size=Math.max(e.size,t._fullLayout.uniformtext.minsize||0),r},f.join2=function(t,e,r){var n=t.length;return n>1?t.slice(0,-1).join(e)+r+t[n-1]:t.join(e)},f.bigFont=function(t){return Math.round(1.2*t)};var j=f.getFirefoxVersion(),U=null!==j&&j<86;f.getPositionFromD3Event=function(){return 
U?[n.event.layerX,n.event.layerY]:[n.event.offsetX,n.event.offsetY]}},{"../constants/numerical":474,"./anchor_utils":478,"./angles":479,"./array":480,"./clean_number":481,"./clear_responsive":483,"./coerce":484,"./dates":485,"./dom":486,"./extend":488,"./filter_unique":489,"./filter_visible":490,"./geometry2d":493,"./identity":496,"./increment":497,"./is_plain_object":499,"./keyed_container":500,"./localize":501,"./loggers":502,"./make_trace_groups":503,"./matrix":504,"./mod":505,"./nested_property":506,"./noop":507,"./notifier":508,"./preserve_drawing_buffer":512,"./push_unique":513,"./regex":515,"./relative_attr":516,"./relink_private":517,"./search":518,"./sort_object_keys":521,"./stats":522,"./throttle":525,"./to_log_range":526,"@plotly/d3":58,"d3-format":107,"d3-time-format":115,"fast-isnumeric":185}],499:[function(t,e,r){"use strict";e.exports=function(t){return window&&window.process&&window.process.versions?"[object Object]"===Object.prototype.toString.call(t):"[object Object]"===Object.prototype.toString.call(t)&&Object.getPrototypeOf(t).hasOwnProperty("hasOwnProperty")}},{}],500:[function(t,e,r){"use strict";var n=t("./nested_property"),i=/^\w*$/;e.exports=function(t,e,r,a){var o,s,l;r=r||"name",a=a||"value";var c={};e&&e.length?(l=n(t,e),s=l.get()):s=t,e=e||"";var u={};if(s)for(o=0;o2)return c[e]=2|c[e],h.set(t,null);if(f){for(o=e;o1){var e=["LOG:"];for(t=0;t1){var r=[];for(t=0;t"),"long")}},a.warn=function(){var t;if(n.logging>0){var e=["WARN:"];for(t=0;t0){var r=[];for(t=0;t"),"stick")}},a.error=function(){var t;if(n.logging>0){var e=["ERROR:"];for(t=0;t0){var r=[];for(t=0;t"),"stick")}}},{"../plot_api/plot_config":536,"./notifier":508}],503:[function(t,e,r){"use strict";var n=t("@plotly/d3");e.exports=function(t,e,r){var i=t.selectAll("g."+r.replace(/\s/g,".")).data(e,(function(t){return t[0].trace.uid}));i.exit().remove(),i.enter().append("g").attr("class",r),i.order();var a=t.classed("rangeplot")?"nodeRangePlot3":"node3";return 
i.each((function(t){t[0][a]=n.select(this)})),i}},{"@plotly/d3":58}],504:[function(t,e,r){"use strict";var n=t("gl-mat4");r.init2dArray=function(t,e){for(var r=new Array(t),n=0;ne/2?t-Math.round(t/e)*e:t}}},{}],506:[function(t,e,r){"use strict";var n=t("fast-isnumeric"),i=t("./array").isArrayOrTypedArray;function a(t,e){return function(){var r,n,o,s,l,c=t;for(s=0;s/g),l=0;la||c===i||cs)&&(!e||!l(t))}:function(t,e){var l=t[0],c=t[1];if(l===i||la||c===i||cs)return!1;var u,f,h,p,d,m=r.length,g=r[0][0],v=r[0][1],y=0;for(u=1;uMath.max(f,g)||c>Math.max(h,v)))if(cu||Math.abs(n(o,h))>i)return!0;return!1},a.filter=function(t,e){var r=[t[0]],n=0,i=0;function o(o){t.push(o);var s=r.length,l=n;r.splice(i+1);for(var c=l+1;c1&&o(t.pop());return{addPt:o,raw:t,filtered:r}}},{"../constants/numerical":474,"./matrix":504}],511:[function(t,e,r){(function(r){(function(){"use strict";var n=t("./show_no_webgl_msg"),i=t("regl");e.exports=function(t,e,a){var o=t._fullLayout,s=!0;return o._glcanvas.each((function(n){if(n.regl)n.regl.preloadCachedCode(a);else if(!n.pick||o._has("parcoords")){try{n.regl=i({canvas:this,attributes:{antialias:!n.pick,preserveDrawingBuffer:!0},pixelRatio:t._context.plotGlPixelRatio||r.devicePixelRatio,extensions:e||[],cachedCode:a||{}})}catch(t){s=!1}n.regl||(s=!1),s&&this.addEventListener("webglcontextlost",(function(e){t&&t.emit&&t.emit("plotly_webglcontextlost",{event:e,layer:n.key})}),!1)}})),s||n({container:o._glcontainer.node()}),s}}).call(this)}).call(this,"undefined"!=typeof global?global:"undefined"!=typeof self?self:"undefined"!=typeof window?window:{})},{"./show_no_webgl_msg":520,regl:278}],512:[function(t,e,r){"use strict";var n=t("fast-isnumeric"),i=t("is-mobile");e.exports=function(t){var e;if("string"!=typeof(e=t&&t.hasOwnProperty("userAgent")?t.userAgent:function(){var t;"undefined"!=typeof navigator&&(t=navigator.userAgent);t&&t.headers&&"string"==typeof t.headers["user-agent"]&&(t=t.headers["user-agent"]);return t}()))return!0;var 
r=i({ua:{headers:{"user-agent":e}},tablet:!0,featureDetect:!1});if(!r)for(var a=e.split(" "),o=1;o-1;s--){var l=a[s];if("Version/"===l.substr(0,8)){var c=l.substr(8).split(".")[0];if(n(c)&&(c=+c),c>=13)return!0}}}return r}},{"fast-isnumeric":185,"is-mobile":229}],513:[function(t,e,r){"use strict";e.exports=function(t,e){if(e instanceof RegExp){for(var r=e.toString(),n=0;ni.queueLength&&(t.undoQueue.queue.shift(),t.undoQueue.index--))},startSequence:function(t){t.undoQueue=t.undoQueue||{index:0,queue:[],sequence:!1},t.undoQueue.sequence=!0,t.undoQueue.beginSequence=!0},stopSequence:function(t){t.undoQueue=t.undoQueue||{index:0,queue:[],sequence:!1},t.undoQueue.sequence=!1,t.undoQueue.beginSequence=!1},undo:function(t){var e,r;if(!(void 0===t.undoQueue||isNaN(t.undoQueue.index)||t.undoQueue.index<=0)){for(t.undoQueue.index--,e=t.undoQueue.queue[t.undoQueue.index],t.undoQueue.inSequence=!0,r=0;r=t.undoQueue.queue.length)){for(e=t.undoQueue.queue[t.undoQueue.index],t.undoQueue.inSequence=!0,r=0;re}function u(t,e){return t>=e}r.findBin=function(t,e,r){if(n(e.start))return r?Math.ceil((t-e.start)/e.size-1e-9)-1:Math.floor((t-e.start)/e.size+1e-9);var a,o,f=0,h=e.length,p=0,d=h>1?(e[h-1]-e[0])/(h-1):1;for(o=d>=0?r?s:l:r?u:c,t+=1e-9*d*(r?-1:1)*(d>=0?1:-1);f90&&i.log("Long binary search..."),f-1},r.sorterAsc=function(t,e){return t-e},r.sorterDes=function(t,e){return e-t},r.distinctVals=function(t){var e,n=t.slice();for(n.sort(r.sorterAsc),e=n.length-1;e>-1&&n[e]===o;e--);for(var i,a=n[e]-n[0]||1,s=a/(e||1)/1e4,l=[],c=0;c<=e;c++){var u=n[c],f=u-i;void 0===i?(l.push(u),i=u):f>s&&(a=Math.min(a,f),l.push(u),i=u)}return{vals:l,minDiff:a}},r.roundUp=function(t,e,r){for(var n,i=0,a=e.length-1,o=0,s=r?0:1,l=r?1:0,c=r?Math.ceil:Math.floor;i0&&(n=1),r&&n)return t.sort(e)}return n?t:t.reverse()},r.findIndexOfMin=function(t,e){e=e||a;for(var r,n=1/0,i=0;ia.length)&&(o=a.length),n(e)||(e=!1),i(a[0])){for(l=new Array(o),s=0;st.length-1)return t[t.length-1];var r=e%1;return 
r*t[Math.ceil(e)]+(1-r)*t[Math.floor(e)]}},{"./array":480,"fast-isnumeric":185}],523:[function(t,e,r){"use strict";var n=t("color-normalize");e.exports=function(t){return t?n(t):[0,0,0,1]}},{"color-normalize":84}],524:[function(t,e,r){"use strict";var n=t("@plotly/d3"),i=t("../lib"),a=i.strTranslate,o=t("../constants/xmlns_namespaces"),s=t("../constants/alignment").LINE_SPACING,l=/([^$]*)([$]+[^$]*[$]+)([^$]*)/;r.convertToTspans=function(t,e,m){var M=t.text(),S=!t.attr("data-notex")&&e&&e._context.typesetMath&&"undefined"!=typeof MathJax&&M.match(l),C=n.select(t.node().parentNode);if(!C.empty()){var P=t.attr("class")?t.attr("class").split(" ")[0]:"text";return P+="-math",C.selectAll("svg."+P).remove(),C.selectAll("g."+P+"-group").remove(),t.style("display",null).attr({"data-unformatted":M,"data-math":"N"}),S?(e&&e._promises||[]).push(new Promise((function(e){t.style("display","none");var r=parseInt(t.node().style.fontSize,10),o={fontSize:r};!function(t,e,r){var a,o,s,l,h=parseInt((MathJax.version||"").split(".")[0]);if(2!==h&&3!==h)return void i.warn("No MathJax version:",MathJax.version);var p=function(){var r="math-output-"+i.randstr({},64),a=(l=n.select("body").append("div").attr({id:r}).style({visibility:"hidden",position:"absolute","font-size":e.fontSize+"px"}).text(t.replace(c,"\\lt ").replace(u,"\\gt "))).node();return 2===h?MathJax.Hub.Typeset(a):MathJax.typeset([a])},d=function(){var e=l.select(2===h?".MathJax_SVG":".MathJax"),a=!e.empty()&&l.select("svg").node();if(a){var o,s=a.getBoundingClientRect();o=2===h?n.select("body").select("#MathJax_SVG_glyphs"):e.select("defs"),r(e,o,s)}else i.log("There was an error in the tex syntax.",t),r();l.remove()};2===h?MathJax.Hub.Queue((function(){return o=i.extendDeepAll({},MathJax.Hub.config),s=MathJax.Hub.processSectionDelay,void 
0!==MathJax.Hub.processSectionDelay&&(MathJax.Hub.processSectionDelay=0),MathJax.Hub.Config({messageStyle:"none",tex2jax:{inlineMath:f},displayAlign:"left"})}),(function(){if("SVG"!==(a=MathJax.Hub.config.menuSettings.renderer))return MathJax.Hub.setRenderer("SVG")}),p,d,(function(){if("SVG"!==a)return MathJax.Hub.setRenderer(a)}),(function(){return void 0!==s&&(MathJax.Hub.processSectionDelay=s),MathJax.Hub.Config(o)})):3===h&&(o=i.extendDeepAll({},MathJax.config),MathJax.config.tex||(MathJax.config.tex={}),MathJax.config.tex.inlineMath=f,"svg"!==(a=MathJax.config.startup.output)&&(MathJax.config.startup.output="svg"),MathJax.startup.defaultReady(),MathJax.startup.promise.then((function(){p(),d(),"svg"!==a&&(MathJax.config.startup.output=a),MathJax.config=o})))}(S[2],o,(function(n,i,o){C.selectAll("svg."+P).remove(),C.selectAll("g."+P+"-group").remove();var s=n&&n.select("svg");if(!s||!s.node())return I(),void e();var l=C.append("g").classed(P+"-group",!0).attr({"pointer-events":"none","data-unformatted":M,"data-math":"Y"});l.node().appendChild(s.node()),i&&i.node()&&s.node().insertBefore(i.node().cloneNode(!0),s.node().firstChild);var c=o.width,u=o.height;s.attr({class:P,height:u,preserveAspectRatio:"xMinYMin meet"}).style({overflow:"visible","pointer-events":"none"});var f=t.node().style.fill||"black",h=s.select("g");h.attr({fill:f,stroke:f});var p=h.node().getBoundingClientRect(),d=p.width,g=p.height;(d>c||g>u)&&(s.style("overflow","hidden"),d=(p=s.node().getBoundingClientRect()).width,g=p.height);var v=+t.attr("x"),y=+t.attr("y"),x=-(r||t.node().getBoundingClientRect().height)/4;if("y"===P[0])l.attr({transform:"rotate("+[-90,v,y]+")"+a(-d/2,x-g/2)});else if("l"===P[0])y=x-g/2;else if("a"===P[0]&&0!==P.indexOf("atitle"))v=0,y=x;else{var b=t.attr("text-anchor");v-=d*("middle"===b?.5:"end"===b?1:0),y=y+x-g/2}s.attr({x:v,y:y}),m&&m.call(t,l),e(l)}))}))):I(),t}function 
I(){C.empty()||(P=t.attr("class")+"-math",C.select("svg."+P).remove()),t.text("").style("white-space","pre"),function(t,e){e=e.replace(g," ");var r,a=!1,l=[],c=-1;function u(){c++;var e=document.createElementNS(o.svg,"tspan");n.select(e).attr({class:"line",dy:c*s+"em"}),t.appendChild(e),r=e;var i=l;if(l=[{node:e}],i.length>1)for(var a=1;a doesnt match end tag <"+t+">. Pretending it did match.",e),r=l[l.length-1].node}else i.log("Ignoring unexpected end tag .",e)}x.test(e)?u():(r=t,l=[{node:t}]);for(var S=e.split(v),C=0;C|>|>)/g;var f=[["$","$"],["\\(","\\)"]];var h={sup:"font-size:70%",sub:"font-size:70%",b:"font-weight:bold",i:"font-style:italic",a:"cursor:pointer",span:"",em:"font-style:italic;font-weight:bold"},p={sub:"0.3em",sup:"-0.6em"},d={sub:"-0.21em",sup:"0.42em"},m=["http:","https:","mailto:","",void 0,":"],g=r.NEWLINES=/(\r\n?|\n)/g,v=/(<[^<>]*>)/,y=/<(\/?)([^ >]*)(\s+(.*))?>/i,x=//i;r.BR_TAG_ALL=//gi;var b=/(^|[\s"'])style\s*=\s*("([^"]*);?"|'([^']*);?')/i,_=/(^|[\s"'])href\s*=\s*("([^"]*)"|'([^']*)')/i,w=/(^|[\s"'])target\s*=\s*("([^"\s]*)"|'([^'\s]*)')/i,T=/(^|[\s"'])popup\s*=\s*("([\w=,]*)"|'([\w=,]*)')/i;function k(t,e){if(!t)return null;var r=t.match(e),n=r&&(r[3]||r[4]);return n&&E(n)}var A=/(^|;)\s*color:/;r.plainText=function(t,e){for(var r=void 0!==(e=e||{}).len&&-1!==e.len?e.len:1/0,n=void 0!==e.allowedTags?e.allowedTags:["br"],i="...".length,a=t.split(v),o=[],s="",l=0,c=0;ci?o.push(u.substr(0,d-i)+"..."):o.push(u.substr(0,d));break}s=""}}return o.join("")};var M={mu:"\u03bc",amp:"&",lt:"<",gt:">",nbsp:"\xa0",times:"\xd7",plusmn:"\xb1",deg:"\xb0"},S=/&(#\d+|#x[\da-fA-F]+|[a-z]+);/g;function E(t){return t.replace(S,(function(t,e){return("#"===e.charAt(0)?function(t){if(t>1114111)return;var e=String.fromCodePoint;if(e)return e(t);var r=String.fromCharCode;return t<=65535?r(t):r(55232+(t>>10),t%1024+56320)}("x"===e.charAt(1)?parseInt(e.substr(2),16):parseInt(e.substr(1),10)):M[e])||t}))}function L(t){var 
e=encodeURI(decodeURI(t)),r=document.createElement("a"),n=document.createElement("a");r.href=t,n.href=e;var i=r.protocol,a=n.protocol;return-1!==m.indexOf(i)&&-1!==m.indexOf(a)?e:""}function C(t,e,r){var n,a,o,s=r.horizontalAlign,l=r.verticalAlign||"top",c=t.node().getBoundingClientRect(),u=e.node().getBoundingClientRect();return a="bottom"===l?function(){return c.bottom-n.height}:"middle"===l?function(){return c.top+(c.height-n.height)/2}:function(){return c.top},o="right"===s?function(){return c.right-n.width}:"center"===s?function(){return c.left+(c.width-n.width)/2}:function(){return c.left},function(){n=this.node().getBoundingClientRect();var t=o()-u.left,e=a()-u.top,s=r.gd||{};if(r.gd){s._fullLayout._calcInverseTransform(s);var l=i.apply3DTransform(s._fullLayout._invTransform)(t,e);t=l[0],e=l[1]}return this.style({top:e+"px",left:t+"px","z-index":1e3}),this}}r.convertEntities=E,r.sanitizeHTML=function(t){t=t.replace(g," ");for(var e=document.createElement("p"),r=e,i=[],a=t.split(v),o=0;oa.ts+e?l():a.timer=setTimeout((function(){l(),a.timer=null}),e)},r.done=function(t){var e=n[t];return e&&e.timer?new Promise((function(t){var r=e.onDone;e.onDone=function(){r&&r(),t(),e.onDone=null}})):Promise.resolve()},r.clear=function(t){if(t)i(n[t]),delete n[t];else for(var e in n)r.clear(e)}},{}],526:[function(t,e,r){"use strict";var n=t("fast-isnumeric");e.exports=function(t,e){if(t>0)return Math.log(t)/Math.LN10;var r=Math.log(Math.min(e[0],e[1]))/Math.LN10;return n(r)||(r=Math.log(Math.max(e[0],e[1]))/Math.LN10-6),r}},{"fast-isnumeric":185}],527:[function(t,e,r){"use strict";var n=e.exports={},i=t("../plots/geo/constants").locationmodeToLayer,a=t("topojson-client").feature;n.getTopojsonName=function(t){return[t.scope.replace(/ /g,"-"),"_",t.resolution.toString(),"m"].join("")},n.getTopojsonPath=function(t,e){return t+e+".json"},n.getTopojsonFeatures=function(t,e){var r=i[t.locationmode],n=e.objects[r];return 
a(e,n).features}},{"../plots/geo/constants":582,"topojson-client":310}],528:[function(t,e,r){"use strict";e.exports={moduleType:"locale",name:"en-US",dictionary:{"Click to enter Colorscale title":"Click to enter Colorscale title"},format:{date:"%m/%d/%Y"}}},{}],529:[function(t,e,r){"use strict";e.exports={moduleType:"locale",name:"en",dictionary:{"Click to enter Colorscale title":"Click to enter Colourscale title"},format:{days:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],shortDays:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],months:["January","February","March","April","May","June","July","August","September","October","November","December"],shortMonths:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],periods:["AM","PM"],dateTime:"%a %b %e %X %Y",date:"%d/%m/%Y",time:"%H:%M:%S",decimal:".",thousands:",",grouping:[3],currency:["$",""],year:"%Y",month:"%b %Y",dayMonth:"%b %-d",dayMonthYear:"%b %-d, %Y"}}},{}],530:[function(t,e,r){"use strict";var n=t("../registry");e.exports=function(t){for(var e,r,i=n.layoutArrayContainers,a=n.layoutArrayRegexes,o=t.split("[")[0],s=0;s0&&o.log("Clearing previous rejected promises from queue."),t._promises=[]},r.cleanLayout=function(t){var e,n;t||(t={}),t.xaxis1&&(t.xaxis||(t.xaxis=t.xaxis1),delete t.xaxis1),t.yaxis1&&(t.yaxis||(t.yaxis=t.yaxis1),delete t.yaxis1),t.scene1&&(t.scene||(t.scene=t.scene1),delete t.scene1);var a=(s.subplotsRegistry.cartesian||{}).attrRegex,l=(s.subplotsRegistry.polar||{}).attrRegex,f=(s.subplotsRegistry.ternary||{}).attrRegex,h=(s.subplotsRegistry.gl3d||{}).attrRegex,m=Object.keys(t);for(e=0;e3?(O.x=1.02,O.xanchor="left"):O.x<-2&&(O.x=-.02,O.xanchor="right"),O.y>3?(O.y=1.02,O.yanchor="bottom"):O.y<-2&&(O.y=-.02,O.yanchor="top")),d(t),"rotate"===t.dragmode&&(t.dragmode="orbit"),c.clean(t),t.template&&t.template.layout&&r.cleanLayout(t.template.layout),t},r.cleanData=function(t){for(var e=0;e0)return t.substr(0,e)}r.hasParent=function(t,e){for(var 
r=b(e);r;){if(r in t)return!0;r=b(r)}return!1};var _=["x","y","z"];r.clearAxisTypes=function(t,e,r){for(var n=0;n1&&a.warn("Full array edits are incompatible with other edits",f);var y=r[""][""];if(c(y))e.set(null);else{if(!Array.isArray(y))return a.warn("Unrecognized full array edit value",f,y),!0;e.set(y)}return!m&&(h(g,v),p(t),!0)}var x,b,_,w,T,k,A,M,S=Object.keys(r).map(Number).sort(o),E=e.get(),L=E||[],C=u(v,f).get(),P=[],I=-1,O=L.length;for(x=0;xL.length-(A?0:1))a.warn("index out of range",f,_);else if(void 0!==k)T.length>1&&a.warn("Insertion & removal are incompatible with edits to the same index.",f,_),c(k)?P.push(_):A?("add"===k&&(k={}),L.splice(_,0,k),C&&C.splice(_,0,{})):a.warn("Unrecognized full object edit value",f,_,k),-1===I&&(I=_);else for(b=0;b=0;x--)L.splice(P[x],1),C&&C.splice(P[x],1);if(L.length?E||e.set(L):e.set(null),m)return!1;if(h(g,v),d!==i){var z;if(-1===I)z=S;else{for(O=Math.max(L.length,O),z=[],x=0;x=I);x++)z.push(_);for(x=I;x=t.data.length||i<-t.data.length)throw new Error(r+" must be valid indices for gd.data.");if(e.indexOf(i,n+1)>-1||i>=0&&e.indexOf(-t.data.length+i)>-1||i<0&&e.indexOf(t.data.length+i)>-1)throw new Error("each index in "+r+" must be unique.")}}function I(t,e,r){if(!Array.isArray(t.data))throw new Error("gd.data must be an array.");if(void 0===e)throw new Error("currentIndices is a required argument.");if(Array.isArray(e)||(e=[e]),P(t,e,"currentIndices"),void 0===r||Array.isArray(r)||(r=[r]),void 0!==r&&P(t,r,"newIndices"),void 0!==r&&e.length!==r.length)throw new Error("current and new indices must be of equal length.")}function O(t,e,r,n,a){!function(t,e,r,n){var i=o.isPlainObject(n);if(!Array.isArray(t.data))throw new Error("gd.data must be an array");if(!o.isPlainObject(e))throw new Error("update must be a key:value object");if(void 0===r)throw new Error("indices must be an integer or array of integers");for(var a in P(t,r,"indices"),e){if(!Array.isArray(e[a])||e[a].length!==r.length)throw new Error("attribute 
"+a+" must be an array of length equal to indices array length");if(i&&(!(a in n)||!Array.isArray(n[a])||n[a].length!==e[a].length))throw new Error("when maxPoints is set as a key:value object it must contain a 1:1 corrispondence with the keys and number of traces in the update object")}}(t,e,r,n);for(var l=function(t,e,r,n){var a,l,c,u,f,h=o.isPlainObject(n),p=[];for(var d in Array.isArray(r)||(r=[r]),r=C(r,t.data.length-1),e)for(var m=0;m-1&&-1===r.indexOf("grouptitlefont")?l(r,r.replace("titlefont","title.font")):r.indexOf("titleposition")>-1?l(r,r.replace("titleposition","title.position")):r.indexOf("titleside")>-1?l(r,r.replace("titleside","title.side")):r.indexOf("titleoffset")>-1&&l(r,r.replace("titleoffset","title.offset")):l(r,r.replace("title","title.text"));function l(e,r){t[r]=t[e],delete t[e]}}function U(t,e,r){t=o.getGraphDiv(t),_.clearPromiseQueue(t);var n={};if("string"==typeof e)n[e]=r;else{if(!o.isPlainObject(e))return o.warn("Relayout fail.",e,r),Promise.reject();n=o.extendFlat({},e)}Object.keys(n).length&&(t.changed=!0);var i=W(t,n),a=i.flags;a.calc&&(t.calcdata=void 0);var s=[h.previousPromises];a.layoutReplot?s.push(w.layoutReplot):Object.keys(n).length&&(V(t,a,i)||h.supplyDefaults(t),a.legend&&s.push(w.doLegend),a.layoutstyle&&s.push(w.layoutStyles),a.axrange&&H(s,i.rangesAltered),a.ticks&&s.push(w.doTicksRelayout),a.modebar&&s.push(w.doModeBar),a.camera&&s.push(w.doCamera),a.colorbars&&s.push(w.doColorBars),s.push(M)),s.push(h.rehover,h.redrag),c.add(t,U,[t,i.undoit],U,[t,i.redoit]);var l=o.syncOrAsync(s,t);return l&&l.then||(l=Promise.resolve(t)),l.then((function(){return t.emit("plotly_relayout",i.eventData),t}))}function V(t,e,r){var n=t._fullLayout;if(!e.axrange)return!1;for(var i in e)if("axrange"!==i&&e[i])return!1;for(var a in r.rangesAltered){var o=p.id2name(a),s=t.layout[o],l=n[o];if(l.autorange=s.autorange,s.range&&(l.range=s.range.slice()),l.cleanRange(),l._matchGroup)for(var c in l._matchGroup)if(c!==a){var 
u=n[p.id2name(c)];u.autorange=l.autorange,u.range=l.range.slice(),u._input.range=l.range.slice()}}return!0}function H(t,e){var r=e?function(t){var r=[],n=!0;for(var i in e){var a=p.getFromId(t,i);if(r.push(i),-1!==(a.ticklabelposition||"").indexOf("inside")&&a._anchorAxis&&r.push(a._anchorAxis._id),a._matchGroup)for(var o in a._matchGroup)e[o]||r.push(o);a.automargin&&(n=!1)}return p.draw(t,r,{skipTitle:n})}:function(t){return p.draw(t,"redraw")};t.push(y,w.doAutoRangeAndConstraints,r,w.drawData,w.finalDraw)}var q=/^[xyz]axis[0-9]*\.range(\[[0|1]\])?$/,G=/^[xyz]axis[0-9]*\.autorange$/,Y=/^[xyz]axis[0-9]*\.domain(\[[0|1]\])?$/;function W(t,e){var r,n,i,a=t.layout,l=t._fullLayout,c=l._guiEditing,h=F(l._preGUI,c),d=Object.keys(e),m=p.list(t),g=o.extendDeepAll({},e),v={};for(j(e),d=Object.keys(e),n=0;n0&&"string"!=typeof O.parts[D];)D--;var B=O.parts[D],N=O.parts[D-1]+"."+B,U=O.parts.slice(0,D).join("."),V=s(t.layout,U).get(),H=s(l,U).get(),W=O.get();if(void 0!==z){M[I]=z,S[I]="reverse"===B?z:R(W);var Z=f.getLayoutValObject(l,O.parts);if(Z&&Z.impliedEdits&&null!==z)for(var J in Z.impliedEdits)E(o.relativeAttr(I,J),Z.impliedEdits[J]);if(-1!==["width","height"].indexOf(I))if(z){E("autosize",null);var K="height"===I?"width":"height";E(K,l[K])}else l[I]=t._initialAutoSize[I];else if("autosize"===I)E("width",z?null:l.width),E("height",z?null:l.height);else if(N.match(q))P(N),s(l,U+"._inputRange").set(null);else if(N.match(G)){P(N),s(l,U+"._inputRange").set(null);var Q=s(l,U).get();Q._inputDomain&&(Q._input.domain=Q._inputDomain.slice())}else N.match(Y)&&s(l,U+"._inputDomain").set(null);if("type"===B){L=V;var $="linear"===H.type&&"log"===z,tt="log"===H.type&&"linear"===z;if($||tt){if(L&&L.range)if(H.autorange)$&&(L.range=L.range[1]>L.range[0]?[1,2]:[2,1]);else{var 
et=L.range[0],rt=L.range[1];$?(et<=0&&rt<=0&&E(U+".autorange",!0),et<=0?et=rt/1e6:rt<=0&&(rt=et/1e6),E(U+".range[0]",Math.log(et)/Math.LN10),E(U+".range[1]",Math.log(rt)/Math.LN10)):(E(U+".range[0]",Math.pow(10,et)),E(U+".range[1]",Math.pow(10,rt)))}else E(U+".autorange",!0);Array.isArray(l._subplots.polar)&&l._subplots.polar.length&&l[O.parts[0]]&&"radialaxis"===O.parts[1]&&delete l[O.parts[0]]._subplot.viewInitial["radialaxis.range"],u.getComponentMethod("annotations","convertCoords")(t,H,z,E),u.getComponentMethod("images","convertCoords")(t,H,z,E)}else E(U+".autorange",!0),E(U+".range",null);s(l,U+"._inputRange").set(null)}else if(B.match(k)){var nt=s(l,I).get(),it=(z||{}).type;it&&"-"!==it||(it="linear"),u.getComponentMethod("annotations","convertCoords")(t,nt,it,E),u.getComponentMethod("images","convertCoords")(t,nt,it,E)}var at=b.containerArrayMatch(I);if(at){r=at.array,n=at.index;var ot=at.property,st=Z||{editType:"calc"};""!==n&&""===ot&&(b.isAddVal(z)?S[I]=null:b.isRemoveVal(z)?S[I]=(s(a,r).get()||[])[n]:o.warn("unrecognized full object value",e)),T.update(A,st),v[r]||(v[r]={});var lt=v[r][n];lt||(lt=v[r][n]={}),lt[ot]=z,delete e[I]}else"reverse"===B?(V.range?V.range.reverse():(E(U+".autorange",!0),V.range=[1,0]),H.autorange?A.calc=!0:A.plot=!0):(l._has("scatter-like")&&l._has("regl")&&"dragmode"===I&&("lasso"===z||"select"===z)&&"lasso"!==W&&"select"!==W||l._has("gl2d")?A.plot=!0:Z?T.update(A,Z):A.calc=!0,O.set(z))}}for(r in v){b.applyContainerArrayChanges(t,h(a,r),v[r],A,h)||(A.plot=!0)}for(var ct in C){var ut=(L=p.getFromId(t,ct))&&L._constraintGroup;if(ut)for(var ft in A.calc=!0,ut)C[ft]||(p.getFromId(t,ft)._constraintShrinkable=!0)}return(X(t)||e.height||e.width)&&(A.plot=!0),(A.plot||A.calc)&&(A.layoutReplot=!0),{flags:A,rangesAltered:C,undoit:S,redoit:M,eventData:g}}function X(t){var e=t._fullLayout,r=e.width,n=e.height;return t.layout.autosize&&h.plotAutoSize(t,t.layout,e),e.width!==r||e.height!==n}function 
Z(t,e,n,i){t=o.getGraphDiv(t),_.clearPromiseQueue(t),o.isPlainObject(e)||(e={}),o.isPlainObject(n)||(n={}),Object.keys(e).length&&(t.changed=!0),Object.keys(n).length&&(t.changed=!0);var a=_.coerceTraceIndices(t,i),s=N(t,o.extendFlat({},e),a),l=s.flags,u=W(t,o.extendFlat({},n)),f=u.flags;(l.calc||f.calc)&&(t.calcdata=void 0),l.clearAxisTypes&&_.clearAxisTypes(t,a,n);var p=[];f.layoutReplot?p.push(w.layoutReplot):l.fullReplot?p.push(r._doPlot):(p.push(h.previousPromises),V(t,f,u)||h.supplyDefaults(t),l.style&&p.push(w.doTraceStyle),(l.colorbars||f.colorbars)&&p.push(w.doColorBars),f.legend&&p.push(w.doLegend),f.layoutstyle&&p.push(w.layoutStyles),f.axrange&&H(p,u.rangesAltered),f.ticks&&p.push(w.doTicksRelayout),f.modebar&&p.push(w.doModeBar),f.camera&&p.push(w.doCamera),p.push(M)),p.push(h.rehover,h.redrag),c.add(t,Z,[t,s.undoit,u.undoit,s.traces],Z,[t,s.redoit,u.redoit,s.traces]);var d=o.syncOrAsync(p,t);return d&&d.then||(d=Promise.resolve(t)),d.then((function(){return t.emit("plotly_update",{data:s.eventData,layout:u.eventData}),t}))}function J(t){return function(e){e._fullLayout._guiEditing=!0;var r=t.apply(null,arguments);return e._fullLayout._guiEditing=!1,r}}var 
K=[{pattern:/^hiddenlabels/,attr:"legend.uirevision"},{pattern:/^((x|y)axis\d*)\.((auto)?range|title\.text)/},{pattern:/axis\d*\.showspikes$/,attr:"modebar.uirevision"},{pattern:/(hover|drag)mode$/,attr:"modebar.uirevision"},{pattern:/^(scene\d*)\.camera/},{pattern:/^(geo\d*)\.(projection|center|fitbounds)/},{pattern:/^(ternary\d*\.[abc]axis)\.(min|title\.text)$/},{pattern:/^(polar\d*\.radialaxis)\.((auto)?range|angle|title\.text)/},{pattern:/^(polar\d*\.angularaxis)\.rotation/},{pattern:/^(mapbox\d*)\.(center|zoom|bearing|pitch)/},{pattern:/^legend\.(x|y)$/,attr:"editrevision"},{pattern:/^(shapes|annotations)/,attr:"editrevision"},{pattern:/^title\.text$/,attr:"editrevision"}],Q=[{pattern:/^selectedpoints$/,attr:"selectionrevision"},{pattern:/(^|value\.)visible$/,attr:"legend.uirevision"},{pattern:/^dimensions\[\d+\]\.constraintrange/},{pattern:/^node\.(x|y|groups)/},{pattern:/^level$/},{pattern:/(^|value\.)name$/},{pattern:/colorbar\.title\.text$/},{pattern:/colorbar\.(x|y)$/,attr:"editrevision"}];function $(t,e){for(var r=0;r1;)if(n.pop(),void 0!==(r=s(e,n.join(".")+".uirevision").get()))return r;return e.uirevision}function et(t,e){for(var r=0;r=i.length?i[0]:i[t]:i}function l(t){return Array.isArray(a)?t>=a.length?a[0]:a[t]:a}function c(t,e){var r=0;return function(){if(t&&++r===e)return t()}}return void 0===n._frameWaitingCnt&&(n._frameWaitingCnt=0),new Promise((function(a,u){function f(){n._currentFrame&&n._currentFrame.onComplete&&n._currentFrame.onComplete();var e=n._currentFrame=n._frameQueue.shift();if(e){var r=e.name?e.name.toString():null;t._fullLayout._currentFrame=r,n._lastFrameAt=Date.now(),n._timeToNext=e.frameOpts.duration,h.transition(t,e.frame.data,e.frame.layout,_.coerceTraceIndices(t,e.frame.traces),e.frameOpts,e.transitionOpts).then((function(){e.onComplete&&e.onComplete()})),t.emit("plotly_animatingframe",{name:r,frame:e.frame,animation:{frame:e.frameOpts,transition:e.transitionOpts}})}else 
t.emit("plotly_animated"),window.cancelAnimationFrame(n._animationRaf),n._animationRaf=null}function p(){t.emit("plotly_animating"),n._lastFrameAt=-1/0,n._timeToNext=0,n._runningTransitions=0,n._currentFrame=null;var e=function(){n._animationRaf=window.requestAnimationFrame(e),Date.now()-n._lastFrameAt>n._timeToNext&&f()};e()}var d,m,g=0;function v(t){return Array.isArray(i)?g>=i.length?t.transitionOpts=i[g]:t.transitionOpts=i[0]:t.transitionOpts=i,g++,t}var y=[],x=null==e,b=Array.isArray(e);if(!x&&!b&&o.isPlainObject(e))y.push({type:"object",data:v(o.extendFlat({},e))});else if(x||-1!==["string","number"].indexOf(typeof e))for(d=0;d0&&kk)&&A.push(m);y=A}}y.length>0?function(e){if(0!==e.length){for(var i=0;i=0;n--)if(o.isPlainObject(e[n])){var m=e[n].name,g=(u[m]||d[m]||{}).name,v=e[n].name,y=u[g]||d[g];g&&v&&"number"==typeof v&&y&&A<5&&(A++,o.warn('addFrames: overwriting frame "'+(u[g]||d[g]).name+'" with a frame whose name of type "number" also equates to "'+g+'". This is valid but may potentially lead to unexpected behavior since all plotly.js frame names are stored internally as strings.'),5===A&&o.warn("addFrames: This API call has yielded too many of these warnings. 
For the rest of this call, further warnings about numeric frame names will be suppressed.")),d[m]={name:m},p.push({frame:h.supplyFrameDefaults(e[n]),index:r&&void 0!==r[n]&&null!==r[n]?r[n]:f+n})}p.sort((function(t,e){return t.index>e.index?-1:t.index=0;n--){if("number"==typeof(i=p[n].frame).name&&o.warn("Warning: addFrames accepts frames with numeric names, but the numbers areimplicitly cast to strings"),!i.name)for(;u[i.name="frame "+t._transitionData._counter++];);if(u[i.name]){for(a=0;a=0;r--)n=e[r],a.push({type:"delete",index:n}),s.unshift({type:"insert",index:n,value:i[n]});var l=h.modifyFrames,u=h.modifyFrames,f=[t,s],p=[t,a];return c&&c.add(t,l,f,u,p),h.modifyFrames(t,a)},r.addTraces=function t(e,n,i){e=o.getGraphDiv(e);var a,s,l=[],u=r.deleteTraces,f=t,h=[e,l],p=[e,n];for(function(t,e,r){var n,i;if(!Array.isArray(t.data))throw new Error("gd.data must be an array.");if(void 0===e)throw new Error("traces must be defined.");for(Array.isArray(e)||(e=[e]),n=0;n=0&&r=0&&r=a.length)return!1;if(2===t.dimensions){if(r++,e.length===r)return t;var o=e[r];if(!y(o))return!1;t=a[i][o]}else t=a[i]}else t=a}}return t}function y(t){return t===Math.round(t)&&t>=0}function x(){var t,e,r={};for(t in f(r,o),n.subplotsRegistry){if((e=n.subplotsRegistry[t]).layoutAttributes)if(Array.isArray(e.attr))for(var i=0;i=l.length)return!1;i=(r=(n.transformsRegistry[l[c].type]||{}).attributes)&&r[e[2]],s=3}else{var u=t._module;if(u||(u=(n.modules[t.type||a.type.dflt]||{})._module),!u)return!1;if(!(i=(r=u.attributes)&&r[o])){var f=u.basePlotModule;f&&f.attributes&&(i=f.attributes[o])}i||(i=a[o])}return v(i,e,s)},r.getLayoutValObject=function(t,e){return v(function(t,e){var r,i,a,s,l=t._basePlotModules;if(l){var c;for(r=0;r=i&&(r._input||{})._templateitemname;o&&(a=i);var s,l=e+"["+a+"]";function c(){s={},o&&(s[l]={},s[l].templateitemname=o)}function u(t,e){o?n.nestedProperty(s[l],t).set(e):s[l+"."+t]=e}function f(){var t=s;return c(),t}return 
c(),{modifyBase:function(t,e){s[t]=e},modifyItem:u,getUpdateObj:f,applyUpdate:function(e,r){e&&u(e,r);var i=f();for(var a in i)n.nestedProperty(t,a).set(i[a])}}}},{"../lib":498,"../plots/attributes":545}],539:[function(t,e,r){"use strict";var n=t("@plotly/d3"),i=t("../registry"),a=t("../plots/plots"),o=t("../lib"),s=t("../lib/clear_gl_canvases"),l=t("../components/color"),c=t("../components/drawing"),u=t("../components/titles"),f=t("../components/modebar"),h=t("../plots/cartesian/axes"),p=t("../constants/alignment"),d=t("../plots/cartesian/constraints"),m=d.enforce,g=d.clean,v=t("../plots/cartesian/autorange").doAutoRange;function y(t,e,r){for(var n=0;n=t[1]||i[1]<=t[0])&&(a[0]e[0]))return!0}return!1}function x(t){var e,i,s,u,d,m,g=t._fullLayout,v=g._size,x=v.p,_=h.list(t,"",!0);if(g._paperdiv.style({width:t._context.responsive&&g.autosize&&!t._context._hasZeroWidth&&!t.layout.width?"100%":g.width+"px",height:t._context.responsive&&g.autosize&&!t._context._hasZeroHeight&&!t.layout.height?"100%":g.height+"px"}).selectAll(".main-svg").call(c.setSize,g.width,g.height),t._context.setBackground(t,g.paper_bgcolor),r.drawMainTitle(t),f.manage(t),!g._has("cartesian"))return a.previousPromises(t);function T(t,e,r){var n=t._lw/2;return"x"===t._id.charAt(0)?e?"top"===r?e._offset-x-n:e._offset+e._length+x+n:v.t+v.h*(1-(t.position||0))+n%1:e?"right"===r?e._offset+e._length+x+n:e._offset-x-n:v.l+v.w*(t.position||0)+n%1}for(e=0;e<_.length;e++){var k=(u=_[e])._anchorAxis;u._linepositions={},u._lw=c.crispRound(t,u.linewidth,1),u._mainLinePosition=T(u,k,u.side),u._mainMirrorPosition=u.mirror&&k?T(u,k,p.OPPOSITE_SIDE[u.side]):null}var A=[],M=[],S=[],E=1===l.opacity(g.paper_bgcolor)&&1===l.opacity(g.plot_bgcolor)&&g.paper_bgcolor===g.plot_bgcolor;for(i in g._plots)if((s=g._plots[i]).mainplot)s.bg&&s.bg.remove(),s.bg=void 0;else{var L=s.xaxis.domain,C=s.yaxis.domain,P=s.plotgroup;if(y(L,C,S)){var 
I=P.node(),O=s.bg=o.ensureSingle(P,"rect","bg");I.insertBefore(O.node(),I.childNodes[0]),M.push(i)}else P.select("rect.bg").remove(),S.push([L,C]),E||(A.push(i),M.push(i))}var z,D,R,F,B,N,j,U,V,H,q,G,Y,W=g._bgLayer.selectAll(".bg").data(A);for(W.enter().append("rect").classed("bg",!0),W.exit().remove(),W.each((function(t){g._plots[t].bg=n.select(this)})),e=0;eT?u.push({code:"unused",traceType:y,templateCount:w,dataCount:T}):T>w&&u.push({code:"reused",traceType:y,templateCount:w,dataCount:T})}}else u.push({code:"data"});if(function t(e,r){for(var n in e)if("_"!==n.charAt(0)){var a=e[n],o=m(e,n,r);i(a)?(Array.isArray(e)&&!1===a._template&&a.templateitemname&&u.push({code:"missing",path:o,templateitemname:a.templateitemname}),t(a,o)):Array.isArray(a)&&g(a)&&t(a,o)}}({data:p,layout:h},""),u.length)return u.map(v)}},{"../lib":498,"../plots/attributes":545,"../plots/plots":614,"./plot_config":536,"./plot_schema":537,"./plot_template":538}],541:[function(t,e,r){"use strict";var n=t("fast-isnumeric"),i=t("./plot_api"),a=t("../plots/plots"),o=t("../lib"),s=t("../snapshot/helpers"),l=t("../snapshot/tosvg"),c=t("../snapshot/svgtoimg"),u=t("../version").version,f={format:{valType:"enumerated",values:["png","jpeg","webp","svg","full-json"],dflt:"png"},width:{valType:"number",min:1},height:{valType:"number",min:1},scale:{valType:"number",min:0,dflt:1},setBackground:{valType:"any",dflt:!1},imageDataOnly:{valType:"boolean",dflt:!1}};e.exports=function(t,e){var r,h,p,d;function m(t){return!(t in e)||o.validate(e[t],f[t])}if(e=e||{},o.isPlainObject(t)?(r=t.data||[],h=t.layout||{},p=t.config||{},d={}):(t=o.getGraphDiv(t),r=o.extendDeep([],t.data),h=o.extendDeep({},t.layout),p=t._context,d=t._fullLayout||{}),!m("width")&&null!==e.width||!m("height")&&null!==e.height)throw new Error("Height and width should be pixel values.");if(!m("format"))throw new Error("Export format is not "+o.join2(f.format.values,", "," or ")+".");var g={};function v(t,r){return o.coerce(e,g,f,t,r)}var 
y=v("format"),x=v("width"),b=v("height"),_=v("scale"),w=v("setBackground"),T=v("imageDataOnly"),k=document.createElement("div");k.style.position="absolute",k.style.left="-5000px",document.body.appendChild(k);var A=o.extendFlat({},h);x?A.width=x:null===e.width&&n(d.width)&&(A.width=d.width),b?A.height=b:null===e.height&&n(d.height)&&(A.height=d.height);var M=o.extendFlat({},p,{_exportedPlot:!0,staticPlot:!0,setBackground:w}),S=s.getRedrawFunc(k);function E(){return new Promise((function(t){setTimeout(t,s.getDelay(k._fullLayout))}))}function L(){return new Promise((function(t,e){var r=l(k,y,_),n=k._fullLayout.width,f=k._fullLayout.height;function h(){i.purge(k),document.body.removeChild(k)}if("full-json"===y){var p=a.graphJson(k,!1,"keepdata","object",!0,!0);return p.version=u,p=JSON.stringify(p),h(),t(T?p:s.encodeJSON(p))}if(h(),"svg"===y)return t(T?r:s.encodeSVG(r));var d=document.createElement("canvas");d.id=o.randstr(),c({format:y,width:n,height:f,scale:_,canvas:d,svg:r,promise:!0}).then(t).catch(e)}))}return new Promise((function(t,e){i.newPlot(k,r,A,M).then(S).then(E).then(L).then((function(e){t(function(t){return T?t.replace(s.IMAGE_URL_PREFIX,""):t}(e))})).catch((function(t){e(t)}))}))}},{"../lib":498,"../plots/plots":614,"../snapshot/helpers":637,"../snapshot/svgtoimg":639,"../snapshot/tosvg":641,"../version":1118,"./plot_api":535,"fast-isnumeric":185}],542:[function(t,e,r){"use strict";var n=t("../lib"),i=t("../plots/plots"),a=t("./plot_schema"),o=t("./plot_config").dfltConfig,s=n.isPlainObject,l=Array.isArray,c=n.isArrayOrTypedArray;function u(t,e,r,i,a,o){o=o||[];for(var f=Object.keys(t),h=0;hx.length&&i.push(d("unused",a,v.concat(x.length)));var A,M,S,E,L,C=x.length,P=Array.isArray(k);if(P&&(C=Math.min(C,k.length)),2===b.dimensions)for(M=0;Mx[M].length&&i.push(d("unused",a,v.concat(M,x[M].length)));var 
I=x[M].length;for(A=0;A<(P?Math.min(I,k[M].length):I);A++)S=P?k[M][A]:k,E=y[M][A],L=x[M][A],n.validate(E,S)?L!==E&&L!==+E&&i.push(d("dynamic",a,v.concat(M,A),E,L)):i.push(d("value",a,v.concat(M,A),E))}else i.push(d("array",a,v.concat(M),y[M]));else for(M=0;M1&&p.push(d("object","layout"))),i.supplyDefaults(m);for(var g=m._fullData,v=r.length,y=0;y0&&Math.round(f)===f))return{vals:i};c=f}for(var h=e.calendar,p="start"===l,d="end"===l,m=t[r+"period0"],g=a(m,h)||0,v=[],y=[],x=[],b=i.length,_=0;_A;)k=o(k,-c,h);for(;k<=A;)k=o(k,c,h);T=o(k,-c,h)}else{for(k=g+(w=Math.round((A-g)/u))*u;k>A;)k-=u;for(;k<=A;)k+=u;T=k-u}v[_]=p?T:d?k:(T+k)/2,y[_]=T,x[_]=k}return{vals:v,starts:y,ends:x}}},{"../../constants/numerical":474,"../../lib":498,"fast-isnumeric":185}],547:[function(t,e,r){"use strict";e.exports={xaxis:{valType:"subplotid",dflt:"x",editType:"calc+clearAxisTypes"},yaxis:{valType:"subplotid",dflt:"y",editType:"calc+clearAxisTypes"}}},{}],548:[function(t,e,r){"use strict";var n=t("@plotly/d3"),i=t("fast-isnumeric"),a=t("../../lib"),o=t("../../constants/numerical").FP_SAFE,s=t("../../registry"),l=t("../../components/drawing"),c=t("./axis_ids"),u=c.getFromId,f=c.isLinked;function h(t,e){var r,n,i=[],o=t._fullLayout,s=d(o,e,0),l=d(o,e,1),c=m(t,e),u=c.min,f=c.max;if(0===u.length||0===f.length)return a.simpleMap(e.range,e.r2l);var h=u[0].val,g=f[0].val;for(r=1;r0&&((T=E-s(x)-l(b))>L?k/T>C&&(_=x,w=b,C=k/T):k/E>C&&(_={val:x.val,nopad:1},w={val:b.val,nopad:1},C=k/E));if(h===g){var P=h-1,I=h+1;if(M)if(0===h)i=[0,1];else{var O=(h>0?f:u).reduce((function(t,e){return Math.max(t,l(e))}),0),z=h/(1-Math.min(.5,O/E));i=h>0?[0,z]:[z,0]}else i=S?[Math.max(0,P),Math.max(1,I)]:[P,I]}else M?(_.val>=0&&(_={val:0,nopad:1}),w.val<=0&&(w={val:0,nopad:1})):S&&(_.val-C*s(_)<0&&(_={val:0,nopad:1}),w.val<=0&&(w={val:1,nopad:1})),C=(w.val-_.val-p(e,x.val,b.val))/(E-s(_)-l(w)),i=[_.val-C*s(_),w.val+C*l(w)];return v&&i.reverse(),a.simpleMap(i,e.l2r||Number)}function p(t,e,r){var 
n=0;if(t.rangebreaks)for(var i=t.locateBreaks(e,r),a=0;a0?r.ppadplus:r.ppadminus)||r.ppad||0),S=A((t._m>0?r.ppadminus:r.ppadplus)||r.ppad||0),E=A(r.vpadplus||r.vpad),L=A(r.vpadminus||r.vpad);if(!T){if(h=1/0,p=-1/0,w)for(n=0;n0&&(h=a),a>p&&a-o&&(h=a),a>p&&a=I;n--)P(n);return{min:d,max:m,opts:r}},concatExtremes:m};function m(t,e,r){var n,i,a,o=e._id,s=t._fullData,l=t._fullLayout,c=[],f=[];function h(t,e){for(n=0;n=r&&(c.extrapad||!o)){s=!1;break}i(e,c.val)&&c.pad<=r&&(o||!c.extrapad)&&(t.splice(l,1),l--)}if(s){var u=a&&0===e;t.push({val:e,pad:u?0:r,extrapad:!u&&o})}}function x(t){return i(t)&&Math.abs(t)=e}},{"../../components/drawing":383,"../../constants/numerical":474,"../../lib":498,"../../registry":633,"./axis_ids":553,"@plotly/d3":58,"fast-isnumeric":185}],549:[function(t,e,r){"use strict";var n=t("@plotly/d3"),i=t("fast-isnumeric"),a=t("../../plots/plots"),o=t("../../registry"),s=t("../../lib"),l=s.strTranslate,c=t("../../lib/svg_text_utils"),u=t("../../components/titles"),f=t("../../components/color"),h=t("../../components/drawing"),p=t("./layout_attributes"),d=t("./clean_ticks"),m=t("../../constants/numerical"),g=m.ONEMAXYEAR,v=m.ONEAVGYEAR,y=m.ONEMINYEAR,x=m.ONEMAXQUARTER,b=m.ONEAVGQUARTER,_=m.ONEMINQUARTER,w=m.ONEMAXMONTH,T=m.ONEAVGMONTH,k=m.ONEMINMONTH,A=m.ONEWEEK,M=m.ONEDAY,S=M/2,E=m.ONEHOUR,L=m.ONEMIN,C=m.ONESEC,P=m.MINUS_SIGN,I=m.BADNUM,O={K:"zeroline"},z={K:"gridline",L:"path"},D={K:"tick",L:"path"},R={K:"tick",L:"text"},F=t("../../constants/alignment"),B=F.MID_SHIFT,N=F.CAP_SHIFT,j=F.LINE_SPACING,U=F.OPPOSITE_SIDE,V=e.exports={};V.setConvert=t("./set_convert");var H=t("./axis_autotype"),q=t("./axis_ids"),G=q.idSort,Y=q.isLinked;V.id2name=q.id2name,V.name2id=q.name2id,V.cleanId=q.cleanId,V.list=q.list,V.listIds=q.listIds,V.getFromId=q.getFromId,V.getFromTrace=q.getFromTrace;var W=t("./autorange");V.getAutoRange=W.getAutoRange,V.findExtremes=W.findExtremes;function X(t){var e=1e-4*(t[1]-t[0]);return[t[0]-e,t[1]+e]}V.coerceRef=function(t,e,r,n,i,a){var 
o=n.charAt(n.length-1),l=r._fullLayout._subplots[o+"axis"],c=n+"ref",u={};return i||(i=l[0]||("string"==typeof a?a:a[0])),a||(a=i),l=l.concat(l.map((function(t){return t+" domain"}))),u[c]={valType:"enumerated",values:l.concat(a?"string"==typeof a?[a]:a:[]),dflt:i},s.coerce(t,e,u,c)},V.getRefType=function(t){return void 0===t?t:"paper"===t?"paper":"pixel"===t?"pixel":/( domain)$/.test(t)?"domain":"range"},V.coercePosition=function(t,e,r,n,i,a){var o,l;if("range"!==V.getRefType(n))o=s.ensureNumber,l=r(i,a);else{var c=V.getFromId(e,n);l=r(i,a=c.fraction2r(a)),o=c.cleanPos}t[i]=o(l)},V.cleanPosition=function(t,e,r){return("paper"===r||"pixel"===r?s.ensureNumber:V.getFromId(e,r).cleanPos)(t)},V.redrawComponents=function(t,e){e=e||V.listIds(t);var r=t._fullLayout;function n(n,i,a,s){for(var l=o.getComponentMethod(n,i),c={},u=0;u2e-6||((r-t._forceTick0)/t._minDtick%1+1.000001)%1>2e-6)&&(t._minDtick=0)):t._minDtick=0},V.saveRangeInitial=function(t,e){for(var r=V.list(t,"",!0),n=!1,i=0;i.3*h||u(n)||u(a))){var p=r.dtick/2;t+=t+p.8){var o=Number(r.substr(1));a.exactYears>.8&&o%12==0?t=V.tickIncrement(t,"M6","reverse")+1.5*M:a.exactMonths>.8?t=V.tickIncrement(t,"M1","reverse")+15.5*M:t-=S;var l=V.tickIncrement(t,r);if(l<=n)return l}return t}(y,t,v,c,a)),g=y,0;g<=u;)g=V.tickIncrement(g,v,!1,a);return{start:e.c2r(y,0,a),end:e.c2r(g,0,a),size:v,_dataSpan:u-c}},V.prepTicks=function(t,e){var r=s.simpleMap(t.range,t.r2l,void 0,void 0,e);if(t._dtickInit=t.dtick,t._tick0Init=t.tick0,"auto"===t.tickmode||!t.dtick){var n,a=t.nticks;a||("category"===t.type||"multicategory"===t.type?(n=t.tickfont?s.bigFont(t.tickfont.size||12):15,a=t._length/n):(n="y"===t._id.charAt(0)?40:80,a=s.constrain(t._length/n,4,9)+1),"radialaxis"===t._name&&(a*=2)),"array"===t.tickmode&&(a*=100),t._roughDTick=Math.abs(r[1]-r[0])/a,V.autoTicks(t,t._roughDTick),t._minDtick>0&&t.dtick<2*t._minDtick&&(t.dtick=t._minDtick,t.tick0=t.l2r(t._forceTick0))}"period"===t.ticklabelmode&&function(t){var e;function 
r(){return!(i(t.dtick)||"M"!==t.dtick.charAt(0))}var n=r(),a=V.getTickFormat(t);if(a){var o=t._dtickInit!==t.dtick;/%[fLQsSMX]/.test(a)||(/%[HI]/.test(a)?(e=E,o&&!n&&t.dticka&&f=o:d<=o;d=V.tickIncrement(d,t.dtick,l,t.calendar)){if(R++,t.rangebreaks&&!l){if(d=u)break}if(P.length>C||d===O)break;O=d;var F=!1;h&&d!==(0|d)&&(F=!0);var B={minor:F,value:d};L>1&&R%L&&(B.skipLabel=!0),P.push(B)}if(p&&function(t,e,r){for(var n=0;n0?(a=n-1,o=n):(a=n,o=n);var s,l=t[a].value,c=t[o].value,u=Math.abs(c-l),f=r||u,h=0;f>=y?h=u>=y&&u<=g?u:v:r===b&&f>=_?h=u>=_&&u<=x?u:b:f>=k?h=u>=k&&u<=w?u:T:r===A&&f>=A?h=A:f>=M?h=M:r===S&&f>=S?h=S:r===E&&f>=E&&(h=E),h>=u&&(h=u,s=!0);var p=i+h;if(e.rangebreaks&&h>0){for(var d=0,m=0;m<84;m++){var L=(m+.5)/84;e.maskBreaks(i*(1-L)+L*p)!==I&&d++}(h*=d/84)||(t[n].drop=!0),s&&u>A&&(h=u)}(h>0||0===n)&&(t[n].periodX=i+h/2)}}(P,t,t._definedDelta),t.rangebreaks){var N="y"===t._id.charAt(0),j=1;"auto"===t.tickmode&&(j=t.tickfont?t.tickfont.size:12);var U=NaN;for(z=P.length-1;z>-1;z--)if(P[z].drop)P.splice(z,1);else{P[z].value=Mt(P[z].value,t);var H=t.c2p(P[z].value);(N?U>H-j:Uu||Gu&&(q.periodX=u),G10||"01-01"!==n.substr(5)?t._tickround="d":t._tickround=+e.substr(1)%12==0?"y":"m";else if(e>=M&&a<=10||e>=15*M)t._tickround="d";else if(e>=L&&a<=16||e>=E)t._tickround="M";else if(e>=C&&a<=19||e>=L)t._tickround="S";else{var o=t.l2r(r+e).replace(/^-/,"").length;t._tickround=Math.max(a,o)-20,t._tickround<0&&(t._tickround=4)}}else if(i(e)||"L"===e.charAt(0)){var s=t.range.map(t.r2d||Number);i(e)||(e=Number(e.substr(1))),t._tickround=2-Math.floor(Math.log(e)/Math.LN10+.01);var l=Math.max(Math.abs(s[0]),Math.abs(s[1])),c=Math.floor(Math.log(l)/Math.LN10+.01),u=void 0===t.minexponent?3:t.minexponent;Math.abs(c)>u&&(ut(t.exponentformat)&&!ft(c)?t._tickexponent=3*Math.round((c-1)/3):t._tickexponent=c)}else t._tickround=null}function lt(t,e,r){var n=t.tickfont||{};return{x:e,dx:0,dy:0,text:r||"",fontSize:n.size,font:n.family,fontColor:n.color}}V.autoTicks=function(t,e){var 
r;function n(t){return Math.pow(t,Math.floor(Math.log(e)/Math.LN10))}if("date"===t.type){t.tick0=s.dateTick0(t.calendar,0);var a=2*e;if(a>v)e/=v,r=n(10),t.dtick="M"+12*ot(e,r,$);else if(a>T)e/=T,t.dtick="M"+ot(e,1,tt);else if(a>M){t.dtick=ot(e,M,t._hasDayOfWeekBreaks?[1,2,7,14]:rt);var o=V.getTickFormat(t),l="period"===t.ticklabelmode;l&&(t._rawTick0=t.tick0),/%[uVW]/.test(o)?t.tick0=s.dateTick0(t.calendar,2):t.tick0=s.dateTick0(t.calendar,1),l&&(t._dowTick0=t.tick0)}else a>E?t.dtick=ot(e,E,tt):a>L?t.dtick=ot(e,L,et):a>C?t.dtick=ot(e,C,et):(r=n(10),t.dtick=ot(e,r,$))}else if("log"===t.type){t.tick0=0;var c=s.simpleMap(t.range,t.r2l);if(e>.7)t.dtick=Math.ceil(e);else if(Math.abs(c[1]-c[0])<1){var u=1.5*Math.abs((c[1]-c[0])/e);e=Math.abs(Math.pow(10,c[1])-Math.pow(10,c[0]))/u,r=n(10),t.dtick="L"+ot(e,r,$)}else t.dtick=e>.3?"D2":"D1"}else"category"===t.type||"multicategory"===t.type?(t.tick0=0,t.dtick=Math.ceil(Math.max(e,1))):At(t)?(t.tick0=0,r=1,t.dtick=ot(e,r,at)):(t.tick0=0,r=n(10),t.dtick=ot(e,r,$));if(0===t.dtick&&(t.dtick=1),!i(t.dtick)&&"string"!=typeof t.dtick){var f=t.dtick;throw t.dtick=1,"ax.dtick error: "+String(f)}},V.tickIncrement=function(t,e,r,a){var o=r?-1:1;if(i(e))return s.increment(t,o*e);var l=e.charAt(0),c=o*Number(e.substr(1));if("M"===l)return s.incrementMonth(t,c,a);if("L"===l)return Math.log(Math.pow(10,t)+c)/Math.LN10;if("D"===l){var u="D2"===e?it:nt,f=t+.01*o,h=s.roundUp(s.mod(f,1),u,r);return Math.floor(f)+Math.log(n.round(Math.pow(10,h),1))/Math.LN10}throw"unrecognized dtick "+String(e)},V.tickFirst=function(t,e){var r=t.r2l||Number,a=s.simpleMap(t.range,r,void 0,void 0,e),o=a[1] ")}else t._prevDateHead=l,c+="
"+l;e.text=c}(t,o,r,c):"log"===u?function(t,e,r,n,a){var o=t.dtick,l=e.x,c=t.tickformat,u="string"==typeof o&&o.charAt(0);"never"===a&&(a="");n&&"L"!==u&&(o="L3",u="L");if(c||"L"===u)e.text=ht(Math.pow(10,l),t,a,n);else if(i(o)||"D"===u&&s.mod(l+.01,1)<.1){var f=Math.round(l),h=Math.abs(f),p=t.exponentformat;"power"===p||ut(p)&&ft(f)?(e.text=0===f?1:1===f?"10":"10"+(f>1?"":P)+h+"",e.fontSize*=1.25):("e"===p||"E"===p)&&h>2?e.text="1"+p+(f>0?"+":P)+h:(e.text=ht(Math.pow(10,l),t,"","fakehover"),"D1"===o&&"y"===t._id.charAt(0)&&(e.dy-=e.fontSize/6))}else{if("D"!==u)throw"unrecognized dtick "+String(o);e.text=String(Math.round(Math.pow(10,s.mod(l,1)))),e.fontSize*=.75}if("D1"===t.dtick){var d=String(e.text).charAt(0);"0"!==d&&"1"!==d||("y"===t._id.charAt(0)?e.dx-=e.fontSize/4:(e.dy+=e.fontSize/2,e.dx+=(t.range[1]>t.range[0]?1:-1)*e.fontSize*(l<0?.5:.25)))}}(t,o,0,c,m):"category"===u?function(t,e){var r=t._categories[Math.round(e.x)];void 0===r&&(r="");e.text=String(r)}(t,o):"multicategory"===u?function(t,e,r){var n=Math.round(e.x),i=t._categories[n]||[],a=void 0===i[1]?"":String(i[1]),o=void 0===i[0]?"":String(i[0]);r?e.text=o+" - "+a:(e.text=a,e.text2=o)}(t,o,r):At(t)?function(t,e,r,n,i){if("radians"!==t.thetaunit||r)e.text=ht(e.x,t,i,n);else{var a=e.x/180;if(0===a)e.text="0";else{var o=function(t){function e(t,e){return Math.abs(t-e)<=1e-6}var r=function(t){for(var r=1;!e(Math.round(t*r)/r,t);)r*=10;return r}(t),n=t*r,i=Math.abs(function t(r,n){return e(n,0)?r:t(n,r%n)}(n,r));return[Math.round(n/i),Math.round(r/i)]}(a);if(o[1]>=100)e.text=ht(s.deg2rad(e.x),t,i,n);else{var 
l=e.x<0;1===o[1]?1===o[0]?e.text="\u03c0":e.text=o[0]+"\u03c0":e.text=["",o[0],"","\u2044","",o[1],"","\u03c0"].join(""),l&&(e.text=P+e.text)}}}}(t,o,r,c,m):function(t,e,r,n,i){"never"===i?i="":"all"===t.showexponent&&Math.abs(e.x/t.dtick)<1e-6&&(i="hide");e.text=ht(e.x,t,i,n)}(t,o,0,c,m),n||(t.tickprefix&&!d(t.showtickprefix)&&(o.text=t.tickprefix+o.text),t.ticksuffix&&!d(t.showticksuffix)&&(o.text+=t.ticksuffix)),"boundaries"===t.tickson||t.showdividers){var g=function(e){var r=t.l2p(e);return r>=0&&r<=t._length?e:null};o.xbnd=[g(o.x-.5),g(o.x+t.dtick-.5)]}return o},V.hoverLabelText=function(t,e,r){r&&(t=s.extendFlat({},t,{hoverformat:r}));var n=Array.isArray(e)?e[0]:e,i=Array.isArray(e)?e[1]:void 0;if(void 0!==i&&i!==n)return V.hoverLabelText(t,n,r)+" - "+V.hoverLabelText(t,i,r);var a="log"===t.type&&n<=0,o=V.tickText(t,t.c2l(a?-n:n),"hover").text;return a?0===n?"0":P+o:o};var ct=["f","p","n","\u03bc","m","","k","M","G","T"];function ut(t){return"SI"===t||"B"===t}function ft(t){return t>14||t<-15}function ht(t,e,r,n){var a=t<0,o=e._tickround,l=r||e.exponentformat||"B",c=e._tickexponent,u=V.getTickFormat(e),f=e.separatethousands;if(n){var h={exponentformat:l,minexponent:e.minexponent,dtick:"none"===e.showexponent?e.dtick:i(t)&&Math.abs(t)||1,range:"none"===e.showexponent?e.range.map(e.r2d):[0,t||1]};st(h),o=(Number(h._tickround)||0)+4,c=h._tickexponent,e.hoverformat&&(u=e.hoverformat)}if(u)return e._numFormat(u)(t).replace(/-/g,P);var p,d=Math.pow(10,-o)/2;if("none"===l&&(c=0),(t=Math.abs(t))"+p+"":"B"===l&&9===c?t+="B":ut(l)&&(t+=ct[c/3+5]));return a?P+t:t}function pt(t,e){for(var r=[],n={},i=0;i1&&r=i.min&&t=0,a=u(t,e[1])<=0;return(r||i)&&(n||a)}if(t.tickformatstops&&t.tickformatstops.length>0)switch(t.type){case"date":case"linear":for(e=0;e=o(i)))){r=n;break}break;case"log":for(e=0;e0?r.bottom-f:0,h)))),e.automargin){n={x:0,y:0,r:0,l:0,t:0,b:0};var 
p=[0,1];if("x"===d){if("b"===l?n[l]=e._depth:(n[l]=e._depth=Math.max(r.width>0?f-r.top:0,h),p.reverse()),r.width>0){var g=r.right-(e._offset+e._length);g>0&&(n.xr=1,n.r=g);var v=e._offset-r.left;v>0&&(n.xl=0,n.l=v)}}else if("l"===l?n[l]=e._depth=Math.max(r.height>0?f-r.left:0,h):(n[l]=e._depth=Math.max(r.height>0?r.right-f:0,h),p.reverse()),r.height>0){var y=r.bottom-(e._offset+e._length);y>0&&(n.yb=0,n.b=y);var x=e._offset-r.top;x>0&&(n.yt=1,n.t=x)}n[m]="free"===e.anchor?e.position:e._anchorAxis.domain[p[0]],e.title.text!==c._dfltTitle[d]&&(n[l]+=gt(e)+(e.title.standoff||0)),e.mirror&&"free"!==e.anchor&&((i={x:0,y:0,r:0,l:0,t:0,b:0})[u]=e.linewidth,e.mirror&&!0!==e.mirror&&(i[u]+=h),!0===e.mirror||"ticks"===e.mirror?i[m]=e._anchorAxis.domain[p[1]]:"all"!==e.mirror&&"allticks"!==e.mirror||(i[m]=[e._counterDomainMin,e._counterDomainMax][p[1]]))}K&&(s=o.getComponentMethod("rangeslider","autoMarginOpts")(t,e)),a.autoMargin(t,xt(e),n),a.autoMargin(t,bt(e),i),a.autoMargin(t,_t(e),s)})),r.skipTitle||K&&"bottom"===e.side||Z.push((function(){return function(t,e){var r,n=t._fullLayout,i=e._id,a=i.charAt(0),o=e.title.font.size;if(e.title.hasOwnProperty("standoff"))r=e._depth+e.title.standoff+gt(e);else{var s=St(e);if("multicategory"===e.type)r=e._depth;else{var l=1.5*o;s&&(l=.5*o,"outside"===e.ticks&&(l+=e.ticklen)),r=10+l+(e.linewidth?e.linewidth-1:0)}s||(r+="x"===a?"top"===e.side?o*(e.showticklabels?1:0):o*(e.showticklabels?1.5:.5):"right"===e.side?o*(e.showticklabels?1:.5):o*(e.showticklabels?.5:0))}var c,f,p,d,m=V.getPxPosition(t,e);"x"===a?(f=e._offset+e._length/2,p="top"===e.side?m-r:m+r):(p=e._offset+e._length/2,f="right"===e.side?m+r:m-r,c={rotate:"-90",offset:0});if("multicategory"!==e.type){var g=e._selections[e._id+"tick"];if(d={selection:g,side:e.side},g&&g.node()&&g.node().parentNode){var v=h.getTranslate(g.node().parentNode);d.offsetLeft=v.x,d.offsetTop=v.y}e.title.hasOwnProperty("standoff")&&(d.pad=0)}return 
u.draw(t,i+"title",{propContainer:e,propName:e._name+".title.text",placeholder:n._dfltTitle[a],avoid:d,transform:c,attributes:{x:f,y:p,"text-anchor":"middle"}})}(t,e)})),s.syncOrAsync(Z)}}function Q(t){var r=p+(t||"tick");return w[r]||(w[r]=function(t,e){var r,n,i,a;t._selections[e].size()?(r=1/0,n=-1/0,i=1/0,a=-1/0,t._selections[e].each((function(){var t=yt(this),e=h.bBox(t.node().parentNode);r=Math.min(r,e.top),n=Math.max(n,e.bottom),i=Math.min(i,e.left),a=Math.max(a,e.right)}))):(r=0,n=0,i=0,a=0);return{top:r,bottom:n,left:i,right:a,height:n-r,width:a-i}}(e,r)),w[r]}},V.getTickSigns=function(t){var e=t._id.charAt(0),r={x:"top",y:"right"}[e],n=t.side===r?1:-1,i=[-1,1,n,-n];return"inside"!==t.ticks==("x"===e)&&(i=i.map((function(t){return-t}))),t.side&&i.push({l:-1,t:-1,r:1,b:1}[t.side.charAt(0)]),i},V.makeTransTickFn=function(t){return"x"===t._id.charAt(0)?function(e){return l(t._offset+t.l2p(e.x),0)}:function(e){return l(0,t._offset+t.l2p(e.x))}},V.makeTransTickLabelFn=function(t){var e=function(t){var e=t.ticklabelposition||"",r=function(t){return-1!==e.indexOf(t)},n=r("top"),i=r("left"),a=r("right"),o=r("bottom"),s=r("inside"),l=o||i||n||a;if(!l&&!s)return[0,0];var c=t.side,u=l?(t.tickwidth||0)/2:0,f=3,h=t.tickfont?t.tickfont.size:12;(o||n)&&(u+=h*N,f+=(t.linewidth||0)/2);(i||a)&&(u+=(t.linewidth||0)/2,f+=3);s&&"top"===c&&(f-=h*(1-N));(i||n)&&(u=-u);"bottom"!==c&&"right"!==c||(f=-f);return[l?u:0,s?f:0]}(t),r=e[0],n=e[1];return"x"===t._id.charAt(0)?function(e){return l(r+t._offset+t.l2p(dt(e)),n)}:function(e){return l(n,r+t._offset+t.l2p(dt(e)))}},V.makeTickPath=function(t,e,r,n){n=void 0!==n?n:t.ticklen;var i=t._id.charAt(0),a=(t.linewidth||1)/2;return"x"===i?"M0,"+(e+a*r)+"v"+n*r:"M"+(e+a*r)+",0h"+n*r},V.makeLabelFns=function(t,e,r){var 
n=t.ticklabelposition||"",a=function(t){return-1!==n.indexOf(t)},o=a("top"),l=a("left"),c=a("right"),u=a("bottom")||l||o||c,f=a("inside"),h="inside"===n&&"inside"===t.ticks||!f&&"outside"===t.ticks&&"boundaries"!==t.tickson,p=0,d=0,m=h?t.ticklen:0;if(f?m*=-1:u&&(m=0),h&&(p+=m,r)){var g=s.deg2rad(r);p=m*Math.cos(g)+1,d=m*Math.sin(g)}t.showticklabels&&(h||t.showline)&&(p+=.2*t.tickfont.size);var v,y,x,b,_,w={labelStandoff:p+=(t.linewidth||1)/2*(f?-1:1),labelShift:d},T=0,k=t.side,A=t._id.charAt(0),M=t.tickangle;if("x"===A)b=(_=!f&&"bottom"===k||f&&"top"===k)?1:-1,f&&(b*=-1),v=d*b,y=e+p*b,x=_?1:-.2,90===Math.abs(M)&&(f?x+=B:x=-90===M&&"bottom"===k?N:90===M&&"top"===k?B:.5,T=B/2*(M/90)),w.xFn=function(t){return t.dx+v+T*t.fontSize},w.yFn=function(t){return t.dy+y+t.fontSize*x},w.anchorFn=function(t,e){if(u){if(l)return"end";if(c)return"start"}return i(e)&&0!==e&&180!==e?e*b<0!==f?"end":"start":"middle"},w.heightFn=function(e,r,n){return r<-60||r>60?-.5*n:"top"===t.side!==f?-n:0};else if("y"===A){if(b=(_=!f&&"left"===k||f&&"right"===k)?1:-1,f&&(b*=-1),v=p,y=d*b,x=0,f||90!==Math.abs(M)||(x=-90===M&&"left"===k||90===M&&"right"===k?N:.5),f){var S=i(M)?+M:0;if(0!==S){var E=s.deg2rad(S);T=Math.abs(Math.sin(E))*N*b,x=0}}w.xFn=function(t){return t.dx+e-(v+t.fontSize*x)*b+T*t.fontSize},w.yFn=function(t){return t.dy+y+t.fontSize*B},w.anchorFn=function(t,e){return i(e)&&90===Math.abs(e)?"middle":_?"end":"start"},w.heightFn=function(e,r,n){return"right"===t.side&&(r*=-1),r<-30?-n:r<30?-.5*n:0}}return w},V.drawTicks=function(t,e,r){r=r||{};var n=e._id+"tick",i=r.vals;"period"===e.ticklabelmode&&(i=i.slice()).shift();var a=r.layer.selectAll("path."+n).data(e.ticks?i:[],mt);a.exit().remove(),a.enter().append("path").classed(n,1).classed("ticks",1).classed("crisp",!1!==r.crisp).call(f.stroke,e.tickcolor).style("stroke-width",h.crispRound(t,e.tickwidth,1)+"px").attr("d",r.path).style("display",null),Et(e,[D]),a.attr("transform",r.transFn)},V.drawGrid=function(t,e,r){r=r||{};var 
n=e._id+"grid",i=r.vals,a=r.counterAxis;if(!1===e.showgrid)i=[];else if(a&&V.shouldShowZeroLine(t,e,a))for(var o="array"===e.tickmode,s=0;sp||a.leftp||a.top+(e.tickangle?0:t.fontSize/4)e["_visibleLabelMin_"+r._id]?l.style("display","none"):"tick"!==t.K||i||l.style("display",null)}))}))}))}))},x(v,g+1?g:m);var b=null;e._selections&&(e._selections[f]=v);var _=[function(){return y.length&&Promise.all(y)}];e.automargin&&a._redrawFromAutoMarginCount&&90===g?(b=90,_.push((function(){x(v,g)}))):_.push((function(){if(x(v,m),p.length&&"x"===u&&!i(m)&&("log"!==e.type||"D"!==String(e.dtick).charAt(0))){b=0;var t,n=0,a=[];if(v.each((function(t){n=Math.max(n,t.fontSize);var r=e.l2p(t.x),i=yt(this),o=h.bBox(i.node());a.push({top:0,bottom:10,height:10,left:r-o.width/2,right:r+o.width/2+2,width:o.width+2})})),"boundaries"!==e.tickson&&!e.showdividers||r.secondary){var o=p.length,l=Math.abs((p[o-1].x-p[0].x)*e._m)/(o-1),c=e.ticklabelposition||"",f=function(t){return-1!==c.indexOf(t)},d=f("top"),g=f("left"),y=f("right"),_=f("bottom")||g||d||y?(e.tickwidth||0)+6:0,w=l<2.5*n||"multicategory"===e.type||"realaxis"===e._name;for(t=0;t1)for(n=1;n2*o}(i,e))return"date";var g="strict"!==r.autotypenumbers;return function(t,e){for(var r=t.length,n=f(r),i=0,o=0,s={},u=0;u2*i}(i,g)?"category":function(t,e){for(var r=t.length,n=0;n=2){var l,c,u="";if(2===o.length)for(l=0;l<2;l++)if(c=x(o[l])){u=m;break}var f=i("pattern",u);if(f===m)for(l=0;l<2;l++)(c=x(o[l]))&&(e.bounds[l]=o[l]=c-1);if(f)for(l=0;l<2;l++)switch(c=o[l],f){case m:if(!n(c))return void(e.enabled=!1);if((c=+c)!==Math.floor(c)||c<0||c>=7)return void(e.enabled=!1);e.bounds[l]=o[l]=c;break;case g:if(!n(c))return void(e.enabled=!1);if((c=+c)<0||c>24)return void(e.enabled=!1);e.bounds[l]=o[l]=c}if(!1===r.autorange){var h=r.range;if(h[0]h[1])return void(e.enabled=!1)}else if(o[0]>h[0]&&o[1]n?1:-1:+(t.substr(1)||1)-+(e.substr(1)||1)},r.ref2id=function(t){return!!/^[xyz]/.test(t)&&t.split(" ")[0]},r.isLinked=function(t,e){return 
a(e,t._axisMatchGroups)||a(e,t._axisConstraintGroups)}},{"../../registry":633,"./constants":556}],554:[function(t,e,r){"use strict";e.exports=function(t,e,r,n){if("category"===e.type){var i,a=t.categoryarray,o=Array.isArray(a)&&a.length>0;o&&(i="array");var s,l=r("categoryorder",i);"array"===l&&(s=r("categoryarray")),o||"array"!==l||(l=e.categoryorder="trace"),"trace"===l?e._initialCategories=[]:"array"===l?e._initialCategories=s.slice():(s=function(t,e){var r,n,i,a=e.dataAttr||t._id.charAt(0),o={};if(e.axData)r=e.axData;else for(r=[],n=0;nn?i.substr(n):a.substr(r))+o:i+a+t*e:o}function g(t,e){for(var r=e._size,n=r.h/r.w,i={},a=Object.keys(t),o=0;oc*x)||T)for(r=0;rO&&FP&&(P=F);h/=(P-C)/(2*I),C=l.l2r(C),P=l.l2r(P),l.range=l._input.range=S=0?Math.min(t,.9):1/(1/Math.max(t,-.3)+3.222))}function N(t,e,r,n,i){return t.append("path").attr("class","zoombox").style({fill:e>.2?"rgba(0,0,0,0)":"rgba(255,255,255,0)","stroke-width":0}).attr("transform",c(r,n)).attr("d",i+"Z")}function j(t,e,r){return t.append("path").attr("class","zoombox-corners").style({fill:f.background,stroke:f.defaultLine,"stroke-width":1,opacity:0}).attr("transform",c(e,r)).attr("d","M0,0Z")}function U(t,e,r,n,i,a){t.attr("d",n+"M"+r.l+","+r.t+"v"+r.h+"h"+r.w+"v-"+r.h+"h-"+r.w+"Z"),V(t,e,i,a)}function V(t,e,r,n){r||(t.transition().style("fill",n>.2?"rgba(0,0,0,0.4)":"rgba(255,255,255,0.3)").duration(200),e.transition().style("opacity",1).duration(200))}function H(t){n.select(t).selectAll(".zoombox,.js-zoombox-backdrop,.js-zoombox-menu,.zoombox-corners").remove()}function q(t){I&&t.data&&t._context.showTips&&(i.notifier(i._(t,"Double-click to zoom back out"),"long"),I=!1)}function G(t){var e=Math.floor(Math.min(t.b-t.t,t.r-t.l,P)/2);return"M"+(t.l-3.5)+","+(t.t-.5+e)+"h3v"+-e+"h"+e+"v-3h-"+(e+3)+"ZM"+(t.r+3.5)+","+(t.t-.5+e)+"h-3v"+-e+"h"+-e+"v-3h"+(e+3)+"ZM"+(t.r+3.5)+","+(t.b+.5-e)+"h-3v"+e+"h"+-e+"v3h"+(e+3)+"ZM"+(t.l-3.5)+","+(t.b+.5-e)+"h3v"+e+"h"+e+"v3h-"+(e+3)+"Z"}function Y(t,e,r,n,a){for(var 
o,s,l,c,u=!1,f={},h={},p=(a||{}).xaHash,d=(a||{}).yaHash,m=0;m=0)i._fullLayout._deactivateShape(i);else{var o=i._fullLayout.clickmode;if(H(i),2!==t||vt||qt(),gt)o.indexOf("select")>-1&&S(r,i,J,K,e.id,Pt),o.indexOf("event")>-1&&p.click(i,r,e.id);else if(1===t&&vt){var s=m?O:I,c="s"===m||"w"===v?0:1,f=s._name+".range["+c+"]",h=function(t,e){var r,n=t.range[e],i=Math.abs(n-t.range[1-e]);return"date"===t.type?n:"log"===t.type?(r=Math.ceil(Math.max(0,-Math.log(i)/Math.LN10))+3,a("."+r+"g")(Math.pow(10,n))):(r=Math.floor(Math.log(Math.abs(n))/Math.LN10)-Math.floor(Math.log(i)/Math.LN10)+4,a("."+String(r)+"g")(n))}(s,c),d="left",g="middle";if(s.fixedrange)return;m?(g="n"===m?"top":"bottom","right"===s.side&&(d="right")):"e"===v&&(d="right"),i._context.showAxisRangeEntryBoxes&&n.select(bt).call(u.makeEditable,{gd:i,immediate:!0,background:i._fullLayout.paper_bgcolor,text:String(h),fill:s.tickfont?s.tickfont.color:"#444",horizontalAlign:d,verticalAlign:g}).on("edit",(function(t){var e=s.d2r(t);void 0!==e&&l.call("_guiRelayout",i,f,e)}))}}}function zt(e,r){if(t._transitioningWithDuration)return!1;var n=Math.max(0,Math.min(tt,pt*e+_t)),i=Math.max(0,Math.min(et,dt*r+wt)),a=Math.abs(n-_t),o=Math.abs(i-wt);function s(){St="",Tt.r=Tt.l,Tt.t=Tt.b,Lt.attr("d","M0,0Z")}if(Tt.l=Math.min(_t,n),Tt.r=Math.max(_t,n),Tt.t=Math.min(wt,i),Tt.b=Math.max(wt,i),rt.isSubplotConstrained)a>P||o>P?(St="xy",a/tt>o/et?(o=a*et/tt,wt>i?Tt.t=wt-o:Tt.b=wt+o):(a=o*tt/et,_t>n?Tt.l=_t-a:Tt.r=_t+a),Lt.attr("d",G(Tt))):s();else if(nt.isSubplotConstrained)if(a>P||o>P){St="xy";var l=Math.min(Tt.l/tt,(et-Tt.b)/et),c=Math.max(Tt.r/tt,(et-Tt.t)/et);Tt.l=l*tt,Tt.r=c*tt,Tt.b=(1-l)*et,Tt.t=(1-c)*et,Lt.attr("d",G(Tt))}else s();else!at||o0){var u;if(nt.isSubplotConstrained||!it&&1===at.length){for(u=0;um[1]-1/4096&&(e.domain=s),i.noneOrAll(t.domain,e.domain,s)}return r("layer"),e}},{"../../lib":498,"fast-isnumeric":185}],568:[function(t,e,r){"use strict";var 
n=t("./show_dflt");e.exports=function(t,e,r,i,a){a||(a={});var o=a.tickSuffixDflt,s=n(t);r("tickprefix")&&r("showtickprefix",s),r("ticksuffix",o)&&r("showticksuffix",s)}},{"./show_dflt":572}],569:[function(t,e,r){"use strict";var n=t("../../constants/alignment").FROM_BL;e.exports=function(t,e,r){void 0===r&&(r=n[t.constraintoward||"center"]);var i=[t.r2l(t.range[0]),t.r2l(t.range[1])],a=i[0]+(i[1]-i[0])*r;t.range=t._input.range=[t.l2r(a+(i[0]-a)*e),t.l2r(a+(i[1]-a)*e)],t.setScale()}},{"../../constants/alignment":466}],570:[function(t,e,r){"use strict";var n=t("polybooljs"),i=t("../../registry"),a=t("../../components/drawing").dashStyle,o=t("../../components/color"),s=t("../../components/fx"),l=t("../../components/fx/helpers").makeEventData,c=t("../../components/dragelement/helpers"),u=c.freeMode,f=c.rectMode,h=c.drawMode,p=c.openMode,d=c.selectMode,m=t("../../components/shapes/draw_newshape/display_outlines"),g=t("../../components/shapes/draw_newshape/helpers").handleEllipse,v=t("../../components/shapes/draw_newshape/newshapes"),y=t("../../lib"),x=t("../../lib/polygon"),b=t("../../lib/throttle"),_=t("./axis_ids").getFromId,w=t("../../lib/clear_gl_canvases"),T=t("../../plot_api/subroutines").redrawReglTraces,k=t("./constants"),A=k.MINSELECT,M=x.filter,S=x.tester,E=t("./handle_outline").clearSelect,L=t("./helpers"),C=L.p2r,P=L.axValue,I=L.getTransform;function O(t,e,r,n,i,a,o){var s,l,c,u,f,h,d,g,v,y=e._hoverdata,x=e._fullLayout.clickmode.indexOf("event")>-1,b=[];if(function(t){return t&&Array.isArray(t)&&!0!==t[0].hoverOnBox}(y)){F(t,e,a);var _=function(t,e){var r,n,i=t[0],a=-1,o=[];for(n=0;n0?function(t,e){var r,n,i,a=[];for(i=0;i0&&a.push(r);if(1===a.length&&a[0]===e.searchInfo&&(n=e.searchInfo.cd[0].trace).selectedpoints.length===e.pointNumbers.length){for(i=0;i1)return!1;if((i+=r.selectedpoints.length)>1)return!1}return 1===i}(s)&&(h=j(_))){for(o&&o.remove(),v=0;v=0&&n._fullLayout._deactivateShape(n),h(e)){var 
a=n._fullLayout._zoomlayer.selectAll(".select-outline-"+r.id);if(a&&n._fullLayout._drawing){var o=v(a,t);o&&i.call("_guiRelayout",n,{shapes:o}),n._fullLayout._drawing=!1}}r.selection={},r.selection.selectionDefs=t.selectionDefs=[],r.selection.mergedPolygons=t.mergedPolygons=[]}function N(t,e,r,n){var i,a,o,s=[],l=e.map((function(t){return t._id})),c=r.map((function(t){return t._id}));for(o=0;o0?n[0]:r;return!!e.selectedpoints&&e.selectedpoints.indexOf(i)>-1}function U(t,e,r){var n,a,o,s;for(n=0;n=0)L._fullLayout._deactivateShape(L);else if(!_){var r=z.clickmode;b.done(mt).then((function(){if(b.clear(mt),2===t){for(ft.remove(),$=0;$-1&&O(e,L,i.xaxes,i.yaxes,i.subplot,i,ft),"event"===r&&L.emit("plotly_selected",void 0);s.click(L,e)})).catch(y.error)}},i.doneFn=function(){dt.remove(),b.done(mt).then((function(){b.clear(mt),i.gd.emit("plotly_selected",et),Q&&i.selectionDefs&&(Q.subtract=ut,i.selectionDefs.push(Q),i.mergedPolygons.length=0,[].push.apply(i.mergedPolygons,K)),i.doneFnCompleted&&i.doneFnCompleted(gt)})).catch(y.error),_&&B(i)}},clearSelect:E,clearSelectionsCache:B,selectOnClick:O}},{"../../components/color":361,"../../components/dragelement/helpers":379,"../../components/drawing":383,"../../components/fx":401,"../../components/fx/helpers":397,"../../components/shapes/draw_newshape/display_outlines":449,"../../components/shapes/draw_newshape/helpers":450,"../../components/shapes/draw_newshape/newshapes":451,"../../lib":498,"../../lib/clear_gl_canvases":482,"../../lib/polygon":510,"../../lib/throttle":525,"../../plot_api/subroutines":539,"../../registry":633,"./axis_ids":553,"./constants":556,"./handle_outline":560,"./helpers":561,polybooljs:249}],571:[function(t,e,r){"use strict";var 
n=t("@plotly/d3"),i=t("d3-time-format").utcFormat,a=t("../../lib"),o=a.numberFormat,s=t("fast-isnumeric"),l=a.cleanNumber,c=a.ms2DateTime,u=a.dateTime2ms,f=a.ensureNumber,h=a.isArrayOrTypedArray,p=t("../../constants/numerical"),d=p.FP_SAFE,m=p.BADNUM,g=p.LOG_CLIP,v=p.ONEWEEK,y=p.ONEDAY,x=p.ONEHOUR,b=p.ONEMIN,_=p.ONESEC,w=t("./axis_ids"),T=t("./constants"),k=T.HOUR_PATTERN,A=T.WEEKDAY_PATTERN;function M(t){return Math.pow(10,t)}function S(t){return null!=t}e.exports=function(t,e){e=e||{};var r=t._id||"x",p=r.charAt(0);function E(e,r){if(e>0)return Math.log(e)/Math.LN10;if(e<=0&&r&&t.range&&2===t.range.length){var n=t.range[0],i=t.range[1];return.5*(n+i-2*g*Math.abs(n-i))}return m}function L(e,r,n,i){if((i||{}).msUTC&&s(e))return+e;var o=u(e,n||t.calendar);if(o===m){if(!s(e))return m;e=+e;var l=Math.floor(10*a.mod(e+.05,1)),c=Math.round(e-l/10);o=u(new Date(c))+l/10}return o}function C(e,r,n){return c(e,r,n||t.calendar)}function P(e){return t._categories[Math.round(e)]}function I(e){if(S(e)){if(void 0===t._categoriesMap&&(t._categoriesMap={}),void 0!==t._categoriesMap[e])return t._categoriesMap[e];t._categories.push("number"==typeof e?String(e):e);var r=t._categories.length-1;return t._categoriesMap[e]=r,r}return m}function O(e){if(t._categoriesMap)return t._categoriesMap[e]}function z(t){var e=O(t);return void 0!==e?e:s(t)?+t:void 0}function D(t){return s(t)?+t:O(t)}function R(t,e,r){return n.round(r+e*t,2)}function F(t,e,r){return(t-r)/e}var B=function(e){return s(e)?R(e,t._m,t._b):m},N=function(e){return F(e,t._m,t._b)};if(t.rangebreaks){var j="y"===p;B=function(e){if(!s(e))return m;var r=t._rangebreaks.length;if(!r)return R(e,t._m,t._b);var n=j;t.range[0]>t.range[1]&&(n=!n);for(var i=n?-1:1,a=i*e,o=0,l=0;lu)){o=a<(c+u)/2?l:l+1;break}o=l+1}var f=t._B[o]||0;return isFinite(f)?R(e,t._m2,f):0},N=function(e){var r=t._rangebreaks.length;if(!r)return F(e,t._m,t._b);for(var n=0,i=0;it._rangebreaks[i].pmax&&(n=i+1);return 
F(e,t._m2,t._B[n])}}t.c2l="log"===t.type?E:f,t.l2c="log"===t.type?M:f,t.l2p=B,t.p2l=N,t.c2p="log"===t.type?function(t,e){return B(E(t,e))}:B,t.p2c="log"===t.type?function(t){return M(N(t))}:N,-1!==["linear","-"].indexOf(t.type)?(t.d2r=t.r2d=t.d2c=t.r2c=t.d2l=t.r2l=l,t.c2d=t.c2r=t.l2d=t.l2r=f,t.d2p=t.r2p=function(e){return t.l2p(l(e))},t.p2d=t.p2r=N,t.cleanPos=f):"log"===t.type?(t.d2r=t.d2l=function(t,e){return E(l(t),e)},t.r2d=t.r2c=function(t){return M(l(t))},t.d2c=t.r2l=l,t.c2d=t.l2r=f,t.c2r=E,t.l2d=M,t.d2p=function(e,r){return t.l2p(t.d2r(e,r))},t.p2d=function(t){return M(N(t))},t.r2p=function(e){return t.l2p(l(e))},t.p2r=N,t.cleanPos=f):"date"===t.type?(t.d2r=t.r2d=a.identity,t.d2c=t.r2c=t.d2l=t.r2l=L,t.c2d=t.c2r=t.l2d=t.l2r=C,t.d2p=t.r2p=function(e,r,n){return t.l2p(L(e,0,n))},t.p2d=t.p2r=function(t,e,r){return C(N(t),e,r)},t.cleanPos=function(e){return a.cleanDate(e,m,t.calendar)}):"category"===t.type?(t.d2c=t.d2l=I,t.r2d=t.c2d=t.l2d=P,t.d2r=t.d2l_noadd=z,t.r2c=function(e){var r=D(e);return void 0!==r?r:t.fraction2r(.5)},t.l2r=t.c2r=f,t.r2l=D,t.d2p=function(e){return t.l2p(t.r2c(e))},t.p2d=function(t){return P(N(t))},t.r2p=t.d2p,t.p2r=N,t.cleanPos=function(t){return"string"==typeof t&&""!==t?t:f(t)}):"multicategory"===t.type&&(t.r2d=t.c2d=t.l2d=P,t.d2r=t.d2l_noadd=z,t.r2c=function(e){var r=z(e);return void 0!==r?r:t.fraction2r(.5)},t.r2c_just_indices=O,t.l2r=t.c2r=f,t.r2l=z,t.d2p=function(e){return t.l2p(t.r2c(e))},t.p2d=function(t){return P(N(t))},t.r2p=t.d2p,t.p2r=N,t.cleanPos=function(t){return Array.isArray(t)||"string"==typeof t&&""!==t?t:f(t)},t.setupMultiCategory=function(n){var i,o,s=t._traceIndices,l=t._matchGroup;if(l&&0===t._categories.length)for(var c in l)if(c!==r){var u=e[w.id2name(c)];s=s.concat(u._traceIndices)}var f=[[0,{}],[0,{}]],d=[];for(i=0;id&&(o[n]=d),o[0]===o[1]){var c=Math.max(1,Math.abs(1e-6*o[0]));o[0]-=c,o[1]+=c}}else a.nestedProperty(t,e).set(i)},t.setScale=function(r){var n=e._size;if(t.overlaying){var 
i=w.getFromId({_fullLayout:e},t.overlaying);t.domain=i.domain}var a=r&&t._r?"_r":"range",o=t.calendar;t.cleanRange(a);var s,l,c=t.r2l(t[a][0],o),u=t.r2l(t[a][1],o),f="y"===p;if((f?(t._offset=n.t+(1-t.domain[1])*n.h,t._length=n.h*(t.domain[1]-t.domain[0]),t._m=t._length/(c-u),t._b=-t._m*u):(t._offset=n.l+t.domain[0]*n.w,t._length=n.w*(t.domain[1]-t.domain[0]),t._m=t._length/(u-c),t._b=-t._m*c),t._rangebreaks=[],t._lBreaks=0,t._m2=0,t._B=[],t.rangebreaks)&&(t._rangebreaks=t.locateBreaks(Math.min(c,u),Math.max(c,u)),t._rangebreaks.length)){for(s=0;su&&(h=!h),h&&t._rangebreaks.reverse();var d=h?-1:1;for(t._m2=d*t._length/(Math.abs(u-c)-t._lBreaks),t._B.push(-t._m2*(f?u:c)),s=0;si&&(i+=7,oi&&(i+=24,o=n&&o=n&&e=s.min&&(ts.max&&(s.max=n),i=!1)}i&&c.push({min:t,max:n})}};for(n=0;nr.duration?(!function(){for(var r={},n=0;n rect").call(o.setTranslate,0,0).call(o.setScale,1,1),t.plot.call(o.setTranslate,e._offset,r._offset).call(o.setScale,1,1);var n=t.plot.selectAll(".scatterlayer .trace");n.selectAll(".point").call(o.setPointGroupScale,1,1),n.selectAll(".textpoint").call(o.setTextPointsScale,1,1),n.call(o.hideOutsideRangePoints,t)}function g(e,r){var n=e.plotinfo,i=n.xaxis,l=n.yaxis,c=i._length,u=l._length,f=!!e.xr1,h=!!e.yr1,p=[];if(f){var d=a.simpleMap(e.xr0,i.r2l),m=a.simpleMap(e.xr1,i.r2l),g=d[1]-d[0],v=m[1]-m[0];p[0]=(d[0]*(1-r)+r*m[0]-d[0])/(d[1]-d[0])*c,p[2]=c*(1-r+r*v/g),i.range[0]=i.l2r(d[0]*(1-r)+r*m[0]),i.range[1]=i.l2r(d[1]*(1-r)+r*m[1])}else p[0]=0,p[2]=c;if(h){var y=a.simpleMap(e.yr0,l.r2l),x=a.simpleMap(e.yr1,l.r2l),b=y[1]-y[0],_=x[1]-x[0];p[1]=(y[1]*(1-r)+r*x[1]-y[1])/(y[0]-y[1])*u,p[3]=u*(1-r+r*_/b),l.range[0]=i.l2r(y[0]*(1-r)+r*x[0]),l.range[1]=l.l2r(y[1]*(1-r)+r*x[1])}else p[1]=0,p[3]=u;s.drawOne(t,i,{skipTitle:!0}),s.drawOne(t,l,{skipTitle:!0}),s.redrawComponents(t,[i._id,l._id]);var 
w=f?c/p[2]:1,T=h?u/p[3]:1,k=f?p[0]:0,A=h?p[1]:0,M=f?p[0]/p[2]*c:0,S=h?p[1]/p[3]*u:0,E=i._offset-M,L=l._offset-S;n.clipRect.call(o.setTranslate,k,A).call(o.setScale,1/w,1/T),n.plot.call(o.setTranslate,E,L).call(o.setScale,w,T),o.setPointGroupScale(n.zoomScalePts,1/w,1/T),o.setTextPointsScale(n.zoomScaleTxt,1/w,1/T)}s.redrawComponents(t)}},{"../../components/drawing":383,"../../lib":498,"../../registry":633,"./axes":549,"@plotly/d3":58}],577:[function(t,e,r){"use strict";var n=t("../../registry").traceIs,i=t("./axis_autotype");function a(t){return{v:"x",h:"y"}[t.orientation||"v"]}function o(t,e){var r=a(t),i=n(t,"box-violin"),o=n(t._fullInput||{},"candlestick");return i&&!o&&e===r&&void 0===t[r]&&void 0===t[r+"0"]}e.exports=function(t,e,r,s){r("autotypenumbers",s.autotypenumbersDflt),"-"===r("type",(s.splomStash||{}).type)&&(!function(t,e){if("-"!==t.type)return;var r,s=t._id,l=s.charAt(0);-1!==s.indexOf("scene")&&(s=l);var c=function(t,e,r){for(var n=0;n0&&(i["_"+r+"axes"]||{})[e])return i;if((i[r+"axis"]||r)===e){if(o(i,r))return i;if((i[r]||[]).length||i[r+"0"])return i}}}(e,s,l);if(!c)return;if("histogram"===c.type&&l==={v:"y",h:"x"}[c.orientation||"v"])return void(t.type="linear");var u=l+"calendar",f=c[u],h={noMultiCategory:!n(c,"cartesian")||n(c,"noMultiCategory")};"box"===c.type&&c._hasPreCompStats&&l==={h:"x",v:"y"}[c.orientation||"v"]&&(h.noMultiCategory=!0);if(h.autotypenumbers=t.autotypenumbers,o(c,l)){var p=a(c),d=[];for(r=0;r0?".":"")+a;i.isPlainObject(o)?l(o,e,s,n+1):e(s,a,o)}}))}r.manageCommandObserver=function(t,e,n,o){var s={},l=!0;e&&e._commandObserver&&(s=e._commandObserver),s.cache||(s.cache={}),s.lookupTable={};var c=r.hasSimpleAPICommandBindings(t,n,s.lookupTable);if(e&&e._commandObserver){if(c)return s;if(e._commandObserver.remove)return e._commandObserver.remove(),e._commandObserver=null,s}if(c){a(t,c,s.cache),s.check=function(){if(l){var e=a(t,c,s.cache);return e.changed&&o&&void 
0!==s.lookupTable[e.value]&&(s.disable(),Promise.resolve(o({value:e.value,type:c.type,prop:c.prop,traces:c.traces,index:s.lookupTable[e.value]})).then(s.enable,s.enable)),e.changed}};for(var u=["plotly_relayout","plotly_redraw","plotly_restyle","plotly_update","plotly_animatingframe","plotly_afterplot"],f=0;f0&&i<0&&(i+=360);var s=(i-n)/4;return{type:"Polygon",coordinates:[[[n,a],[n,o],[n+s,o],[n+2*s,o],[n+3*s,o],[i,o],[i,a],[i-s,a],[i-2*s,a],[i-3*s,a],[n,a]]]}}e.exports=function(t){return new M(t)},S.plot=function(t,e,r){var n=this,i=e[this.id],a=[],o=!1;for(var s in w.layerNameToAdjective)if("frame"!==s&&i["show"+s]){o=!0;break}for(var l=0;l0&&a._module.calcGeoJSON(i,e)}if(!this.updateProjection(t,e)){this.viewInitial&&this.scope===r.scope||this.saveViewInitial(r),this.scope=r.scope,this.updateBaseLayers(e,r),this.updateDims(e,r),this.updateFx(e,r),d.generalUpdatePerTraceModule(this.graphDiv,this,t,r);var o=this.layers.frontplot.select(".scatterlayer");this.dataPoints.point=o.selectAll(".point"),this.dataPoints.text=o.selectAll("text"),this.dataPaths.line=o.selectAll(".js-line");var s=this.layers.backplot.select(".choroplethlayer");this.dataPaths.choropleth=s.selectAll("path"),this.render()}},S.updateProjection=function(t,e){var r=this.graphDiv,n=e[this.id],l=e._size,u=n.domain,f=n.projection,h=n.lonaxis,p=n.lataxis,d=h._ax,m=p._ax,v=this.projection=function(t){var e=t.projection,r=e.type,n=w.projNames[r];n="geo"+c.titleCase(n);for(var l=(i[n]||s[n])(),u=t._isSatellite?180*Math.acos(1/e.distance)/Math.PI:t._isClipped?w.lonaxisSpan[r]/2:null,f=["center","rotate","parallels","clipExtent"],h=function(t){return t?l:[]},p=0;pu*Math.PI/180}return!1},l.getPath=function(){return a().projection(l)},l.getBounds=function(t){return l.getPath().bounds(t)},l.precision(w.precision),t._isSatellite&&l.tilt(e.tilt).distance(e.distance);u&&l.clipAngle(u-w.clipPad);return 
l}(n),y=[[l.l+l.w*u.x[0],l.t+l.h*(1-u.y[1])],[l.l+l.w*u.x[1],l.t+l.h*(1-u.y[0])]],x=n.center||{},b=f.rotation||{},_=h.range||[],T=p.range||[];if(n.fitbounds){d._length=y[1][0]-y[0][0],m._length=y[1][1]-y[0][1],d.range=g(r,d),m.range=g(r,m);var k=(d.range[0]+d.range[1])/2,A=(m.range[0]+m.range[1])/2;if(n._isScoped)x={lon:k,lat:A};else if(n._isClipped){x={lon:k,lat:A},b={lon:k,lat:A,roll:b.roll};var M=f.type,S=w.lonaxisSpan[M]/2||180,L=w.lataxisSpan[M]/2||90;_=[k-S,k+S],T=[A-L,A+L]}else x={lon:k,lat:A},b={lon:k,lat:b.lat,roll:b.roll}}v.center([x.lon-b.lon,x.lat-b.lat]).rotate([-b.lon,-b.lat,b.roll]).parallels(f.parallels);var C=E(_,T);v.fitExtent(y,C);var P=this.bounds=v.getBounds(C),I=this.fitScale=v.scale(),O=v.translate();if(n.fitbounds){var z=v.getBounds(E(d.range,m.range)),D=Math.min((P[1][0]-P[0][0])/(z[1][0]-z[0][0]),(P[1][1]-P[0][1])/(z[1][1]-z[0][1]));isFinite(D)?v.scale(D*I):c.warn("Something went wrong during"+this.id+"fitbounds computations.")}else v.scale(f.scale*I);var R=this.midPt=[(P[0][0]+P[1][0])/2,(P[0][1]+P[1][1])/2];if(v.translate([O[0]+(R[0]-O[0]),O[1]+(R[1]-O[1])]).clipExtent(P),n._isAlbersUsa){var F=v([x.lon,x.lat]),B=v.translate();v.translate([B[0]-(F[0]-B[0]),B[1]-(F[1]-B[1])])}},S.updateBaseLayers=function(t,e){var r=this,i=r.topojson,a=r.layers,o=r.basePaths;function s(t){return"lonaxis"===t||"lataxis"===t}function l(t){return Boolean(w.lineLayers[t])}function c(t){return Boolean(w.fillLayers[t])}var u=(this.hasChoropleth?w.layersForChoropleth:w.layers).filter((function(t){return l(t)||c(t)?e["show"+t]:!s(t)||e[t].showgrid})),p=r.framework.selectAll(".layer").data(u,String);p.exit().each((function(t){delete a[t],delete o[t],n.select(this).remove()})),p.enter().append("g").attr("class",(function(t){return"layer "+t})).each((function(t){var 
e=a[t]=n.select(this);"bg"===t?r.bgRect=e.append("rect").style("pointer-events","all"):s(t)?o[t]=e.append("path").style("fill","none"):"backplot"===t?e.append("g").classed("choroplethlayer",!0):"frontplot"===t?e.append("g").classed("scatterlayer",!0):l(t)?o[t]=e.append("path").style("fill","none").style("stroke-miterlimit",2):c(t)&&(o[t]=e.append("path").style("stroke","none"))})),p.order(),p.each((function(r){var n=o[r],a=w.layerNameToAdjective[r];"frame"===r?n.datum(w.sphereSVG):l(r)||c(r)?n.datum(A(i,i.objects[r])):s(r)&&n.datum(function(t,e,r){var n,i,a,o=e[t],s=w.scopeDefaults[e.scope];"lonaxis"===t?(n=s.lonaxisRange,i=s.lataxisRange,a=function(t,e){return[t,e]}):"lataxis"===t&&(n=s.lataxisRange,i=s.lonaxisRange,a=function(t,e){return[e,t]});var l={type:"linear",range:[n[0],n[1]-1e-6],tick0:o.tick0,dtick:o.dtick};m.setConvert(l,r);var c=m.calcTicks(l);e.isScoped||"lonaxis"!==t||c.pop();for(var u=c.length,f=new Array(u),h=0;h-1&&b(n.event,i,[r.xaxis],[r.yaxis],r.id,f),s.indexOf("event")>-1&&p.click(i,n.event))}))}function h(t){return r.projection.invert([t[0]+r.xaxis._offset,t[1]+r.yaxis._offset])}},S.makeFramework=function(){var t=this,e=t.graphDiv,r=e._fullLayout,i="clip"+r._uid+t.id;t.clipDef=r._clips.append("clipPath").attr("id",i),t.clipRect=t.clipDef.append("rect"),t.framework=n.select(t.container).append("g").attr("class","geo "+t.id).call(h.setClipUrl,i,e),t.project=function(e){var r=t.projection(e);return r?[r[0]-t.xaxis._offset,r[1]-t.yaxis._offset]:[null,null]},t.xaxis={_id:"x",c2p:function(e){return t.project(e)[0]}},t.yaxis={_id:"y",c2p:function(e){return t.project(e)[1]}},t.mockAxis={type:"linear",showexponent:"all",exponentformat:"B"},m.setConvert(t.mockAxis,r)},S.saveViewInitial=function(t){var 
e,r=t.center||{},n=t.projection,i=n.rotation||{};this.viewInitial={fitbounds:t.fitbounds,"projection.scale":n.scale},e=t._isScoped?{"center.lon":r.lon,"center.lat":r.lat}:t._isClipped?{"projection.rotation.lon":i.lon,"projection.rotation.lat":i.lat}:{"center.lon":r.lon,"center.lat":r.lat,"projection.rotation.lon":i.lon},c.extendFlat(this.viewInitial,e)},S.render=function(){var t,e=this.projection,r=e.getPath();function n(t){var r=e(t.lonlat);return r?u(r[0],r[1]):null}function i(t){return e.isLonLatOverEdges(t.lonlat)?"none":null}for(t in this.basePaths)this.basePaths[t].attr("d",r);for(t in this.dataPaths)this.dataPaths[t].attr("d",(function(t){return r(t.geojson)}));for(t in this.dataPoints)this.dataPoints[t].attr("display",i).attr("transform",n)}},{"../../components/color":361,"../../components/dragelement":380,"../../components/drawing":383,"../../components/fx":401,"../../lib":498,"../../lib/geo_location_utils":491,"../../lib/topojson_utils":527,"../../registry":633,"../cartesian/autorange":548,"../cartesian/axes":549,"../cartesian/select":570,"../plots":614,"./constants":582,"./zoom":587,"@plotly/d3":58,"d3-geo":109,"d3-geo-projection":108,"topojson-client":310}],584:[function(t,e,r){"use strict";var n=t("../../plots/get_data").getSubplotCalcData,i=t("../../lib").counterRegex,a=t("./geo"),o="geo",s=i(o),l={};l.geo={valType:"subplotid",dflt:o,editType:"calc"},e.exports={attr:o,name:o,idRoot:o,idRegex:s,attrRegex:s,attributes:l,layoutAttributes:t("./layout_attributes"),supplyLayoutDefaults:t("./layout_defaults"),plot:function(t){for(var e=t._fullLayout,r=t.calcdata,i=e._subplots.geo,s=0;s0&&P<0&&(P+=360);var I,O,z,D=(C+P)/2;if(!p){var R=d?f.projRotate:[D,0,0];I=r("projection.rotation.lon",R[0]),r("projection.rotation.lat",R[1]),r("projection.rotation.roll",R[2]),r("showcoastlines",!d&&x)&&(r("coastlinecolor"),r("coastlinewidth")),r("showocean",!!x&&void 
0)&&r("oceancolor")}(p?(O=-96.6,z=38.7):(O=d?D:I,z=(L[0]+L[1])/2),r("center.lon",O),r("center.lat",z),m&&(r("projection.tilt"),r("projection.distance")),g)&&r("projection.parallels",f.projParallels||[0,60]);r("projection.scale"),r("showland",!!x&&void 0)&&r("landcolor"),r("showlakes",!!x&&void 0)&&r("lakecolor"),r("showrivers",!!x&&void 0)&&(r("rivercolor"),r("riverwidth")),r("showcountries",d&&"usa"!==u&&x)&&(r("countrycolor"),r("countrywidth")),("usa"===u||"north america"===u&&50===c)&&(r("showsubunits",x),r("subunitcolor"),r("subunitwidth")),d||r("showframe",x)&&(r("framecolor"),r("framewidth")),r("bgcolor"),r("fitbounds")&&(delete e.projection.scale,d?(delete e.center.lon,delete e.center.lat):v?(delete e.center.lon,delete e.center.lat,delete e.projection.rotation.lon,delete e.projection.rotation.lat,delete e.lonaxis.range,delete e.lataxis.range):(delete e.center.lon,delete e.center.lat,delete e.projection.rotation.lon))}e.exports=function(t,e,r){i(t,e,r,{type:"geo",attributes:s,handleDefaults:c,fullData:r,partition:"y"})}},{"../../lib":498,"../get_data":588,"../subplot_defaults":627,"./constants":582,"./layout_attributes":585}],587:[function(t,e,r){"use strict";var n=t("@plotly/d3"),i=t("../../lib"),a=t("../../registry"),o=Math.PI/180,s=180/Math.PI,l={cursor:"pointer"},c={cursor:"auto"};function u(t,e){return n.behavior.zoom().translate(e.translate()).scale(e.scale())}function f(t,e,r){var n=t.id,o=t.graphDiv,s=o.layout,l=s[n],c=o._fullLayout,u=c[n],f={},h={};function p(t,e){f[n+"."+t]=i.nestedProperty(l,t).get(),a.call("_storeDirectGUIEdit",s,c._preGUI,f);var r=i.nestedProperty(u,t);r.get()!==e&&(r.set(e),i.nestedProperty(l,t).set(e),h[n+"."+t]=e)}r(p),p("projection.scale",e.scale()/t.fitScale),p("fitbounds",!1),o.emit("plotly_relayout",h)}function h(t,e){var r=u(0,e);function i(r){var n=e.invert(t.midPt);r("center.lon",n[0]),r("center.lat",n[1])}return 
r.on("zoomstart",(function(){n.select(this).style(l)})).on("zoom",(function(){e.scale(n.event.scale).translate(n.event.translate),t.render();var r=e.invert(t.midPt);t.graphDiv.emit("plotly_relayouting",{"geo.projection.scale":e.scale()/t.fitScale,"geo.center.lon":r[0],"geo.center.lat":r[1]})})).on("zoomend",(function(){n.select(this).style(c),f(t,e,i)})),r}function p(t,e){var r,i,a,o,s,h,p,d,m,g=u(0,e);function v(t){return e.invert(t)}function y(r){var n=e.rotate(),i=e.invert(t.midPt);r("projection.rotation.lon",-n[0]),r("center.lon",i[0]),r("center.lat",i[1])}return g.on("zoomstart",(function(){n.select(this).style(l),r=n.mouse(this),i=e.rotate(),a=e.translate(),o=i,s=v(r)})).on("zoom",(function(){if(h=n.mouse(this),function(t){var r=v(t);if(!r)return!0;var n=e(r);return Math.abs(n[0]-t[0])>2||Math.abs(n[1]-t[1])>2}(r))return g.scale(e.scale()),void g.translate(e.translate());e.scale(n.event.scale),e.translate([a[0],n.event.translate[1]]),s?v(h)&&(d=v(h),p=[o[0]+(d[0]-s[0]),i[1],i[2]],e.rotate(p),o=p):s=v(r=h),m=!0,t.render();var l=e.rotate(),c=e.invert(t.midPt);t.graphDiv.emit("plotly_relayouting",{"geo.projection.scale":e.scale()/t.fitScale,"geo.center.lon":c[0],"geo.center.lat":c[1],"geo.projection.rotation.lon":-l[0]})})).on("zoomend",(function(){n.select(this).style(c),m&&f(t,e,y)})),g}function d(t,e){var r,i={r:e.rotate(),k:e.scale()},a=u(0,e),o=function(t){var e=0,r=arguments.length,i=[];for(;++ed?(a=(f>0?90:-90)-p,i=0):(a=Math.asin(f/d)*s-p,i=Math.sqrt(d*d-f*f));var m=180-a-2*p,g=(Math.atan2(h,u)-Math.atan2(c,i))*s,v=(Math.atan2(h,u)-Math.atan2(c,-i))*s;return b(r[0],r[1],a,g)<=b(r[0],r[1],m,v)?[a,g,r[2]]:[m,v,r[2]]}function b(t,e,r,n){var i=_(r-t),a=_(n-e);return Math.sqrt(i*i+a*a)}function _(t){return(t%360+540)%360-180}function w(t,e,r){var n=r*o,i=t.slice(),a=0===e?1:0,s=2===e?1:2,l=Math.cos(n),c=Math.sin(n);return i[a]=t[a]*l-t[s]*c,i[s]=t[s]*l+t[a]*c,i}function 
T(t){return[Math.atan2(2*(t[0]*t[1]+t[2]*t[3]),1-2*(t[1]*t[1]+t[2]*t[2]))*s,Math.asin(Math.max(-1,Math.min(1,2*(t[0]*t[2]-t[3]*t[1]))))*s,Math.atan2(2*(t[0]*t[3]+t[1]*t[2]),1-2*(t[2]*t[2]+t[3]*t[3]))*s]}function k(t,e){for(var r=0,n=0,i=t.length;nMath.abs(s)?(c.boxEnd[1]=c.boxStart[1]+Math.abs(a)*_*(s>=0?1:-1),c.boxEnd[1]l[3]&&(c.boxEnd[1]=l[3],c.boxEnd[0]=c.boxStart[0]+(l[3]-c.boxStart[1])/Math.abs(_))):(c.boxEnd[0]=c.boxStart[0]+Math.abs(s)/_*(a>=0?1:-1),c.boxEnd[0]l[2]&&(c.boxEnd[0]=l[2],c.boxEnd[1]=c.boxStart[1]+(l[2]-c.boxStart[0])*Math.abs(_)))}}else c.boxEnabled?(a=c.boxStart[0]!==c.boxEnd[0],s=c.boxStart[1]!==c.boxEnd[1],a||s?(a&&(g(0,c.boxStart[0],c.boxEnd[0]),t.xaxis.autorange=!1),s&&(g(1,c.boxStart[1],c.boxEnd[1]),t.yaxis.autorange=!1),t.relayoutCallback()):t.glplot.setDirty(),c.boxEnabled=!1,c.boxInited=!1):c.boxInited&&(c.boxInited=!1);break;case"pan":c.boxEnabled=!1,c.boxInited=!1,e?(c.panning||(c.dragStart[0]=n,c.dragStart[1]=i),Math.abs(c.dragStart[0]-n).999&&(m="turntable"):m="turntable")}else m="turntable";r("dragmode",m),r("hovermode",n.getDfltFromLayout("hovermode"))}e.exports=function(t,e,r){var i=e._basePlotModules.length>1;o(t,e,r,{type:"gl3d",attributes:l,handleDefaults:u,fullLayout:e,font:e.font,fullData:r,getDfltFromLayout:function(e){if(!i)return n.validate(t[e],l[e])?t[e]:void 0},autotypenumbersDflt:e.autotypenumbers,paper_bgcolor:e.paper_bgcolor,calendar:e.calendar})}},{"../../../components/color":361,"../../../lib":498,"../../../registry":633,"../../get_data":588,"../../subplot_defaults":627,"./axis_defaults":596,"./layout_attributes":599}],599:[function(t,e,r){"use strict";var n=t("./axis_attributes"),i=t("../../domain").attributes,a=t("../../../lib/extend").extendFlat,o=t("../../../lib").counterRegex;function 
s(t,e,r){return{x:{valType:"number",dflt:t,editType:"camera"},y:{valType:"number",dflt:e,editType:"camera"},z:{valType:"number",dflt:r,editType:"camera"},editType:"camera"}}e.exports={_arrayAttrRegexps:[o("scene",".annotations",!0)],bgcolor:{valType:"color",dflt:"rgba(0,0,0,0)",editType:"plot"},camera:{up:a(s(0,0,1),{}),center:a(s(0,0,0),{}),eye:a(s(1.25,1.25,1.25),{}),projection:{type:{valType:"enumerated",values:["perspective","orthographic"],dflt:"perspective",editType:"calc"},editType:"calc"},editType:"camera"},domain:i({name:"scene",editType:"plot"}),aspectmode:{valType:"enumerated",values:["auto","cube","data","manual"],dflt:"auto",editType:"plot",impliedEdits:{"aspectratio.x":void 0,"aspectratio.y":void 0,"aspectratio.z":void 0}},aspectratio:{x:{valType:"number",min:0,editType:"plot",impliedEdits:{"^aspectmode":"manual"}},y:{valType:"number",min:0,editType:"plot",impliedEdits:{"^aspectmode":"manual"}},z:{valType:"number",min:0,editType:"plot",impliedEdits:{"^aspectmode":"manual"}},editType:"plot",impliedEdits:{aspectmode:"manual"}},xaxis:n,yaxis:n,zaxis:n,dragmode:{valType:"enumerated",values:["orbit","turntable","zoom","pan",!1],editType:"plot"},hovermode:{valType:"enumerated",values:["closest",!1],dflt:"closest",editType:"modebar"},uirevision:{valType:"any",editType:"none"},editType:"plot",_deprecated:{cameraposition:{valType:"info_array",editType:"camera"}}}},{"../../../lib":498,"../../../lib/extend":488,"../../domain":579,"./axis_attributes":595}],600:[function(t,e,r){"use strict";var n=t("../../../lib/str2rgbarray"),i=["xaxis","yaxis","zaxis"];function a(){this.enabled=[!0,!0,!0],this.colors=[[0,0,0,1],[0,0,0,1],[0,0,0,1]],this.drawSides=[!0,!0,!0],this.lineWidth=[1,1,1]}a.prototype.merge=function(t){for(var e=0;e<3;++e){var r=t[i[e]];r.visible?(this.enabled[e]=r.showspikes,this.colors[e]=n(r.spikecolor),this.drawSides[e]=r.spikesides,this.lineWidth[e]=r.spikethickness):(this.enabled[e]=!1,this.drawSides[e]=!1)}},e.exports=function(t){var e=new a;return 
e.merge(t),e}},{"../../../lib/str2rgbarray":523}],601:[function(t,e,r){"use strict";e.exports=function(t){for(var e=t.axesOptions,r=t.glplot.axesPixels,s=t.fullSceneLayout,l=[[],[],[]],c=0;c<3;++c){var u=s[a[c]];if(u._length=(r[c].hi-r[c].lo)*r[c].pixelsPerDataUnit/t.dataScale[c],Math.abs(u._length)===1/0||isNaN(u._length))l[c]=[];else{u._input_range=u.range.slice(),u.range[0]=r[c].lo/t.dataScale[c],u.range[1]=r[c].hi/t.dataScale[c],u._m=1/(t.dataScale[c]*r[c].pixelsPerDataUnit),u.range[0]===u.range[1]&&(u.range[0]-=1,u.range[1]+=1);var f=u.tickmode;if("auto"===u.tickmode){u.tickmode="linear";var h=u.nticks||i.constrain(u._length/40,4,9);n.autoTicks(u,Math.abs(u.range[1]-u.range[0])/h)}for(var p=n.calcTicks(u,{msUTC:!0}),d=0;d/g," "));l[c]=p,u.tickmode=f}}e.ticks=l;for(c=0;c<3;++c){o[c]=.5*(t.glplot.bounds[0][c]+t.glplot.bounds[1][c]);for(d=0;d<2;++d)e.bounds[d][c]=t.glplot.bounds[d][c]}t.contourLevels=function(t){for(var e=new Array(3),r=0;r<3;++r){for(var n=t[r],i=new Array(n.length),a=0;ar.deltaY?1.1:1/1.1,a=t.glplot.getAspectratio();t.glplot.setAspectratio({x:n*a.x,y:n*a.y,z:n*a.z})}i(t)}}),!!c&&{passive:!1}),t.glplot.canvas.addEventListener("mousemove",(function(){if(!1!==t.fullSceneLayout.dragmode&&0!==t.camera.mouseListener.buttons){var e=n();t.graphDiv.emit("plotly_relayouting",e)}})),t.staticMode||t.glplot.canvas.addEventListener("webglcontextlost",(function(r){e&&e.emit&&e.emit("plotly_webglcontextlost",{event:r,layer:t.id})}),!1)),t.glplot.oncontextloss=function(){t.recoverContext()},t.glplot.onrender=function(){t.render()},!0},w.render=function(){var t,e=this,r=e.graphDiv,n=e.svgContainer,i=e.container.getBoundingClientRect();r._fullLayout._calcInverseTransform(r);var a=r._fullLayout._invScaleX,o=r._fullLayout._invScaleY,s=i.width*a,l=i.height*o;n.setAttributeNS(null,"viewBox","0 0 "+s+" "+l),n.setAttributeNS(null,"width",s),n.setAttributeNS(null,"height",l),b(e),e.glplot.axes.update(e.axesOptions);for(var 
c=Object.keys(e.traces),u=null,h=e.glplot.selection,m=0;m")):"isosurface"===t.type||"volume"===t.type?(T.valueLabel=p.hoverLabelText(e._mockAxis,e._mockAxis.d2l(h.traceCoordinate[3]),t.valuehoverformat),S.push("value: "+T.valueLabel),h.textLabel&&S.push(h.textLabel),x=S.join("
")):x=h.textLabel;var E={x:h.traceCoordinate[0],y:h.traceCoordinate[1],z:h.traceCoordinate[2],data:_._input,fullData:_,curveNumber:_.index,pointNumber:w};d.appendArrayPointValue(E,_,w),t._module.eventData&&(E=_._module.eventData(E,h,_,{},w));var L={points:[E]};if(e.fullSceneLayout.hovermode){var C=[];d.loneHover({trace:_,x:(.5+.5*y[0]/y[3])*s,y:(.5-.5*y[1]/y[3])*l,xLabel:T.xLabel,yLabel:T.yLabel,zLabel:T.zLabel,text:x,name:u.name,color:d.castHoverOption(_,w,"bgcolor")||u.color,borderColor:d.castHoverOption(_,w,"bordercolor"),fontFamily:d.castHoverOption(_,w,"font.family"),fontSize:d.castHoverOption(_,w,"font.size"),fontColor:d.castHoverOption(_,w,"font.color"),nameLength:d.castHoverOption(_,w,"namelength"),textAlign:d.castHoverOption(_,w,"align"),hovertemplate:f.castOption(_,w,"hovertemplate"),hovertemplateLabels:f.extendFlat({},E,T),eventData:[E]},{container:n,gd:r,inOut_bbox:C}),E.bbox=C[0]}h.buttons&&h.distance<5?r.emit("plotly_click",L):r.emit("plotly_hover",L),this.oldEventData=L}else d.loneUnhover(n),this.oldEventData&&r.emit("plotly_unhover",this.oldEventData),this.oldEventData=void 0;e.drawAnnotations(e)},w.recoverContext=function(){var t=this;t.glplot.dispose();var e=function(){t.glplot.gl.isContextLost()?requestAnimationFrame(e):t.initializeGLPlot()?t.plot.apply(t,t.plotArgs):f.error("Catastrophic and unrecoverable WebGL error. 
Context lost.")};requestAnimationFrame(e)};var k=["xaxis","yaxis","zaxis"];function A(t,e,r){for(var n=t.fullSceneLayout,i=0;i<3;i++){var a=k[i],o=a.charAt(0),s=n[a],l=e[o],c=e[o+"calendar"],u=e["_"+o+"length"];if(f.isArrayOrTypedArray(l))for(var h,p=0;p<(u||l.length);p++)if(f.isArrayOrTypedArray(l[p]))for(var d=0;dg[1][a])g[0][a]=-1,g[1][a]=1;else{var L=g[1][a]-g[0][a];g[0][a]-=L/32,g[1][a]+=L/32}if("reversed"===s.autorange){var C=g[0][a];g[0][a]=g[1][a],g[1][a]=C}}else{var P=s.range;g[0][a]=s.r2l(P[0]),g[1][a]=s.r2l(P[1])}g[0][a]===g[1][a]&&(g[0][a]-=1,g[1][a]+=1),v[a]=g[1][a]-g[0][a],this.glplot.setBounds(a,{min:g[0][a]*h[a],max:g[1][a]*h[a]})}var I=c.aspectmode;if("cube"===I)d=[1,1,1];else if("manual"===I){var O=c.aspectratio;d=[O.x,O.y,O.z]}else{if("auto"!==I&&"data"!==I)throw new Error("scene.js aspectRatio was not one of the enumerated types");var z=[1,1,1];for(a=0;a<3;++a){var D=y[l=(s=c[k[a]]).type];z[a]=Math.pow(D.acc,1/D.count)/h[a]}d="data"===I||Math.max.apply(null,z)/Math.min.apply(null,z)<=4?z:[1,1,1]}c.aspectratio.x=u.aspectratio.x=d[0],c.aspectratio.y=u.aspectratio.y=d[1],c.aspectratio.z=u.aspectratio.z=d[2],this.glplot.setAspectratio(c.aspectratio),this.viewInitial.aspectratio||(this.viewInitial.aspectratio={x:c.aspectratio.x,y:c.aspectratio.y,z:c.aspectratio.z}),this.viewInitial.aspectmode||(this.viewInitial.aspectmode=c.aspectmode);var R=c.domain||null,F=e._size||null;if(R&&F){var B=this.container.style;B.position="absolute",B.left=F.l+R.x[0]*F.w+"px",B.top=F.t+(1-R.y[1])*F.h+"px",B.width=F.w*(R.x[1]-R.x[0])+"px",B.height=F.h*(R.y[1]-R.y[0])+"px"}this.glplot.redraw()}},w.destroy=function(){this.glplot&&(this.camera.mouseListener.enabled=!1,this.container.removeEventListener("wheel",this.camera.wheelListener),this.camera=null,this.glplot.dispose(),this.container.parentNode.removeChild(this.container),this.glplot=null)},w.getCamera=function(){var t;return 
this.camera.view.recalcMatrix(this.camera.view.lastT()),{up:{x:(t=this.camera).up[0],y:t.up[1],z:t.up[2]},center:{x:t.center[0],y:t.center[1],z:t.center[2]},eye:{x:t.eye[0],y:t.eye[1],z:t.eye[2]},projection:{type:!0===t._ortho?"orthographic":"perspective"}}},w.setViewport=function(t){var e,r=t.camera;this.camera.lookAt.apply(this,[[(e=r).eye.x,e.eye.y,e.eye.z],[e.center.x,e.center.y,e.center.z],[e.up.x,e.up.y,e.up.z]]),this.glplot.setAspectratio(t.aspectratio),"orthographic"===r.projection.type!==this.camera._ortho&&(this.glplot.redraw(),this.glplot.clearRGBA(),this.glplot.dispose(),this.initializeGLPlot())},w.isCameraChanged=function(t){var e=this.getCamera(),r=f.nestedProperty(t,this.id+".camera").get();function n(t,e,r,n){var i=["up","center","eye"],a=["x","y","z"];return e[i[r]]&&t[i[r]][a[n]]===e[i[r]][a[n]]}var i=!1;if(void 0===r)i=!0;else{for(var a=0;a<3;a++)for(var o=0;o<3;o++)if(!n(e,r,a,o)){i=!0;break}(!r.projection||e.projection&&e.projection.type!==r.projection.type)&&(i=!0)}return i},w.isAspectChanged=function(t){var e=this.glplot.getAspectratio(),r=f.nestedProperty(t,this.id+".aspectratio").get();return void 0===r||r.x!==e.x||r.y!==e.y||r.z!==e.z},w.saveLayout=function(t){var e,r,n,i,a,o,s=this.fullLayout,l=this.isCameraChanged(t),c=this.isAspectChanged(t),h=l||c;if(h){var p={};if(l&&(e=this.getCamera(),n=(r=f.nestedProperty(t,this.id+".camera")).get(),p[this.id+".camera"]=n),c&&(i=this.glplot.getAspectratio(),o=(a=f.nestedProperty(t,this.id+".aspectratio")).get(),p[this.id+".aspectratio"]=o),u.call("_storeDirectGUIEdit",t,s._preGUI,p),l)r.set(e),f.nestedProperty(s,this.id+".camera").set(e);if(c)a.set(i),f.nestedProperty(s,this.id+".aspectratio").set(i),this.glplot.redraw()}return h},w.updateFx=function(t,e){var r=this.camera;if(r)if("orbit"===t)r.mode="orbit",r.keyBindingMode="rotate";else if("turntable"===t){r.up=[0,0,1],r.mode="turntable",r.keyBindingMode="rotate";var 
n=this.graphDiv,i=n._fullLayout,a=this.fullSceneLayout.camera,o=a.up.x,s=a.up.y,l=a.up.z;if(l/Math.sqrt(o*o+s*s+l*l)<.999){var c=this.id+".camera.up",h={x:0,y:0,z:1},p={};p[c]=h;var d=n.layout;u.call("_storeDirectGUIEdit",d,i._preGUI,p),a.up=h,f.nestedProperty(d,c).set(h)}}else r.keyBindingMode=t;this.fullSceneLayout.hovermode=e},w.toImage=function(t){t||(t="png"),this.staticMode&&this.container.appendChild(n),this.glplot.redraw();var e=this.glplot.gl,r=e.drawingBufferWidth,i=e.drawingBufferHeight;e.bindFramebuffer(e.FRAMEBUFFER,null);var a=new Uint8Array(r*i*4);e.readPixels(0,0,r,i,e.RGBA,e.UNSIGNED_BYTE,a),function(t,e,r){for(var n=0,i=r-1;n0)for(var s=255/o,l=0;l<3;++l)t[a+l]=Math.min(s*t[a+l],255)}}(a,r,i);var o=document.createElement("canvas");o.width=r,o.height=i;var s,l=o.getContext("2d"),c=l.createImageData(r,i);switch(c.data.set(a),l.putImageData(c,0,0),t){case"jpeg":s=o.toDataURL("image/jpeg");break;case"webp":s=o.toDataURL("image/webp");break;default:s=o.toDataURL("image/png")}return this.staticMode&&this.container.removeChild(n),s},w.setConvert=function(){for(var t=0;t<3;t++){var e=this.fullSceneLayout[k[t]];p.setConvert(e,this.fullLayout),e.setScale=f.noop}},w.make4thDimension=function(){var t=this.graphDiv._fullLayout;this._mockAxis={type:"linear",showexponent:"all",exponentformat:"B"},p.setConvert(this._mockAxis,t)},e.exports=_},{"../../../stackgl_modules":1119,"../../components/fx":401,"../../lib":498,"../../lib/show_no_webgl_msg":520,"../../lib/str2rgbarray":523,"../../plots/cartesian/axes":549,"../../registry":633,"./layout/convert":597,"./layout/spikes":600,"./layout/tick_marks":601,"./project":602,"has-passive-events":224,"webgl-context":326}],604:[function(t,e,r){"use strict";e.exports=function(t,e,r,n){n=n||t.length;for(var i=new Array(n),a=0;aOpenStreetMap contributors',a=['\xa9 Carto',i].join(" "),o=['Map tiles by Stamen Design','under CC BY 3.0',"|",'Data by OpenStreetMap contributors','under ODbL'].join(" 
"),s={"open-street-map":{id:"osm",version:8,sources:{"plotly-osm-tiles":{type:"raster",attribution:i,tiles:["https://a.tile.openstreetmap.org/{z}/{x}/{y}.png","https://b.tile.openstreetmap.org/{z}/{x}/{y}.png"],tileSize:256}},layers:[{id:"plotly-osm-tiles",type:"raster",source:"plotly-osm-tiles",minzoom:0,maxzoom:22}]},"white-bg":{id:"white-bg",version:8,sources:{},layers:[{id:"white-bg",type:"background",paint:{"background-color":"#FFFFFF"},minzoom:0,maxzoom:22}]},"carto-positron":{id:"carto-positron",version:8,sources:{"plotly-carto-positron":{type:"raster",attribution:a,tiles:["https://cartodb-basemaps-c.global.ssl.fastly.net/light_all/{z}/{x}/{y}.png"],tileSize:256}},layers:[{id:"plotly-carto-positron",type:"raster",source:"plotly-carto-positron",minzoom:0,maxzoom:22}]},"carto-darkmatter":{id:"carto-darkmatter",version:8,sources:{"plotly-carto-darkmatter":{type:"raster",attribution:a,tiles:["https://cartodb-basemaps-c.global.ssl.fastly.net/dark_all/{z}/{x}/{y}.png"],tileSize:256}},layers:[{id:"plotly-carto-darkmatter",type:"raster",source:"plotly-carto-darkmatter",minzoom:0,maxzoom:22}]},"stamen-terrain":{id:"stamen-terrain",version:8,sources:{"plotly-stamen-terrain":{type:"raster",attribution:o,tiles:["https://stamen-tiles.a.ssl.fastly.net/terrain/{z}/{x}/{y}.png"],tileSize:256}},layers:[{id:"plotly-stamen-terrain",type:"raster",source:"plotly-stamen-terrain",minzoom:0,maxzoom:22}]},"stamen-toner":{id:"stamen-toner",version:8,sources:{"plotly-stamen-toner":{type:"raster",attribution:o,tiles:["https://stamen-tiles.a.ssl.fastly.net/toner/{z}/{x}/{y}.png"],tileSize:256}},layers:[{id:"plotly-stamen-toner",type:"raster",source:"plotly-stamen-toner",minzoom:0,maxzoom:22}]},"stamen-watercolor":{id:"stamen-watercolor",version:8,sources:{"plotly-stamen-watercolor":{type:"raster",attribution:['Map tiles by Stamen Design','under CC BY 3.0',"|",'Data by OpenStreetMap contributors','under CC BY SA'].join(" 
"),tiles:["https://stamen-tiles.a.ssl.fastly.net/watercolor/{z}/{x}/{y}.png"],tileSize:256}},layers:[{id:"plotly-stamen-watercolor",type:"raster",source:"plotly-stamen-watercolor",minzoom:0,maxzoom:22}]}},l=n(s);e.exports={requiredVersion:"1.10.1",styleUrlPrefix:"mapbox://styles/mapbox/",styleUrlSuffix:"v9",styleValuesMapbox:["basic","streets","outdoors","light","dark","satellite","satellite-streets"],styleValueDflt:"basic",stylesNonMapbox:s,styleValuesNonMapbox:l,traceLayerPrefix:"plotly-trace-layer-",layoutLayerPrefix:"plotly-layout-layer-",wrongVersionErrorMsg:["Your custom plotly.js bundle is not using the correct mapbox-gl version","Please install mapbox-gl@1.10.1."].join("\n"),noAccessTokenErrorMsg:["Missing Mapbox access token.","Mapbox trace type require a Mapbox access token to be registered.","For example:"," Plotly.newPlot(gd, data, layout, { mapboxAccessToken: 'my-access-token' });","More info here: https://www.mapbox.com/help/define-access-token/"].join("\n"),missingStyleErrorMsg:["No valid mapbox style found, please set `mapbox.style` to one of:",l.join(", "),"or register a Mapbox access token to use a Mapbox-served style."].join("\n"),multipleTokensErrorMsg:["Set multiple mapbox access token across different mapbox subplot,","using first token found as mapbox-gl does not allow multipleaccess tokens on the same page."].join("\n"),mapOnErrorMsg:"Mapbox error.",mapboxLogo:{path0:"m 10.5,1.24 c -5.11,0 -9.25,4.15 -9.25,9.25 0,5.1 4.15,9.25 9.25,9.25 5.1,0 9.25,-4.15 9.25,-9.25 0,-5.11 -4.14,-9.25 -9.25,-9.25 z m 4.39,11.53 c -1.93,1.93 -4.78,2.31 -6.7,2.31 -0.7,0 -1.41,-0.05 -2.1,-0.16 0,0 -1.02,-5.64 2.14,-8.81 0.83,-0.83 1.95,-1.28 3.13,-1.28 1.27,0 2.49,0.51 3.39,1.42 1.84,1.84 1.89,4.75 0.14,6.52 z",path1:"M 10.5,-0.01 C 4.7,-0.01 0,4.7 0,10.49 c 0,5.79 4.7,10.5 10.5,10.5 5.8,0 10.5,-4.7 10.5,-10.5 C 20.99,4.7 16.3,-0.01 10.5,-0.01 Z m 0,19.75 c -5.11,0 -9.25,-4.15 -9.25,-9.25 0,-5.1 4.14,-9.26 9.25,-9.26 5.11,0 9.25,4.15 9.25,9.25 0,5.13 -4.14,9.26 
-9.25,9.26 z",path2:"M 14.74,6.25 C 12.9,4.41 9.98,4.35 8.23,6.1 5.07,9.27 6.09,14.91 6.09,14.91 c 0,0 5.64,1.02 8.81,-2.14 C 16.64,11 16.59,8.09 14.74,6.25 Z m -2.27,4.09 -0.91,1.87 -0.9,-1.87 -1.86,-0.91 1.86,-0.9 0.9,-1.87 0.91,1.87 1.86,0.9 z",polygon:"11.56,12.21 10.66,10.34 8.8,9.43 10.66,8.53 11.56,6.66 12.47,8.53 14.33,9.43 12.47,10.34"},styleRules:{map:"overflow:hidden;position:relative;","missing-css":"display:none;",canary:"background-color:salmon;","ctrl-bottom-left":"position: absolute; pointer-events: none; z-index: 2; bottom: 0; left: 0;","ctrl-bottom-right":"position: absolute; pointer-events: none; z-index: 2; right: 0; bottom: 0;",ctrl:"clear: both; pointer-events: auto; transform: translate(0, 0);","ctrl-attrib.mapboxgl-compact .mapboxgl-ctrl-attrib-inner":"display: none;","ctrl-attrib.mapboxgl-compact:hover .mapboxgl-ctrl-attrib-inner":"display: block; margin-top:2px","ctrl-attrib.mapboxgl-compact:hover":"padding: 2px 24px 2px 4px; visibility: visible; margin-top: 6px;","ctrl-attrib.mapboxgl-compact::after":'content: ""; cursor: pointer; position: absolute; background-image: url(\'data:image/svg+xml;charset=utf-8,%3Csvg viewBox="0 0 20 20" xmlns="http://www.w3.org/2000/svg"%3E %3Cpath fill="%23333333" fill-rule="evenodd" d="M4,10a6,6 0 1,0 12,0a6,6 0 1,0 -12,0 M9,7a1,1 0 1,0 2,0a1,1 0 1,0 -2,0 M9,10a1,1 0 1,1 2,0l0,3a1,1 0 1,1 -2,0"/%3E %3C/svg%3E\'); background-color: rgba(255, 255, 255, 0.5); width: 24px; height: 24px; box-sizing: border-box; border-radius: 12px;',"ctrl-attrib.mapboxgl-compact":"min-height: 20px; padding: 0; margin: 10px; position: relative; background-color: #fff; border-radius: 3px 12px 12px 3px;","ctrl-bottom-right > .mapboxgl-ctrl-attrib.mapboxgl-compact::after":"bottom: 0; right: 0","ctrl-bottom-left > .mapboxgl-ctrl-attrib.mapboxgl-compact::after":"bottom: 0; left: 0","ctrl-bottom-left .mapboxgl-ctrl":"margin: 0 0 10px 10px; float: left;","ctrl-bottom-right .mapboxgl-ctrl":"margin: 0 10px 10px 0; float: 
right;","ctrl-attrib":"color: rgba(0, 0, 0, 0.75); text-decoration: none; font-size: 12px","ctrl-attrib a":"color: rgba(0, 0, 0, 0.75); text-decoration: none; font-size: 12px","ctrl-attrib a:hover":"color: inherit; text-decoration: underline;","ctrl-attrib .mapbox-improve-map":"font-weight: bold; margin-left: 2px;","attrib-empty":"display: none;","ctrl-logo":'display:block; width: 21px; height: 21px; background-image: url(\'data:image/svg+xml;charset=utf-8,%3C?xml version="1.0" encoding="utf-8"?%3E %3Csvg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 21 21" style="enable-background:new 0 0 21 21;" xml:space="preserve"%3E%3Cg transform="translate(0,0.01)"%3E%3Cpath d="m 10.5,1.24 c -5.11,0 -9.25,4.15 -9.25,9.25 0,5.1 4.15,9.25 9.25,9.25 5.1,0 9.25,-4.15 9.25,-9.25 0,-5.11 -4.14,-9.25 -9.25,-9.25 z m 4.39,11.53 c -1.93,1.93 -4.78,2.31 -6.7,2.31 -0.7,0 -1.41,-0.05 -2.1,-0.16 0,0 -1.02,-5.64 2.14,-8.81 0.83,-0.83 1.95,-1.28 3.13,-1.28 1.27,0 2.49,0.51 3.39,1.42 1.84,1.84 1.89,4.75 0.14,6.52 z" style="opacity:0.9;fill:%23ffffff;enable-background:new" class="st0"/%3E%3Cpath d="M 10.5,-0.01 C 4.7,-0.01 0,4.7 0,10.49 c 0,5.79 4.7,10.5 10.5,10.5 5.8,0 10.5,-4.7 10.5,-10.5 C 20.99,4.7 16.3,-0.01 10.5,-0.01 Z m 0,19.75 c -5.11,0 -9.25,-4.15 -9.25,-9.25 0,-5.1 4.14,-9.26 9.25,-9.26 5.11,0 9.25,4.15 9.25,9.25 0,5.13 -4.14,9.26 -9.25,9.26 z" style="opacity:0.35;enable-background:new" class="st1"/%3E%3Cpath d="M 14.74,6.25 C 12.9,4.41 9.98,4.35 8.23,6.1 5.07,9.27 6.09,14.91 6.09,14.91 c 0,0 5.64,1.02 8.81,-2.14 C 16.64,11 16.59,8.09 14.74,6.25 Z m -2.27,4.09 -0.91,1.87 -0.9,-1.87 -1.86,-0.91 1.86,-0.9 0.9,-1.87 0.91,1.87 1.86,0.9 z" style="opacity:0.35;enable-background:new" class="st1"/%3E%3Cpolygon points="11.56,12.21 10.66,10.34 8.8,9.43 10.66,8.53 11.56,6.66 12.47,8.53 14.33,9.43 12.47,10.34 " style="opacity:0.9;fill:%23ffffff;enable-background:new" 
class="st0"/%3E%3C/g%3E%3C/svg%3E\')'}}},{"../../lib/sort_object_keys":521}],607:[function(t,e,r){"use strict";var n=t("../../lib");e.exports=function(t,e){var r=t.split(" "),i=r[0],a=r[1],o=n.isArrayOrTypedArray(e)?n.mean(e):e,s=.5+o/100,l=1.5+o/100,c=["",""],u=[0,0];switch(i){case"top":c[0]="top",u[1]=-l;break;case"bottom":c[0]="bottom",u[1]=l}switch(a){case"left":c[1]="right",u[0]=-s;break;case"right":c[1]="left",u[0]=s}return{anchor:c[0]&&c[1]?c.join("-"):c[0]?c[0]:c[1]?c[1]:"center",offset:u}}},{"../../lib":498}],608:[function(t,e,r){"use strict";var n=t("mapbox-gl/dist/mapbox-gl-unminified"),i=t("../../lib"),a=i.strTranslate,o=i.strScale,s=t("../../plots/get_data").getSubplotCalcData,l=t("../../constants/xmlns_namespaces"),c=t("@plotly/d3"),u=t("../../components/drawing"),f=t("../../lib/svg_text_utils"),h=t("./mapbox"),p=r.constants=t("./constants");function d(t){return"string"==typeof t&&(-1!==p.styleValuesMapbox.indexOf(t)||0===t.indexOf("mapbox://"))}r.name="mapbox",r.attr="subplot",r.idRoot="mapbox",r.idRegex=r.attrRegex=i.counterRegex("mapbox"),r.attributes={subplot:{valType:"subplotid",dflt:"mapbox",editType:"calc"}},r.layoutAttributes=t("./layout_attributes"),r.supplyLayoutDefaults=t("./layout_defaults"),r.plot=function(t){var e=t._fullLayout,r=t.calcdata,a=e._subplots.mapbox;if(n.version!==p.requiredVersion)throw new Error(p.wrongVersionErrorMsg);var o=function(t,e){var r=t._fullLayout;if(""===t._context.mapboxAccessToken)return"";for(var n=[],a=[],o=!1,s=!1,l=0;l1&&i.warn(p.multipleTokensErrorMsg),n[0]):(a.length&&i.log(["Listed mapbox access token(s)",a.join(","),"but did not use a Mapbox map style, ignoring token(s)."].join(" ")),"")}(t,a);n.accessToken=o;for(var l=0;l_/2){var w=v.split("|").join("
");x.text(w).attr("data-unformatted",w).call(f.convertToTspans,t),b=u.bBox(x.node())}x.attr("transform",a(-3,8-b.height)),y.insert("rect",".static-attribution").attr({x:-b.width-6,y:-b.height-3,width:b.width+6,height:b.height+3,fill:"rgba(255, 255, 255, 0.75)"});var T=1;b.width+6>_&&(T=_/(b.width+6));var k=[n.l+n.w*h.x[1],n.t+n.h*(1-h.y[0])];y.attr("transform",a(k[0],k[1])+o(T))}},r.updateFx=function(t){for(var e=t._fullLayout,r=e._subplots.mapbox,n=0;n0){for(var r=0;r0}function u(t){var e={},r={};switch(t.type){case"circle":n.extendFlat(r,{"circle-radius":t.circle.radius,"circle-color":t.color,"circle-opacity":t.opacity});break;case"line":n.extendFlat(r,{"line-width":t.line.width,"line-color":t.color,"line-opacity":t.opacity,"line-dasharray":t.line.dash});break;case"fill":n.extendFlat(r,{"fill-color":t.color,"fill-outline-color":t.fill.outlinecolor,"fill-opacity":t.opacity});break;case"symbol":var i=t.symbol,o=a(i.textposition,i.iconsize);n.extendFlat(e,{"icon-image":i.icon+"-15","icon-size":i.iconsize/10,"text-field":i.text,"text-size":i.textfont.size,"text-anchor":o.anchor,"text-offset":o.offset,"symbol-placement":i.placement}),n.extendFlat(r,{"icon-color":t.color,"text-color":i.textfont.color,"text-opacity":t.opacity});break;case"raster":n.extendFlat(r,{"raster-fade-duration":0,"raster-opacity":t.opacity})}return{layout:e,paint:r}}l.update=function(t){this.visible?this.needsNewImage(t)?this.updateImage(t):this.needsNewSource(t)?(this.removeLayer(),this.updateSource(t),this.updateLayer(t)):this.needsNewLayer(t)?this.updateLayer(t):this.updateStyle(t):(this.updateSource(t),this.updateLayer(t)),this.visible=c(t)},l.needsNewImage=function(t){return this.subplot.map.getSource(this.idSource)&&"image"===this.sourceType&&"image"===t.sourcetype&&(this.source!==t.source||JSON.stringify(this.coordinates)!==JSON.stringify(t.coordinates))},l.needsNewSource=function(t){return 
this.sourceType!==t.sourcetype||JSON.stringify(this.source)!==JSON.stringify(t.source)||this.layerType!==t.type},l.needsNewLayer=function(t){return this.layerType!==t.type||this.below!==this.subplot.belowLookup["layout-"+this.index]},l.lookupBelow=function(){return this.subplot.belowLookup["layout-"+this.index]},l.updateImage=function(t){this.subplot.map.getSource(this.idSource).updateImage({url:t.source,coordinates:t.coordinates});var e=this.findFollowingMapboxLayerId(this.lookupBelow());null!==e&&this.subplot.map.moveLayer(this.idLayer,e)},l.updateSource=function(t){var e=this.subplot.map;if(e.getSource(this.idSource)&&e.removeSource(this.idSource),this.sourceType=t.sourcetype,this.source=t.source,c(t)){var r=function(t){var e,r=t.sourcetype,n=t.source,a={type:r};"geojson"===r?e="data":"vector"===r?e="string"==typeof n?"url":"tiles":"raster"===r?(e="tiles",a.tileSize=256):"image"===r&&(e="url",a.coordinates=t.coordinates);a[e]=n,t.sourceattribution&&(a.attribution=i(t.sourceattribution));return a}(t);e.addSource(this.idSource,r)}},l.findFollowingMapboxLayerId=function(t){if("traces"===t)for(var e=this.subplot.getMapLayers(),r=0;r1)for(r=0;r-1&&v(e.originalEvent,n,[r.xaxis],[r.yaxis],r.id,t),i.indexOf("event")>-1&&c.click(n,e.originalEvent)}}},_.updateFx=function(t){var e=this,r=e.map,n=e.gd;if(!e.isStatic){var a,o=t.dragmode;a=f(o)?function(t,r){(t.range={})[e.id]=[c([r.xmin,r.ymin]),c([r.xmax,r.ymax])]}:function(t,r,n){(t.lassoPoints={})[e.id]=n.filtered.map(c)};var 
s=e.dragOptions;e.dragOptions=i.extendDeep(s||{},{dragmode:t.dragmode,element:e.div,gd:n,plotinfo:{id:e.id,domain:t[e.id].domain,xaxis:e.xaxis,yaxis:e.yaxis,fillRangeItems:a},xaxes:[e.xaxis],yaxes:[e.yaxis],subplot:e.id}),r.off("click",e.onClickInPanHandler),p(o)||h(o)?(r.dragPan.disable(),r.on("zoomstart",e.clearSelect),e.dragOptions.prepFn=function(t,r,n){d(t,r,n,e.dragOptions,o)},l.init(e.dragOptions)):(r.dragPan.enable(),r.off("zoomstart",e.clearSelect),e.div.onmousedown=null,e.onClickInPanHandler=e.onClickInPanFn(e.dragOptions),r.on("click",e.onClickInPanHandler))}function c(t){var r=e.map.unproject(t);return[r.lng,r.lat]}},_.updateFramework=function(t){var e=t[this.id].domain,r=t._size,n=this.div.style;n.width=r.w*(e.x[1]-e.x[0])+"px",n.height=r.h*(e.y[1]-e.y[0])+"px",n.left=r.l+e.x[0]*r.w+"px",n.top=r.t+(1-e.y[1])*r.h+"px",this.xaxis._offset=r.l+e.x[0]*r.w,this.xaxis._length=r.w*(e.x[1]-e.x[0]),this.yaxis._offset=r.t+(1-e.y[1])*r.h,this.yaxis._length=r.h*(e.y[1]-e.y[0])},_.updateLayers=function(t){var e,r=t[this.id].layers,n=this.layerList;if(r.length!==n.length){for(e=0;e=e.width-20?(a["text-anchor"]="start",a.x=5):(a["text-anchor"]="end",a.x=e._paper.attr("width")-7),r.attr(a);var o=r.select(".js-link-to-tool"),s=r.select(".js-link-spacer"),l=r.select(".js-sourcelinks");t._context.showSources&&t._context.showSources(t),t._context.showLink&&function(t,e){e.text("");var r=e.append("a").attr({"xlink:xlink:href":"#",class:"link--impt link--embedview","font-weight":"bold"}).text(t._context.linkText+" "+String.fromCharCode(187));if(t._context.sendData)r.on("click",(function(){b.sendDataToCloud(t)}));else{var n=window.location.pathname.split("/"),i=window.location.search;r.attr({"xlink:xlink:show":"new","xlink:xlink:href":"/"+n[2].split(".")[0]+"/"+n[1]+i})}}(t,o),s.text(o.text()&&l.text()?" 
- ":"")}},b.sendDataToCloud=function(t){var e=(window.PLOTLYENV||{}).BASE_URL||t._context.plotlyServerURL;if(e){t.emit("plotly_beforeexport");var r=n.select(t).append("div").attr("id","hiddenform").style("display","none"),i=r.append("form").attr({action:e+"/external",method:"post",target:"_blank"});return i.append("input").attr({type:"text",name:"data"}).node().value=b.graphJson(t,!1,"keepdata"),i.node().submit(),r.remove(),t.emit("plotly_afterexport"),!1}};var T=["days","shortDays","months","shortMonths","periods","dateTime","date","time","decimal","thousands","grouping","currency"],k=["year","month","dayMonth","dayMonthYear"];function A(t,e){var r=t._context.locale;r||(r="en-US");var n=!1,i={};function a(t){for(var r=!0,a=0;a1&&z.length>1){for(s.getComponentMethod("grid","sizeDefaults")(c,l),o=0;o15&&z.length>15&&0===l.shapes.length&&0===l.images.length,b.linkSubplots(h,l,f,n),b.cleanPlot(h,l,f,n);var N=!(!n._has||!n._has("gl2d")),j=!(!l._has||!l._has("gl2d")),U=!(!n._has||!n._has("cartesian"))||N,V=!(!l._has||!l._has("cartesian"))||j;U&&!V?n._bgLayer.remove():V&&!U&&(l._shouldCreateBgLayer=!0),n._zoomlayer&&!t._dragging&&d({_fullLayout:n}),function(t,e){var r,n=[];e.meta&&(r=e._meta={meta:e.meta,layout:{meta:e.meta}});for(var i=0;i0){var f=1-2*s;n=Math.round(f*n),i=Math.round(f*i)}}var h=b.layoutAttributes.width.min,p=b.layoutAttributes.height.min;n1,m=!e.height&&Math.abs(r.height-i)>1;(m||d)&&(d&&(r.width=n),m&&(r.height=i)),t._initialAutoSize||(t._initialAutoSize={width:n,height:i}),b.sanitizeMargins(r)},b.supplyLayoutModuleDefaults=function(t,e,r,n){var i,a,o,l=s.componentsRegistry,c=e._basePlotModules,f=s.subplotsRegistry.cartesian;for(i in l)(o=l[i]).includeBasePlot&&o.includeBasePlot(t,e);for(var h in c.length||c.push(f),e._has("cartesian")&&(s.getComponentMethod("grid","contentDefaults")(t,e),f.finalizeSubplots(t,e)),e._subplots)e._subplots[h].sort(u.subplotSort);for(a=0;a1&&(r.l/=m,r.r/=m)}if(f){var g=(r.t+r.b)/f;g>1&&(r.t/=g,r.b/=g)}var v=void 
0!==r.xl?r.xl:r.x,y=void 0!==r.xr?r.xr:r.x,x=void 0!==r.yt?r.yt:r.y,_=void 0!==r.yb?r.yb:r.y;h[e]={l:{val:v,size:r.l+d},r:{val:y,size:r.r+d},b:{val:_,size:r.b+d},t:{val:x,size:r.t+d}},p[e]=1}else delete h[e],delete p[e];if(!n._replotting)return b.doAutoMargin(t)}},b.doAutoMargin=function(t){var e=t._fullLayout,r=e.width,n=e.height;e._size||(e._size={}),C(e);var i=e._size,a=e.margin,l=u.extendFlat({},i),c=a.l,f=a.r,h=a.t,d=a.b,m=e._pushmargin,g=e._pushmarginIds;if(!1!==e.margin.autoexpand){for(var v in m)g[v]||delete m[v];for(var y in m.base={l:{val:0,size:c},r:{val:1,size:f},t:{val:1,size:h},b:{val:0,size:d}},m){var x=m[y].l||{},_=m[y].b||{},w=x.val,T=x.size,k=_.val,A=_.size;for(var M in m){if(o(T)&&m[M].r){var S=m[M].r.val,E=m[M].r.size;if(S>w){var L=(T*S+(E-r)*w)/(S-w),P=(E*(1-w)+(T-r)*(1-S))/(S-w);L+P>c+f&&(c=L,f=P)}}if(o(A)&&m[M].t){var I=m[M].t.val,O=m[M].t.size;if(I>k){var z=(A*I+(O-n)*k)/(I-k),D=(O*(1-k)+(A-n)*(1-I))/(I-k);z+D>d+h&&(d=z,h=D)}}}}}var R=u.constrain(r-a.l-a.r,2,64),F=u.constrain(n-a.t-a.b,2,64),B=Math.max(0,r-R),N=Math.max(0,n-F);if(B){var j=(c+f)/B;j>1&&(c/=j,f/=j)}if(N){var U=(d+h)/N;U>1&&(d/=U,h/=U)}if(i.l=Math.round(c),i.r=Math.round(f),i.t=Math.round(h),i.b=Math.round(d),i.p=Math.round(a.pad),i.w=Math.round(r)-i.l-i.r,i.h=Math.round(n)-i.t-i.b,!e._replotting&&b.didMarginChange(l,i)){"_redrawFromAutoMarginCount"in e?e._redrawFromAutoMarginCount++:e._redrawFromAutoMarginCount=1;var V=3*(1+Object.keys(g).length);if(e._redrawFromAutoMarginCount0&&(t._transitioningWithDuration=!0),t._transitionData._interruptCallbacks.push((function(){n=!0})),r.redraw&&t._transitionData._interruptCallbacks.push((function(){return s.call("redraw",t)})),t._transitionData._interruptCallbacks.push((function(){t.emit("plotly_transitioninterrupted",[])}));var a=0,o=0;function l(){return 
a++,function(){o++,n||o!==a||function(e){if(!t._transitionData)return;(function(t){if(t)for(;t.length;)t.shift()})(t._transitionData._interruptCallbacks),Promise.resolve().then((function(){if(r.redraw)return s.call("redraw",t)})).then((function(){t._transitioning=!1,t._transitioningWithDuration=!1,t.emit("plotly_transitioned",[])})).then(e)}(i)}}r.runFn(l),setTimeout(l())}))}],a=u.syncOrAsync(i,t);return a&&a.then||(a=Promise.resolve()),a.then((function(){return t}))}b.didMarginChange=function(t,e){for(var r=0;r1)return!0}return!1},b.graphJson=function(t,e,r,n,i,a){(i&&e&&!t._fullData||i&&!e&&!t._fullLayout)&&b.supplyDefaults(t);var o=i?t._fullData:t.data,s=i?t._fullLayout:t.layout,l=(t._transitionData||{})._frames;function c(t,e){if("function"==typeof t)return e?"_function_":null;if(u.isPlainObject(t)){var n,i={};return Object.keys(t).sort().forEach((function(a){if(-1===["_","["].indexOf(a.charAt(0)))if("function"!=typeof t[a]){if("keepdata"===r){if("src"===a.substr(a.length-3))return}else if("keepstream"===r){if("string"==typeof(n=t[a+"src"])&&n.indexOf(":")>0&&!u.isPlainObject(t.stream))return}else if("keepall"!==r&&"string"==typeof(n=t[a+"src"])&&n.indexOf(":")>0)return;i[a]=c(t[a],e)}else e&&(i[a]="_function")})),i}return Array.isArray(t)?t.map((function(t){return c(t,e)})):u.isTypedArray(t)?u.simpleMap(t,u.identity):u.isJSDate(t)?u.ms2DateTimeLocal(+t):t}var f={data:(o||[]).map((function(t){var r=c(t);return e&&delete r.fit,r}))};if(!e&&(f.layout=c(s),i)){var h=s._size;f.layout.computed={margin:{b:h.b,l:h.l,r:h.r,t:h.t}}}return l&&(f.frames=c(l)),a&&(f.config=c(t._context,!0)),"object"===n?f:JSON.stringify(f)},b.modifyFrames=function(t,e){var r,n,i,a=t._transitionData._frames,o=t._transitionData._frameHash;for(r=0;r=0;a--)if(s[a].enabled){r._indexToPoints=s[a]._indexToPoints;break}n&&n.calc&&(o=n.calc(t,r))}Array.isArray(o)&&o[0]||(o=[{x:h,y:h}]),o[0].t||(o[0].t={}),o[0].trace=r,d[e]=o}}for(z(o,c,f),i=0;i1e-10?t:0}function h(t,e,r){e=e||0,r=r||0;for(var 
n=t.length,i=new Array(n),a=0;a0?r:1/0})),i=n.mod(r+1,e.length);return[e[r],e[i]]},findIntersectionXY:c,findXYatLength:function(t,e,r,n){var i=-e*r,a=e*e+1,o=2*(e*i-r),s=i*i+r*r-t*t,l=Math.sqrt(o*o-4*a*s),c=(-o+l)/(2*a),u=(-o-l)/(2*a);return[[c,e*c+i+n],[u,e*u+i+n]]},clampTiny:f,pathPolygon:function(t,e,r,n,i,a){return"M"+h(u(t,e,r,n),i,a).join("L")},pathPolygonAnnulus:function(t,e,r,n,i,a,o){var s,l;t=90||s>90&&l>=450?1:u<=0&&h<=0?0:Math.max(u,h);e=s<=180&&l>=180||s>180&&l>=540?-1:c>=0&&f>=0?0:Math.min(c,f);r=s<=270&&l>=270||s>270&&l>=630?-1:u>=0&&h>=0?0:Math.min(u,h);n=l>=360?1:c<=0&&f<=0?0:Math.max(c,f);return[e,r,n,i]}(p),b=x[2]-x[0],_=x[3]-x[1],w=h/f,T=Math.abs(_/b);w>T?(d=f,y=(h-(m=f*T))/n.h/2,g=[o[0],o[1]],v=[s[0]+y,s[1]-y]):(m=h,y=(f-(d=h/T))/n.w/2,g=[o[0]+y,o[1]-y],v=[s[0],s[1]]),this.xLength2=d,this.yLength2=m,this.xDomain2=g,this.yDomain2=v;var k,A=this.xOffset2=n.l+n.w*g[0],M=this.yOffset2=n.t+n.h*(1-v[1]),S=this.radius=d/b,E=this.innerRadius=this.getHole(e)*S,L=this.cx=A-S*x[0],C=this.cy=M+S*x[3],P=this.cxx=L-A,I=this.cyy=C-M,O=i.side;"counterclockwise"===O?(k=O,O="top"):"clockwise"===O&&(k=O,O="bottom"),this.radialAxis=this.mockAxis(t,e,i,{_id:"x",side:O,_trueSide:k,domain:[E/n.w,S/n.w]}),this.angularAxis=this.mockAxis(t,e,a,{side:"right",domain:[0,Math.PI],autorange:!1}),this.doAutoRange(t,e),this.updateAngularAxis(t,e),this.updateRadialAxis(t,e),this.updateRadialAxisTitle(t,e),this.xaxis=this.mockCartesianAxis(t,e,{_id:"x",domain:g}),this.yaxis=this.mockCartesianAxis(t,e,{_id:"y",domain:v});var z=this.pathSubplot();this.clipPaths.forTraces.select("path").attr("d",z).attr("transform",l(P,I)),r.frontplot.attr("transform",l(A,M)).call(u.setClipUrl,this._hasClipOnAxisFalse?null:this.clipIds.forTraces,this.gd),r.bg.attr("d",z).attr("transform",l(L,C)).call(c.fill,e.bgcolor)},N.mockAxis=function(t,e,r,n){var i=o.extendFlat({},r,n);return d(i,e,t),i},N.mockCartesianAxis=function(t,e,r){var 
n=this,i=n.isSmith,a=r._id,s=o.extendFlat({type:"linear"},r);p(s,t);var l={x:[0,2],y:[1,3]};return s.setRange=function(){var t=n.sectorBBox,r=l[a],i=n.radialAxis._rl,o=(i[1]-i[0])/(1-n.getHole(e));s.range=[t[r[0]]*o,t[r[1]]*o]},s.isPtWithinRange="x"!==a||i?function(){return!0}:function(t){return n.isPtInside(t)},s.setRange(),s.setScale(),s},N.doAutoRange=function(t,e){var r=this.gd,n=this.radialAxis,i=this.getRadial(e);m(r,n);var a=n.range;i.range=a.slice(),i._input.range=a.slice(),n._rl=[n.r2l(a[0],null,"gregorian"),n.r2l(a[1],null,"gregorian")]},N.updateRadialAxis=function(t,e){var r=this,n=r.gd,i=r.layers,a=r.radius,u=r.innerRadius,f=r.cx,p=r.cy,d=r.getRadial(e),m=D(r.getSector(e)[0],360),g=r.radialAxis,v=u90&&m<=270&&(g.tickangle=180);var x=y?function(t){var e=O(r,C([t.x,0]));return l(e[0]-f,e[1]-p)}:function(t){return l(g.l2p(t.x)+u,0)},b=y?function(t){return I(r,t.x,-1/0,1/0)}:function(t){return r.pathArc(g.r2p(t.x)+u)},_=j(d);if(r.radialTickLayout!==_&&(i["radial-axis"].selectAll(".xtick").remove(),r.radialTickLayout=_),v){g.setScale();var w=0,T=y?(g.tickvals||[]).filter((function(t){return t>=0})).map((function(t){return h.tickText(g,t,!0,!1)})):h.calcTicks(g),k=y?T:h.clipEnds(g,T),A=h.getTickSigns(g)[2];y&&(("top"===g.ticks&&"bottom"===g.side||"bottom"===g.ticks&&"top"===g.side)&&(A=-A),"top"===g.ticks&&"top"===g.side&&(w=-g.ticklen),"bottom"===g.ticks&&"bottom"===g.side&&(w=g.ticklen)),h.drawTicks(n,g,{vals:T,layer:i["radial-axis"],path:h.makeTickPath(g,0,A),transFn:x,crisp:!1}),h.drawGrid(n,g,{vals:k,layer:i["radial-grid"],path:b,transFn:o.noop,crisp:!1}),h.drawLabels(n,g,{vals:T,layer:i["radial-axis"],transFn:x,labelFns:h.makeLabelFns(g,w)})}var 
M=r.radialAxisAngle=r.vangles?F(U(R(d.angle),r.vangles)):d.angle,S=l(f,p),E=S+s(-M);V(i["radial-axis"],v&&(d.showticklabels||d.ticks),{transform:E}),V(i["radial-grid"],v&&d.showgrid,{transform:y?"":S}),V(i["radial-line"].select("line"),v&&d.showline,{x1:y?-a:u,y1:0,x2:a,y2:0,transform:E}).attr("stroke-width",d.linewidth).call(c.stroke,d.linecolor)},N.updateRadialAxisTitle=function(t,e,r){if(!this.isSmith){var n=this.gd,i=this.radius,a=this.cx,o=this.cy,s=this.getRadial(e),l=this.id+"title",c=0;if(s.title){var f=u.bBox(this.layers["radial-axis"].node()).height,h=s.title.font.size,p=s.side;c="top"===p?h:"counterclockwise"===p?-(f+.4*h):f+.8*h}var d=void 0!==r?r:this.radialAxisAngle,m=R(d),g=Math.cos(m),v=Math.sin(m),y=a+i/2*g+c*v,b=o-i/2*v+c*g;this.layers["radial-axis-title"]=x.draw(n,l,{propContainer:s,propName:this.id+".radialaxis.title",placeholder:z(n,"Click to enter radial axis title"),attributes:{x:y,y:b,"text-anchor":"middle"},transform:{rotate:-d}})}},N.updateAngularAxis=function(t,e){var r=this,n=r.gd,i=r.layers,a=r.radius,u=r.innerRadius,f=r.cx,p=r.cy,d=r.getAngular(e),m=r.angularAxis,g=r.isSmith;g||(r.fillViewInitialKey("angularaxis.rotation",d.rotation),m.setGeometry(),m.setScale());var v=g?function(t){var e=O(r,C([0,t.x]));return Math.atan2(e[0]-f,e[1]-p)-Math.PI/2}:function(t){return m.t2g(t.x)};"linear"===m.type&&"radians"===m.thetaunit&&(m.tick0=F(m.tick0),m.dtick=F(m.dtick));var y=function(t){return l(f+a*Math.cos(t),p-a*Math.sin(t))},x=g?function(t){var e=O(r,C([0,t.x]));return l(e[0],e[1])}:function(t){return y(v(t))},b=g?function(t){var e=O(r,C([0,t.x])),n=Math.atan2(e[0]-f,e[1]-p)-Math.PI/2;return l(e[0],e[1])+s(-F(n))}:function(t){var e=v(t);return y(e)+s(-F(e))},_=g?function(t){return P(r,t.x,0,1/0)}:function(t){var e=v(t),r=Math.cos(e),n=Math.sin(e);return"M"+[f+u*r,p-u*n]+"L"+[f+a*r,p-a*n]},w=h.makeLabelFns(m,0).labelStandoff,T={xFn:function(t){var e=v(t);return Math.cos(e)*w},yFn:function(t){var 
e=v(t),r=Math.sin(e)>0?.2:1;return-Math.sin(e)*(w+t.fontSize*r)+Math.abs(Math.cos(e))*(t.fontSize*M)},anchorFn:function(t){var e=v(t),r=Math.cos(e);return Math.abs(r)<.1?"middle":r>0?"start":"end"},heightFn:function(t,e,r){var n=v(t);return-.5*(1+Math.sin(n))*r}},k=j(d);r.angularTickLayout!==k&&(i["angular-axis"].selectAll("."+m._id+"tick").remove(),r.angularTickLayout=k);var A,S=g?[1/0].concat(m.tickvals||[]).map((function(t){return h.tickText(m,t,!0,!1)})):h.calcTicks(m);if(g&&(S[0].text="\u221e",S[0].fontSize*=1.75),"linear"===e.gridshape?(A=S.map(v),o.angleDelta(A[0],A[1])<0&&(A=A.slice().reverse())):A=null,r.vangles=A,"category"===m.type&&(S=S.filter((function(t){return o.isAngleInsideSector(v(t),r.sectorInRad)}))),m.visible){var E="inside"===m.ticks?-1:1,L=(m.linewidth||1)/2;h.drawTicks(n,m,{vals:S,layer:i["angular-axis"],path:"M"+E*L+",0h"+E*m.ticklen,transFn:b,crisp:!1}),h.drawGrid(n,m,{vals:S,layer:i["angular-grid"],path:_,transFn:o.noop,crisp:!1}),h.drawLabels(n,m,{vals:S,layer:i["angular-axis"],repositionOnUpdate:!0,transFn:x,labelFns:T})}V(i["angular-line"].select("path"),d.showline,{d:r.pathSubplot(),transform:l(f,p)}).attr("stroke-width",d.linewidth).call(c.stroke,d.linecolor)},N.updateFx=function(t,e){this.gd._context.staticPlot||(!this.isSmith&&(this.updateAngularDrag(t),this.updateRadialDrag(t,e,0),this.updateRadialDrag(t,e,1)),this.updateHoverAndMainDrag(t))},N.updateHoverAndMainDrag=function(t){var e,r,s=this,c=s.isSmith,u=s.gd,f=s.layers,h=t._zoomlayer,p=S.MINZOOM,d=S.OFFEDGE,m=s.radius,x=s.innerRadius,T=s.cx,k=s.cy,A=s.cxx,M=s.cyy,L=s.sectorInRad,C=s.vangles,P=s.radialAxis,I=E.clampTiny,O=E.findXYatLength,z=E.findEnclosingVertexAngles,D=S.cornerHalfWidth,R=S.cornerLen/2,F=g.makeDragger(f,"path","maindrag","crosshair");n.select(F).attr("d",s.pathSubplot()).attr("transform",l(T,k)),F.onmousemove=function(t){y.hover(u,t,s.id),u._fullLayout._lasthover=F,u._fullLayout._hoversubplot=s.id},F.onmouseout=function(t){u._dragging||v.unhover(u,t)};var 
B,N,j,U,V,H,q,G,Y,W={element:F,gd:u,subplot:s.id,plotinfo:{id:s.id,xaxis:s.xaxis,yaxis:s.yaxis},xaxes:[s.xaxis],yaxes:[s.yaxis]};function X(t,e){return Math.sqrt(t*t+e*e)}function Z(t,e){return X(t-A,e-M)}function J(t,e){return Math.atan2(M-e,t-A)}function K(t,e){return[t*Math.cos(e),t*Math.sin(-e)]}function Q(t,e){if(0===t)return s.pathSector(2*D);var r=R/t,n=e-r,i=e+r,a=Math.max(0,Math.min(t,m)),o=a-D,l=a+D;return"M"+K(o,n)+"A"+[o,o]+" 0,0,0 "+K(o,i)+"L"+K(l,i)+"A"+[l,l]+" 0,0,1 "+K(l,n)+"Z"}function $(t,e,r){if(0===t)return s.pathSector(2*D);var n,i,a=K(t,e),o=K(t,r),l=I((a[0]+o[0])/2),c=I((a[1]+o[1])/2);if(l&&c){var u=c/l,f=-1/u,h=O(D,u,l,c);n=O(R,f,h[0][0],h[0][1]),i=O(R,f,h[1][0],h[1][1])}else{var p,d;c?(p=R,d=D):(p=D,d=R),n=[[l-p,c-d],[l+p,c-d]],i=[[l-p,c+d],[l+p,c+d]]}return"M"+n.join("L")+"L"+i.reverse().join("L")+"Z"}function tt(t,e){return e=Math.max(Math.min(e,m),x),tp?(t-1&&1===t&&_(e,u,[s.xaxis],[s.yaxis],s.id,W),r.indexOf("event")>-1&&y.click(u,e,s.id)}W.prepFn=function(t,n,a){var l=u._fullLayout.dragmode,f=F.getBoundingClientRect();u._fullLayout._calcInverseTransform(u);var p=u._fullLayout._invTransform;e=u._fullLayout._invScaleX,r=u._fullLayout._invScaleY;var d=o.apply3DTransform(p)(n-f.left,a-f.top);if(B=d[0],N=d[1],C){var v=E.findPolygonOffset(m,L[0],L[1],C);B+=A+v[0],N+=M+v[1]}switch(l){case"zoom":W.clickFn=st,c||(W.moveFn=C?it:rt,W.doneFn=at,function(){j=null,U=null,V=s.pathSubplot(),H=!1;var t=u._fullLayout[s.id];q=i(t.bgcolor).getLuminance(),(G=g.makeZoombox(h,q,T,k,V)).attr("fill-rule","evenodd"),Y=g.makeCorners(h,T,k),w(u)}());break;case"select":case"lasso":b(t,n,a,W,l)}},v.init(W)},N.updateRadialDrag=function(t,e,r){var i=this,c=i.gd,u=i.layers,f=i.radius,h=i.innerRadius,p=i.cx,d=i.cy,m=i.radialAxis,y=S.radialDragBoxSize,x=y/2;if(m.visible){var 
b,_,T,M=R(i.radialAxisAngle),E=m._rl,L=E[0],C=E[1],P=E[r],I=.75*(E[1]-E[0])/(1-i.getHole(e))/f;r?(b=p+(f+x)*Math.cos(M),_=d-(f+x)*Math.sin(M),T="radialdrag"):(b=p+(h-x)*Math.cos(M),_=d-(h-x)*Math.sin(M),T="radialdrag-inner");var O,z,D,B=g.makeRectDragger(u,T,"crosshair",-x,-x,y,y),N={element:B,gd:c};V(n.select(B),m.visible&&h0==(r?D>L:Dn?function(t){return t<=0}:function(t){return t>=0};t.c2g=function(r){var n=t.c2l(r)-e;return(s(n)?n:0)+o},t.g2c=function(r){return t.l2c(r+e-o)},t.g2p=function(t){return t*a},t.c2p=function(e){return t.g2p(t.c2g(e))}}}(t,e);break;case"angularaxis":!function(t,e){var r=t.type;if("linear"===r){var i=t.d2c,s=t.c2d;t.d2c=function(t,e){return function(t,e){return"degrees"===e?a(t):t}(i(t),e)},t.c2d=function(t,e){return s(function(t,e){return"degrees"===e?o(t):t}(t,e))}}t.makeCalcdata=function(e,i){var a,o,s=e[i],l=e._length,c=function(r){return t.d2c(r,e.thetaunit)};if(s){if(n.isTypedArray(s)&&"linear"===r){if(l===s.length)return s;if(s.subarray)return s.subarray(0,l)}for(a=new Array(l),o=0;o0?1:0}function i(t){var e=t[0],r=t[1];if(!isFinite(e)||!isFinite(r))return[1,0];var n=(e+1)*(e+1)+r*r;return[(e*e+r*r-1)/n,2*r/n]}function a(t,e){var r=e[0],n=e[1];return[r*t.radius+t.cx,-n*t.radius+t.cy]}function o(t,e){return e*t.radius}e.exports={smith:i,reactanceArc:function(t,e,r,n){var s=a(t,i([r,e])),l=s[0],c=s[1],u=a(t,i([n,e])),f=u[0],h=u[1];if(0===e)return["M"+l+","+c,"L"+f+","+h].join(" ");var p=o(t,1/Math.abs(e));return["M"+l+","+c,"A"+p+","+p+" 0 0,"+(e<0?1:0)+" "+f+","+h].join(" ")},resistanceArc:function(t,e,r,s){var l=o(t,1/(e+1)),c=a(t,i([e,r])),u=c[0],f=c[1],h=a(t,i([e,s])),p=h[0],d=h[1];if(n(r)!==n(s)){var m=a(t,i([e,0]));return["M"+u+","+f,"A"+l+","+l+" 0 0,"+(00){for(var n=[],i=0;i=u&&(h.min=0,d.min=0,g.min=0,t.aaxis&&delete t.aaxis.min,t.baxis&&delete t.baxis.min,t.caxis&&delete t.caxis.min)}function m(t,e,r,n){var i=h[e._name];function o(r,n){return a.coerce(t,e,i,r,n)}o("uirevision",n.uirevision),e.type="linear";var 
p=o("color"),d=p!==i.color.dflt?p:r.font.color,m=e._name.charAt(0).toUpperCase(),g="Component "+m,v=o("title.text",g);e._hovertitle=v===g?v:m,a.coerceFont(o,"title.font",{family:r.font.family,size:a.bigFont(r.font.size),color:d}),o("min"),u(t,e,o,"linear"),l(t,e,o,"linear"),s(t,e,o,"linear"),c(t,e,o,{outerTicks:!0}),o("showticklabels")&&(a.coerceFont(o,"tickfont",{family:r.font.family,size:r.font.size,color:d}),o("tickangle"),o("tickformat")),f(t,e,o,{dfltColor:p,bgColor:r.bgColor,blend:60,showLine:!0,showGrid:!0,noZeroLine:!0,attributes:i}),o("hoverformat"),o("layer")}e.exports=function(t,e,r){o(t,e,r,{type:"ternary",attributes:h,handleDefaults:d,font:e.font,paper_bgcolor:e.paper_bgcolor})}},{"../../components/color":361,"../../lib":498,"../../plot_api/plot_template":538,"../cartesian/line_grid_defaults":566,"../cartesian/prefix_suffix_defaults":568,"../cartesian/tick_label_defaults":573,"../cartesian/tick_mark_defaults":574,"../cartesian/tick_value_defaults":575,"../subplot_defaults":627,"./layout_attributes":630}],632:[function(t,e,r){"use strict";var n=t("@plotly/d3"),i=t("tinycolor2"),a=t("../../registry"),o=t("../../lib"),s=o.strTranslate,l=o._,c=t("../../components/color"),u=t("../../components/drawing"),f=t("../cartesian/set_convert"),h=t("../../lib/extend").extendFlat,p=t("../plots"),d=t("../cartesian/axes"),m=t("../../components/dragelement"),g=t("../../components/fx"),v=t("../../components/dragelement/helpers"),y=v.freeMode,x=v.rectMode,b=t("../../components/titles"),_=t("../cartesian/select").prepSelect,w=t("../cartesian/select").selectOnClick,T=t("../cartesian/select").clearSelect,k=t("../cartesian/select").clearSelectionsCache,A=t("../cartesian/constants");function M(t,e){this.id=t.id,this.graphDiv=t.graphDiv,this.init(e),this.makeFramework(e),this.aTickLayout=null,this.bTickLayout=null,this.cTickLayout=null}e.exports=M;var 
S=M.prototype;S.init=function(t){this.container=t._ternarylayer,this.defs=t._defs,this.layoutId=t._uid,this.traceHash={},this.layers={}},S.plot=function(t,e){var r=e[this.id],n=e._size;this._hasClipOnAxisFalse=!1;for(var i=0;iE*b?i=(a=b)*E:a=(i=x)/E,o=v*i/x,l=y*a/b,r=e.l+e.w*m-i/2,n=e.t+e.h*(1-g)-a/2,p.x0=r,p.y0=n,p.w=i,p.h=a,p.sum=_,p.xaxis={type:"linear",range:[w+2*k-_,_-w-2*T],domain:[m-o/2,m+o/2],_id:"x"},f(p.xaxis,p.graphDiv._fullLayout),p.xaxis.setScale(),p.xaxis.isPtWithinRange=function(t){return t.a>=p.aaxis.range[0]&&t.a<=p.aaxis.range[1]&&t.b>=p.baxis.range[1]&&t.b<=p.baxis.range[0]&&t.c>=p.caxis.range[1]&&t.c<=p.caxis.range[0]},p.yaxis={type:"linear",range:[w,_-T-k],domain:[g-l/2,g+l/2],_id:"y"},f(p.yaxis,p.graphDiv._fullLayout),p.yaxis.setScale(),p.yaxis.isPtWithinRange=function(){return!0};var A=p.yaxis.domain[0],M=p.aaxis=h({},t.aaxis,{range:[w,_-T-k],side:"left",tickangle:(+t.aaxis.tickangle||0)-30,domain:[A,A+l*E],anchor:"free",position:0,_id:"y",_length:i});f(M,p.graphDiv._fullLayout),M.setScale();var S=p.baxis=h({},t.baxis,{range:[_-w-k,T],side:"bottom",domain:p.xaxis.domain,anchor:"free",position:0,_id:"x",_length:i});f(S,p.graphDiv._fullLayout),S.setScale();var L=p.caxis=h({},t.caxis,{range:[_-w-T,k],side:"right",tickangle:(+t.caxis.tickangle||0)+30,domain:[A,A+l*E],anchor:"free",position:0,_id:"y",_length:i});f(L,p.graphDiv._fullLayout),L.setScale();var C="M"+r+","+(n+a)+"h"+i+"l-"+i/2+",-"+a+"Z";p.clipDef.select("path").attr("d",C),p.layers.plotbg.select("path").attr("d",C);var P="M0,"+a+"h"+i+"l-"+i/2+",-"+a+"Z";p.clipDefRelative.select("path").attr("d",P);var I=s(r,n);p.plotContainer.selectAll(".scatterlayer,.maplayer").attr("transform",I),p.clipDefRelative.select("path").attr("transform",null);var O=s(r-S._offset,n+a);p.layers.baxis.attr("transform",O),p.layers.bgrid.attr("transform",O);var z=s(r+i/2,n)+"rotate(30)"+s(0,-M._offset);p.layers.aaxis.attr("transform",z),p.layers.agrid.attr("transform",z);var 
D=s(r+i/2,n)+"rotate(-30)"+s(0,-L._offset);p.layers.caxis.attr("transform",D),p.layers.cgrid.attr("transform",D),p.drawAxes(!0),p.layers.aline.select("path").attr("d",M.showline?"M"+r+","+(n+a)+"l"+i/2+",-"+a:"M0,0").call(c.stroke,M.linecolor||"#000").style("stroke-width",(M.linewidth||0)+"px"),p.layers.bline.select("path").attr("d",S.showline?"M"+r+","+(n+a)+"h"+i:"M0,0").call(c.stroke,S.linecolor||"#000").style("stroke-width",(S.linewidth||0)+"px"),p.layers.cline.select("path").attr("d",L.showline?"M"+(r+i/2)+","+n+"l"+i/2+","+a:"M0,0").call(c.stroke,L.linecolor||"#000").style("stroke-width",(L.linewidth||0)+"px"),p.graphDiv._context.staticPlot||p.initInteractions(),u.setClipUrl(p.layers.frontplot,p._hasClipOnAxisFalse?null:p.clipId,p.graphDiv)},S.drawAxes=function(t){var e=this.graphDiv,r=this.id.substr(7)+"title",n=this.layers,i=this.aaxis,a=this.baxis,o=this.caxis;if(this.drawAx(i),this.drawAx(a),this.drawAx(o),t){var s=Math.max(i.showticklabels?i.tickfont.size/2:0,(o.showticklabels?.75*o.tickfont.size:0)+("outside"===o.ticks?.87*o.ticklen:0)),c=(a.showticklabels?a.tickfont.size:0)+("outside"===a.ticks?a.ticklen:0)+3;n["a-title"]=b.draw(e,"a"+r,{propContainer:i,propName:this.id+".aaxis.title",placeholder:l(e,"Click to enter Component A title"),attributes:{x:this.x0+this.w/2,y:this.y0-i.title.font.size/3-s,"text-anchor":"middle"}}),n["b-title"]=b.draw(e,"b"+r,{propContainer:a,propName:this.id+".baxis.title",placeholder:l(e,"Click to enter Component B title"),attributes:{x:this.x0-c,y:this.y0+this.h+.83*a.title.font.size+c,"text-anchor":"middle"}}),n["c-title"]=b.draw(e,"c"+r,{propContainer:o,propName:this.id+".caxis.title",placeholder:l(e,"Click to enter Component C title"),attributes:{x:this.x0+this.w+c,y:this.y0+this.h+.83*o.title.font.size+c,"text-anchor":"middle"}})}},S.drawAx=function(t){var 
e,r=this.graphDiv,n=t._name,i=n.charAt(0),a=t._id,s=this.layers[n],l=i+"tickLayout",c=(e=t).ticks+String(e.ticklen)+String(e.showticklabels);this[l]!==c&&(s.selectAll("."+a+"tick").remove(),this[l]=c),t.setScale();var u=d.calcTicks(t),f=d.clipEnds(t,u),h=d.makeTransTickFn(t),p=d.getTickSigns(t)[2],m=o.deg2rad(30),g=p*(t.linewidth||1)/2,v=p*t.ticklen,y=this.w,x=this.h,b="b"===i?"M0,"+g+"l"+Math.sin(m)*v+","+Math.cos(m)*v:"M"+g+",0l"+Math.cos(m)*v+","+-Math.sin(m)*v,_={a:"M0,0l"+x+",-"+y/2,b:"M0,0l-"+y/2+",-"+x,c:"M0,0l-"+x+","+y/2}[i];d.drawTicks(r,t,{vals:"inside"===t.ticks?f:u,layer:s,path:b,transFn:h,crisp:!1}),d.drawGrid(r,t,{vals:f,layer:this.layers[i+"grid"],path:_,transFn:h,crisp:!1}),d.drawLabels(r,t,{vals:u,layer:s,transFn:h,labelFns:d.makeLabelFns(t,0,30)})};var L=A.MINZOOM/2+.87,C="m-0.87,.5h"+L+"v3h-"+(L+5.2)+"l"+(L/2+2.6)+",-"+(.87*L+4.5)+"l2.6,1.5l-"+L/2+","+.87*L+"Z",P="m0.87,.5h-"+L+"v3h"+(L+5.2)+"l-"+(L/2+2.6)+",-"+(.87*L+4.5)+"l-2.6,1.5l"+L/2+","+.87*L+"Z",I="m0,1l"+L/2+","+.87*L+"l2.6,-1.5l-"+(L/2+2.6)+",-"+(.87*L+4.5)+"l-"+(L/2+2.6)+","+(.87*L+4.5)+"l2.6,1.5l"+L/2+",-"+.87*L+"Z",O=!0;function z(t){n.select(t).selectAll(".zoombox,.js-zoombox-backdrop,.js-zoombox-menu,.zoombox-corners").remove()}S.clearSelect=function(){k(this.dragOptions),T(this.dragOptions.gd)},S.initInteractions=function(){var t,e,r,n,f,h,p,d,v,b,T,k,M=this,S=M.layers.plotbg.select("path").node(),L=M.graphDiv,D=L._fullLayout._zoomlayer;function R(t){var e={};return e[M.id+".aaxis.min"]=t.a,e[M.id+".baxis.min"]=t.b,e[M.id+".caxis.min"]=t.c,e}function F(t,e){var r=L._fullLayout.clickmode;z(L),2===t&&(L.emit("plotly_doubleclick",null),a.call("_guiRelayout",L,R({a:0,b:0,c:0}))),r.indexOf("select")>-1&&1===t&&w(e,L,[M.xaxis],[M.yaxis],M.id,M.dragOptions),r.indexOf("event")>-1&&g.click(L,e,M.id)}function B(t,e){return 1-e/M.h}function N(t,e){return 1-(t+(M.h-e)/Math.sqrt(3))/M.w}function j(t,e){return(t-(M.h-e)/Math.sqrt(3))/M.w}function U(i,a){var 
o=r+i*t,s=n+a*e,l=Math.max(0,Math.min(1,B(0,n),B(0,s))),c=Math.max(0,Math.min(1,N(r,n),N(o,s))),u=Math.max(0,Math.min(1,j(r,n),j(o,s))),m=(l/2+u)*M.w,g=(1-l/2-c)*M.w,y=(m+g)/2,x=g-m,_=(1-l)*M.h,w=_-x/E;x.2?"rgba(0,0,0,0.4)":"rgba(255,255,255,0.3)").duration(200),k.transition().style("opacity",1).duration(200),b=!0),L.emit("plotly_relayouting",R(p))}function V(){z(L),p!==f&&(a.call("_guiRelayout",L,R(p)),O&&L.data&&L._context.showTips&&(o.notifier(l(L,"Double-click to zoom back out"),"long"),O=!1))}function H(t,e){var r=t/M.xaxis._m,n=e/M.yaxis._m,i=[(p={a:f.a-n,b:f.b+(r+n)/2,c:f.c-(r-n)/2}).a,p.b,p.c].sort(o.sorterAsc),a=i.indexOf(p.a),l=i.indexOf(p.b),c=i.indexOf(p.c);i[0]<0&&(i[1]+i[0]/2<0?(i[2]+=i[0]+i[1],i[0]=i[1]=0):(i[2]+=i[0]/2,i[1]+=i[0]/2,i[0]=0),p={a:i[a],b:i[l],c:i[c]},e=(f.a-p.a)*M.yaxis._m,t=(f.c-p.c-f.b+p.b)*M.xaxis._m);var h=s(M.x0+t,M.y0+e);M.plotContainer.selectAll(".scatterlayer,.maplayer").attr("transform",h);var d=s(-t,-e);M.clipDefRelative.select("path").attr("transform",d),M.aaxis.range=[p.a,M.sum-p.b-p.c],M.baxis.range=[M.sum-p.a-p.c,p.b],M.caxis.range=[M.sum-p.a-p.b,p.c],M.drawAxes(!1),M._hasClipOnAxisFalse&&M.plotContainer.select(".scatterlayer").selectAll(".trace").call(u.hideOutsideRangePoints,M),L.emit("plotly_relayouting",R(p))}function q(){a.call("_guiRelayout",L,R(p))}this.dragOptions={element:S,gd:L,plotinfo:{id:M.id,domain:L._fullLayout[M.id].domain,xaxis:M.xaxis,yaxis:M.yaxis},subplot:M.id,prepFn:function(a,l,u){M.dragOptions.xaxes=[M.xaxis],M.dragOptions.yaxes=[M.yaxis],t=L._fullLayout._invScaleX,e=L._fullLayout._invScaleY;var m=M.dragOptions.dragmode=L._fullLayout.dragmode;y(m)?M.dragOptions.minDrag=1:M.dragOptions.minDrag=void 0,"zoom"===m?(M.dragOptions.moveFn=U,M.dragOptions.clickFn=F,M.dragOptions.doneFn=V,function(t,e,a){var l=S.getBoundingClientRect();r=e-l.left,n=a-l.top,L._fullLayout._calcInverseTransform(L);var 
u=L._fullLayout._invTransform,m=o.apply3DTransform(u)(r,n);r=m[0],n=m[1],f={a:M.aaxis.range[0],b:M.baxis.range[1],c:M.caxis.range[1]},p=f,h=M.aaxis.range[1]-f.a,d=i(M.graphDiv._fullLayout[M.id].bgcolor).getLuminance(),v="M0,"+M.h+"L"+M.w/2+", 0L"+M.w+","+M.h+"Z",b=!1,T=D.append("path").attr("class","zoombox").attr("transform",s(M.x0,M.y0)).style({fill:d>.2?"rgba(0,0,0,0)":"rgba(255,255,255,0)","stroke-width":0}).attr("d",v),k=D.append("path").attr("class","zoombox-corners").attr("transform",s(M.x0,M.y0)).style({fill:c.background,stroke:c.defaultLine,"stroke-width":1,opacity:0}).attr("d","M0,0Z"),M.clearSelect(L)}(0,l,u)):"pan"===m?(M.dragOptions.moveFn=H,M.dragOptions.clickFn=F,M.dragOptions.doneFn=q,f={a:M.aaxis.range[0],b:M.baxis.range[1],c:M.caxis.range[1]},p=f,M.clearSelect(L)):(x(m)||y(m))&&_(a,l,u,M.dragOptions,m)}},S.onmousemove=function(t){g.hover(L,t,M.id),L._fullLayout._lasthover=S,L._fullLayout._hoversubplot=M.id},S.onmouseout=function(t){L._dragging||m.unhover(L,t)},m.init(this.dragOptions)}},{"../../components/color":361,"../../components/dragelement":380,"../../components/dragelement/helpers":379,"../../components/drawing":383,"../../components/fx":401,"../../components/titles":459,"../../lib":498,"../../lib/extend":488,"../../registry":633,"../cartesian/axes":549,"../cartesian/constants":556,"../cartesian/select":570,"../cartesian/set_convert":571,"../plots":614,"@plotly/d3":58,tinycolor2:307}],633:[function(t,e,r){"use strict";var n=t("./lib/loggers"),i=t("./lib/noop"),a=t("./lib/push_unique"),o=t("./lib/is_plain_object"),s=t("./lib/dom").addStyleRule,l=t("./lib/extend"),c=t("./plots/attributes"),u=t("./plots/layout_attributes"),f=l.extendFlat,h=l.extendDeepAll;function p(t){var e=t.name,i=t.categories,a=t.meta;if(r.modules[e])n.log("Type "+e+" already registered");else{r.subplotsRegistry[t.basePlotModule.name]||function(t){var e=t.name;if(r.subplotsRegistry[e])return void n.log("Plot type "+e+" already registered.");for(var i in 
v(t),r.subplotsRegistry[e]=t,r.componentsRegistry)b(i,t.name)}(t.basePlotModule);for(var o={},l=0;l-1&&(f[p[r]].title={text:""});for(r=0;r")?"":e.html(t).text()}));return e.remove(),r}(_),_=(_=_.replace(/&(?!\w+;|\#[0-9]+;| \#x[0-9A-F]+;)/g,"&")).replace(c,"'"),i.isIE()&&(_=(_=(_=_.replace(/"/gi,"'")).replace(/(\('#)([^']*)('\))/gi,'("#$2")')).replace(/(\\')/gi,'"')),_}},{"../components/color":361,"../components/drawing":383,"../constants/xmlns_namespaces":475,"../lib":498,"@plotly/d3":58}],642:[function(t,e,r){"use strict";var n=t("../../lib");e.exports=function(t,e){for(var r=0;rf+c||!n(u))}for(var p=0;pa))return e}return void 0!==r?r:t.dflt},r.coerceColor=function(t,e,r){return i(e).isValid()?e:void 0!==r?r:t.dflt},r.coerceEnumerated=function(t,e,r){return t.coerceNumber&&(e=+e),-1!==t.values.indexOf(e)?e:void 0!==r?r:t.dflt},r.getValue=function(t,e){var r;return Array.isArray(t)?e0?e+=r:u<0&&(e-=r)}return e}function z(t){var e=u,r=t.b,i=O(t);return n.inbox(r-e,i-e,_+(i-e)/(i-r)-1)}var D=t[f+"a"],R=t[h+"a"];m=Math.abs(D.r2c(D.range[1])-D.r2c(D.range[0]));var F=n.getDistanceFunction(i,p,d,(function(t){return(p(t)+d(t))/2}));if(n.getClosest(g,F,t),!1!==t.index&&g[t.index].p!==c){k||(L=function(t){return Math.min(A(t),t.p-y.bargroupwidth/2)},C=function(t){return Math.max(M(t),t.p+y.bargroupwidth/2)});var B=g[t.index],N=v.base?B.b+B.s:B.s;t[h+"0"]=t[h+"1"]=R.c2p(B[h],!0),t[h+"LabelVal"]=N;var j=y.extents[y.extents.round(B.p)];t[f+"0"]=D.c2p(x?L(B):j[0],!0),t[f+"1"]=D.c2p(x?C(B):j[1],!0);var U=void 0!==B.orig_p;return t[f+"LabelVal"]=U?B.orig_p:B.p,t.labelLabel=l(D,t[f+"LabelVal"],v[f+"hoverformat"]),t.valueLabel=l(R,t[h+"LabelVal"],v[h+"hoverformat"]),t.baseLabel=l(R,B.b,v[h+"hoverformat"]),t.spikeDistance=(function(t){var e=u,r=t.b,i=O(t);return n.inbox(r-e,i-e,w+(i-e)/(i-r)-1)}(B)+function(t){return P(A(t),M(t),w)}(B))/2,t[f+"Spike"]=D.c2p(B.p,!0),o(B,v,t),t.hovertemplate=v.hovertemplate,t}}function f(t,e){var 
r=e.mcc||t.marker.color,n=e.mlcc||t.marker.line.color,i=s(t,e);return a.opacity(r)?r:a.opacity(n)&&i?n:void 0}e.exports={hoverPoints:function(t,e,r,n,a){var o=u(t,e,r,n,a);if(o){var s=o.cd,l=s[0].trace,c=s[o.index];return o.color=f(l,c),i.getComponentMethod("errorbars","hoverInfo")(c,l,o),[o]}},hoverOnBars:u,getTraceColor:f}},{"../../components/color":361,"../../components/fx":401,"../../constants/numerical":474,"../../lib":498,"../../plots/cartesian/axes":549,"../../registry":633,"./helpers":649}],651:[function(t,e,r){"use strict";e.exports={attributes:t("./attributes"),layoutAttributes:t("./layout_attributes"),supplyDefaults:t("./defaults").supplyDefaults,crossTraceDefaults:t("./defaults").crossTraceDefaults,supplyLayoutDefaults:t("./layout_defaults"),calc:t("./calc"),crossTraceCalc:t("./cross_trace_calc").crossTraceCalc,colorbar:t("../scatter/marker_colorbar"),arraysToCalcdata:t("./arrays_to_calcdata"),plot:t("./plot").plot,style:t("./style").style,styleOnSelect:t("./style").styleOnSelect,hoverPoints:t("./hover").hoverPoints,eventData:t("./event_data"),selectPoints:t("./select"),moduleType:"trace",name:"bar",basePlotModule:t("../../plots/cartesian"),categories:["bar-like","cartesian","svg","bar","oriented","errorBarsOK","showLegend","zoomScale"],animatable:!0,meta:{}}},{"../../plots/cartesian":563,"../scatter/marker_colorbar":940,"./arrays_to_calcdata":642,"./attributes":643,"./calc":644,"./cross_trace_calc":646,"./defaults":647,"./event_data":648,"./hover":650,"./layout_attributes":652,"./layout_defaults":653,"./plot":654,"./select":655,"./style":657}],652:[function(t,e,r){"use strict";e.exports={barmode:{valType:"enumerated",values:["stack","group","overlay","relative"],dflt:"group",editType:"calc"},barnorm:{valType:"enumerated",values:["","fraction","percent"],dflt:"",editType:"calc"},bargap:{valType:"number",min:0,max:1,editType:"calc"},bargroupgap:{valType:"number",min:0,max:1,dflt:0,editType:"calc"}}},{}],653:[function(t,e,r){"use strict";var 
n=t("../../registry"),i=t("../../plots/cartesian/axes"),a=t("../../lib"),o=t("./layout_attributes");e.exports=function(t,e,r){function s(r,n){return a.coerce(t,e,o,r,n)}for(var l=!1,c=!1,u=!1,f={},h=s("barmode"),p=0;p0}function S(t){return"auto"===t?0:t}function E(t,e){var r=Math.PI/180*e,n=Math.abs(Math.sin(r)),i=Math.abs(Math.cos(r));return{x:t.width*i+t.height*n,y:t.width*n+t.height*i}}function L(t,e,r,n,i,a){var o=!!a.isHorizontal,s=!!a.constrained,l=a.angle||0,c=a.anchor||"end",u="end"===c,f="start"===c,h=((a.leftToRight||0)+1)/2,p=1-h,d=i.width,m=i.height,g=Math.abs(e-t),v=Math.abs(n-r),y=g>2*_&&v>2*_?_:0;g-=2*y,v-=2*y;var x=S(l);"auto"!==l||d<=g&&m<=v||!(d>g||m>v)||(d>v||m>g)&&d.01?q:function(t,e,r){return r&&t===e?t:Math.abs(t-e)>=2?q(t):t>e?Math.ceil(t):Math.floor(t)};B=G(B,N,D),N=G(N,B,D),j=G(j,U,!D),U=G(U,j,!D)}var Y=A(a.ensureSingle(I,"path"),P,g,v);if(Y.style("vector-effect","non-scaling-stroke").attr("d",isNaN((N-B)*(U-j))||V&&t._context.staticPlot?"M0,0Z":"M"+B+","+j+"V"+U+"H"+N+"V"+j+"Z").call(l.setClipUrl,e.layerClipId,t),!P.uniformtext.mode&&R){var W=l.makePointStyleFns(f);l.singlePointStyle(c,Y,f,W,t)}!function(t,e,r,n,i,s,c,f,p,g,v){var w,T=e.xaxis,M=e.yaxis,C=t._fullLayout;function P(e,r,n){return a.ensureSingle(e,"text").text(r).attr({class:"bartext bartext-"+w,"text-anchor":"middle","data-notex":1}).call(l.font,n).call(o.convertToTspans,t)}var I=n[0].trace,O="h"===I.orientation,z=function(t,e,r,n,i){var o,s=e[0].trace;o=s.texttemplate?function(t,e,r,n,i){var o=e[0].trace,s=a.castOption(o,r,"texttemplate");if(!s)return"";var l,c,f,h,p="histogram"===o.type,d="waterfall"===o.type,m="funnel"===o.type,g="h"===o.orientation;g?(l="y",c=i,f="x",h=n):(l="x",c=n,f="y",h=i);function v(t){return u(h,h.c2l(t),!0).text}var y=e[r],x={};x.label=y.p,x.labelLabel=x[l+"Label"]=(_=y.p,u(c,c.c2l(_),!0).text);var _;var w=a.castOption(o,y.i,"text");(0===w||w)&&(x.text=w);x.value=y.s,x.valueLabel=x[f+"Label"]=v(y.s);var T={};b(T,o,y.i),(p||void 
0===T.x)&&(T.x=g?x.value:x.label);(p||void 0===T.y)&&(T.y=g?x.label:x.value);(p||void 0===T.xLabel)&&(T.xLabel=g?x.valueLabel:x.labelLabel);(p||void 0===T.yLabel)&&(T.yLabel=g?x.labelLabel:x.valueLabel);d&&(x.delta=+y.rawS||y.s,x.deltaLabel=v(x.delta),x.final=y.v,x.finalLabel=v(x.final),x.initial=x.final-x.delta,x.initialLabel=v(x.initial));m&&(x.value=y.s,x.valueLabel=v(x.value),x.percentInitial=y.begR,x.percentInitialLabel=a.formatPercent(y.begR),x.percentPrevious=y.difR,x.percentPreviousLabel=a.formatPercent(y.difR),x.percentTotal=y.sumR,x.percenTotalLabel=a.formatPercent(y.sumR));var k=a.castOption(o,y.i,"customdata");k&&(x.customdata=k);return a.texttemplateString(s,x,t._d3locale,T,x,o._meta||{})}(t,e,r,n,i):s.textinfo?function(t,e,r,n){var i=t[0].trace,o="h"===i.orientation,s="waterfall"===i.type,l="funnel"===i.type;function c(t){return u(o?r:n,+t,!0).text}var f,h=i.textinfo,p=t[e],d=h.split("+"),m=[],g=function(t){return-1!==d.indexOf(t)};g("label")&&m.push((v=t[e].p,u(o?n:r,v,!0).text));var v;g("text")&&(0===(f=a.castOption(i,p.i,"text"))||f)&&m.push(f);if(s){var y=+p.rawS||p.s,x=p.v,b=x-y;g("initial")&&m.push(c(b)),g("delta")&&m.push(c(y)),g("final")&&m.push(c(x))}if(l){g("value")&&m.push(c(p.s));var _=0;g("percent initial")&&_++,g("percent previous")&&_++,g("percent total")&&_++;var w=_>1;g("percent initial")&&(f=a.formatPercent(p.begR),w&&(f+=" of initial"),m.push(f)),g("percent previous")&&(f=a.formatPercent(p.difR),w&&(f+=" of previous"),m.push(f)),g("percent total")&&(f=a.formatPercent(p.sumR),w&&(f+=" of total"),m.push(f))}return m.join("
")}(e,r,n,i):m.getValue(s.text,r);return m.coerceString(y,o)}(C,n,i,T,M);w=function(t,e){var r=m.getValue(t.textposition,e);return m.coerceEnumerated(x,r)}(I,i);var D="stack"===g.mode||"relative"===g.mode,R=n[i],F=!D||R._outmost;if(!z||"none"===w||(R.isBlank||s===c||f===p)&&("auto"===w||"inside"===w))return void r.select("text").remove();var B=C.font,N=d.getBarColor(n[i],I),j=d.getInsideTextFont(I,i,B,N),U=d.getOutsideTextFont(I,i,B),V=r.datum();O?"log"===T.type&&V.s0<=0&&(s=T.range[0]=G*(Z/Y):Z>=Y*(X/G);G>0&&Y>0&&(J||K||Q)?w="inside":(w="outside",H.remove(),H=null)}else w="inside";if(!H){W=a.ensureUniformFontSize(t,"outside"===w?U:j);var $=(H=P(r,z,W)).attr("transform");if(H.attr("transform",""),q=l.bBox(H.node()),G=q.width,Y=q.height,H.attr("transform",$),G<=0||Y<=0)return void H.remove()}var tt,et,rt=I.textangle;"outside"===w?(et="both"===I.constraintext||"outside"===I.constraintext,tt=function(t,e,r,n,i,a){var o,s=!!a.isHorizontal,l=!!a.constrained,c=a.angle||0,u=i.width,f=i.height,h=Math.abs(e-t),p=Math.abs(n-r);o=s?p>2*_?_:0:h>2*_?_:0;var d=1;l&&(d=s?Math.min(1,p/f):Math.min(1,h/u));var m=S(c),g=E(i,m),v=(s?g.x:g.y)/2,y=(i.left+i.right)/2,x=(i.top+i.bottom)/2,b=(t+e)/2,w=(r+n)/2,T=0,A=0,M=s?k(e,t):k(r,n);s?(b=e-M*o,T=M*v):(w=n+M*o,A=-M*v);return{textX:y,textY:x,targetX:b,targetY:w,anchorX:T,anchorY:A,scale:d,rotate:m}}(s,c,f,p,q,{isHorizontal:O,constrained:et,angle:rt})):(et="both"===I.constraintext||"inside"===I.constraintext,tt=L(s,c,f,p,q,{isHorizontal:O,constrained:et,angle:rt,anchor:I.insidetextanchor}));tt.fontSize=W.size,h("histogram"===I.type?"bar":I.type,tt,C),R.transform=tt,A(H,C,g,v).attr("transform",a.getTextTransform(tt))}(t,e,I,r,p,B,N,j,U,g,v),e.layerClipId&&l.hideOutsideRangePoint(c,I.select("text"),w,C,f.xcalendar,f.ycalendar)}));var 
j=!1===f.cliponaxis;l.setClipUrl(c,j?null:e.layerClipId,t)}));c.getComponentMethod("errorbars","plot")(t,I,e,g)},toMoveInsideBar:L}},{"../../components/color":361,"../../components/drawing":383,"../../components/fx/helpers":397,"../../lib":498,"../../lib/svg_text_utils":524,"../../plots/cartesian/axes":549,"../../registry":633,"./attributes":643,"./constants":645,"./helpers":649,"./style":657,"./uniform_text":659,"@plotly/d3":58,"fast-isnumeric":185}],655:[function(t,e,r){"use strict";function n(t,e,r,n,i){var a=e.c2p(n?t.s0:t.p0,!0),o=e.c2p(n?t.s1:t.p1,!0),s=r.c2p(n?t.p0:t.s0,!0),l=r.c2p(n?t.p1:t.s1,!0);return i?[(a+o)/2,(s+l)/2]:n?[o,(s+l)/2]:[(a+o)/2,l]}e.exports=function(t,e){var r,i=t.cd,a=t.xaxis,o=t.yaxis,s=i[0].trace,l="funnel"===s.type,c="h"===s.orientation,u=[];if(!1===e)for(r=0;r1||0===i.bargap&&0===i.bargroupgap&&!t[0].trace.marker.line.width)&&n.select(this).attr("shape-rendering","crispEdges")})),e.selectAll("g.points").each((function(e){d(n.select(this),e[0].trace,t)})),s.getComponentMethod("errorbars","style")(e)},styleTextPoints:m,styleOnSelect:function(t,e,r){var i=e[0].trace;i.selectedpoints?function(t,e,r){a.selectedPointStyle(t.selectAll("path"),e),function(t,e,r){t.each((function(t){var i,s=n.select(this);if(t.selected){i=o.ensureUniformFontSize(r,g(s,t,e,r));var l=e.selected.textfont&&e.selected.textfont.color;l&&(i.color=l),a.font(s,i)}else a.selectedTextStyle(s,e)}))}(t.selectAll("text"),e,r)}(r,i,t):(d(r,i,t),s.getComponentMethod("errorbars","style")(r))},getInsideTextFont:y,getOutsideTextFont:x,getBarColor:_,resizeText:l}},{"../../components/color":361,"../../components/drawing":383,"../../lib":498,"../../registry":633,"./attributes":643,"./helpers":649,"./uniform_text":659,"@plotly/d3":58}],658:[function(t,e,r){"use strict";var n=t("../../components/color"),i=t("../../components/colorscale/helpers").hasColorscale,a=t("../../components/colorscale/defaults"),o=t("../../lib").coercePattern;e.exports=function(t,e,r,s,l){var 
c=r("marker.color",s),u=i(t,"marker");u&&a(t,e,l,r,{prefix:"marker.",cLetter:"c"}),r("marker.line.color",n.defaultLine),i(t,"marker.line")&&a(t,e,l,r,{prefix:"marker.line.",cLetter:"c"}),r("marker.line.width"),r("marker.opacity"),o(r,"marker.pattern",c,u),r("selected.marker.color"),r("unselected.marker.color")}},{"../../components/color":361,"../../components/colorscale/defaults":371,"../../components/colorscale/helpers":372,"../../lib":498}],659:[function(t,e,r){"use strict";var n=t("@plotly/d3"),i=t("../../lib");function a(t){return"_"+t+"Text_minsize"}e.exports={recordMinTextSize:function(t,e,r){if(r.uniformtext.mode){var n=a(t),i=r.uniformtext.minsize,o=e.scale*e.fontSize;e.hide=oh.range[1]&&(x+=Math.PI);if(n.getClosest(c,(function(t){return m(y,x,[t.rp0,t.rp1],[t.thetag0,t.thetag1],d)?g+Math.min(1,Math.abs(t.thetag1-t.thetag0)/v)-1+(t.rp1-y)/(t.rp1-t.rp0)-1:1/0}),t),!1!==t.index){var b=c[t.index];t.x0=t.x1=b.ct[0],t.y0=t.y1=b.ct[1];var _=i.extendFlat({},b,{r:b.s,theta:b.p});return o(b,u,t),s(_,u,f,t),t.hovertemplate=u.hovertemplate,t.color=a(u,b),t.xLabelVal=t.yLabelVal=void 0,b.s<0&&(t.idealAlign="left"),[t]}}},{"../../components/fx":401,"../../lib":498,"../../plots/polar/helpers":616,"../bar/hover":650,"../scatterpolar/hover":1001}],664:[function(t,e,r){"use 
strict";e.exports={moduleType:"trace",name:"barpolar",basePlotModule:t("../../plots/polar"),categories:["polar","bar","showLegend"],attributes:t("./attributes"),layoutAttributes:t("./layout_attributes"),supplyDefaults:t("./defaults"),supplyLayoutDefaults:t("./layout_defaults"),calc:t("./calc").calc,crossTraceCalc:t("./calc").crossTraceCalc,plot:t("./plot"),colorbar:t("../scatter/marker_colorbar"),formatLabels:t("../scatterpolar/format_labels"),style:t("../bar/style").style,styleOnSelect:t("../bar/style").styleOnSelect,hoverPoints:t("./hover"),selectPoints:t("../bar/select"),meta:{}}},{"../../plots/polar":617,"../bar/select":655,"../bar/style":657,"../scatter/marker_colorbar":940,"../scatterpolar/format_labels":1e3,"./attributes":660,"./calc":661,"./defaults":662,"./hover":663,"./layout_attributes":665,"./layout_defaults":666,"./plot":667}],665:[function(t,e,r){"use strict";e.exports={barmode:{valType:"enumerated",values:["stack","overlay"],dflt:"stack",editType:"calc"},bargap:{valType:"number",dflt:.1,min:0,max:1,editType:"calc"}}},{}],666:[function(t,e,r){"use strict";var n=t("../../lib"),i=t("./layout_attributes");e.exports=function(t,e,r){var a,o={};function s(r,o){return n.coerce(t[a]||{},e[a],i,r,o)}for(var l=0;l0?(c=o,u=l):(c=l,u=o);var f=[s.findEnclosingVertexAngles(c,t.vangles)[0],(c+u)/2,s.findEnclosingVertexAngles(u,t.vangles)[1]];return s.pathPolygonAnnulus(n,i,c,u,f,e,r)};return function(t,n,i,o){return a.pathAnnulus(t,n,i,o,e,r)}}(e),p=e.layers.frontplot.select("g.barlayer");a.makeTraceGroups(p,r,"trace bars").each((function(){var r=n.select(this),s=a.ensureSingle(r,"g","points").selectAll("g.point").data(a.identity);s.enter().append("g").style("vector-effect","non-scaling-stroke").style("stroke-miterlimit",2).classed("point",!0),s.exit().remove(),s.each((function(t){var e,r=n.select(this),o=t.rp0=u.c2p(t.s0),s=t.rp1=u.c2p(t.s1),p=t.thetag0=f.c2g(t.p0),d=t.thetag1=f.c2g(t.p1);if(i(o)&&i(s)&&i(p)&&i(d)&&o!==s&&p!==d){var 
m=u.c2g(t.s1),g=(p+d)/2;t.ct=[l.c2p(m*Math.cos(g)),c.c2p(m*Math.sin(g))],e=h(o,s,p,d)}else e="M0,0Z";a.ensureSingle(r,"path").attr("d",e)})),o.setClipUrl(r,e._hasClipOnAxisFalse?e.clipIds.forTraces:null,t)}))}},{"../../components/drawing":383,"../../lib":498,"../../plots/polar/helpers":616,"@plotly/d3":58,"fast-isnumeric":185}],668:[function(t,e,r){"use strict";var n=t("../scatter/attributes"),i=t("../bar/attributes"),a=t("../../components/color/attributes"),o=t("../../plots/cartesian/axis_format_attributes").axisHoverFormat,s=t("../../plots/template_attributes").hovertemplateAttrs,l=t("../../lib/extend").extendFlat,c=n.marker,u=c.line;e.exports={y:{valType:"data_array",editType:"calc+clearAxisTypes"},x:{valType:"data_array",editType:"calc+clearAxisTypes"},x0:{valType:"any",editType:"calc+clearAxisTypes"},y0:{valType:"any",editType:"calc+clearAxisTypes"},dx:{valType:"number",editType:"calc"},dy:{valType:"number",editType:"calc"},xperiod:n.xperiod,yperiod:n.yperiod,xperiod0:n.xperiod0,yperiod0:n.yperiod0,xperiodalignment:n.xperiodalignment,yperiodalignment:n.yperiodalignment,xhoverformat:o("x"),yhoverformat:o("y"),name:{valType:"string",editType:"calc+clearAxisTypes"},q1:{valType:"data_array",editType:"calc+clearAxisTypes"},median:{valType:"data_array",editType:"calc+clearAxisTypes"},q3:{valType:"data_array",editType:"calc+clearAxisTypes"},lowerfence:{valType:"data_array",editType:"calc"},upperfence:{valType:"data_array",editType:"calc"},notched:{valType:"boolean",editType:"calc"},notchwidth:{valType:"number",min:0,max:.5,dflt:.25,editType:"calc"},notchspan:{valType:"data_array",editType:"calc"},boxpoints:{valType:"enumerated",values:["all","outliers","suspectedoutliers",!1],editType:"calc"},jitter:{valType:"number",min:0,max:1,editType:"calc"},pointpos:{valType:"number",min:-2,max:2,editType:"calc"},boxmean:{valType:"enumerated",values:[!0,"sd",!1],editType:"calc"},mean:{valType:"data_array",editType:"calc"},sd:{valType:"data_array",editType:"calc"},orientation:{val
Type:"enumerated",values:["v","h"],editType:"calc+clearAxisTypes"},quartilemethod:{valType:"enumerated",values:["linear","exclusive","inclusive"],dflt:"linear",editType:"calc"},width:{valType:"number",min:0,dflt:0,editType:"calc"},marker:{outliercolor:{valType:"color",dflt:"rgba(0, 0, 0, 0)",editType:"style"},symbol:l({},c.symbol,{arrayOk:!1,editType:"plot"}),opacity:l({},c.opacity,{arrayOk:!1,dflt:1,editType:"style"}),size:l({},c.size,{arrayOk:!1,editType:"calc"}),color:l({},c.color,{arrayOk:!1,editType:"style"}),line:{color:l({},u.color,{arrayOk:!1,dflt:a.defaultLine,editType:"style"}),width:l({},u.width,{arrayOk:!1,dflt:0,editType:"style"}),outliercolor:{valType:"color",editType:"style"},outlierwidth:{valType:"number",min:0,dflt:1,editType:"style"},editType:"style"},editType:"plot"},line:{color:{valType:"color",editType:"style"},width:{valType:"number",min:0,dflt:2,editType:"style"},editType:"plot"},fillcolor:n.fillcolor,whiskerwidth:{valType:"number",min:0,max:1,dflt:.5,editType:"calc"},offsetgroup:i.offsetgroup,alignmentgroup:i.alignmentgroup,selected:{marker:n.selected.marker,editType:"style"},unselected:{marker:n.unselected.marker,editType:"style"},text:l({},n.text,{}),hovertext:l({},n.hovertext,{}),hovertemplate:s({}),hoveron:{valType:"flaglist",flags:["boxes","points"],dflt:"boxes+points",editType:"style"}}},{"../../components/color/attributes":360,"../../lib/extend":488,"../../plots/cartesian/axis_format_attributes":552,"../../plots/template_attributes":628,"../bar/attributes":643,"../scatter/attributes":922}],669:[function(t,e,r){"use strict";var n=t("fast-isnumeric"),i=t("../../plots/cartesian/axes"),a=t("../../plots/cartesian/align_period"),o=t("../../lib"),s=t("../../constants/numerical").BADNUM,l=o._;e.exports=function(t,e){var 
r,c,y,x,b,_,w,T=t._fullLayout,k=i.getFromId(t,e.xaxis||"x"),A=i.getFromId(t,e.yaxis||"y"),M=[],S="violin"===e.type?"_numViolins":"_numBoxes";"h"===e.orientation?(y=k,x="x",b=A,_="y",w=!!e.yperiodalignment):(y=A,x="y",b=k,_="x",w=!!e.xperiodalignment);var E,L,C,P,I,O,z=function(t,e,r,i){var s,l=e+"0"in t,c="d"+e in t;if(e in t||l&&c){var u=r.makeCalcdata(t,e);return[a(t,r,e,u).vals,u]}s=l?t[e+"0"]:"name"in t&&("category"===r.type||n(t.name)&&-1!==["linear","log"].indexOf(r.type)||o.isDateTime(t.name)&&"date"===r.type)?t.name:i;for(var f="multicategory"===r.type?r.r2c_just_indices(s):r.d2c(s,0,t[e+"calendar"]),h=t._length,p=new Array(h),d=0;dE.uf};if(e._hasPreCompStats){var U=e[x],V=function(t){return y.d2c((e[t]||[])[r])},H=1/0,q=-1/0;for(r=0;r=E.q1&&E.q3>=E.med){var Y=V("lowerfence");E.lf=Y!==s&&Y<=E.q1?Y:p(E,C,P);var W=V("upperfence");E.uf=W!==s&&W>=E.q3?W:d(E,C,P);var X=V("mean");E.mean=X!==s?X:P?o.mean(C,P):(E.q1+E.q3)/2;var Z=V("sd");E.sd=X!==s&&Z>=0?Z:P?o.stdev(C,P,E.mean):E.q3-E.q1,E.lo=m(E),E.uo=g(E);var J=V("notchspan");J=J!==s&&J>0?J:v(E,P),E.ln=E.med-J,E.un=E.med+J;var K=E.lf,Q=E.uf;e.boxpoints&&C.length&&(K=Math.min(K,C[0]),Q=Math.max(Q,C[P-1])),e.notched&&(K=Math.min(K,E.ln),Q=Math.max(Q,E.un)),E.min=K,E.max=Q}else{var $;o.warn(["Invalid input - make sure that q1 <= median <= q3","q1 = "+E.q1,"median = "+E.med,"q3 = "+E.q3].join("\n")),$=E.med!==s?E.med:E.q1!==s?E.q3!==s?(E.q1+E.q3)/2:E.q1:E.q3!==s?E.q3:0,E.med=$,E.q1=E.q3=$,E.lf=E.uf=$,E.mean=E.sd=$,E.ln=E.un=$,E.min=E.max=$}H=Math.min(H,E.min),q=Math.max(q,E.max),E.pts2=L.filter(j),M.push(E)}}e._extremes[y._id]=i.findExtremes(y,[H,q],{padded:!0})}else{var tt=y.makeCalcdata(e,x),et=function(t,e){for(var r=t.length,n=new Array(r+1),i=0;i=0&&it0){var 
ut,ft;if((E={}).pos=E[_]=B[r],L=E.pts=nt[r].sort(f),P=(C=E[x]=L.map(h)).length,E.min=C[0],E.max=C[P-1],E.mean=o.mean(C,P),E.sd=o.stdev(C,P,E.mean),E.med=o.interp(C,.5),P%2&&(lt||ct))lt?(ut=C.slice(0,P/2),ft=C.slice(P/2+1)):ct&&(ut=C.slice(0,P/2+1),ft=C.slice(P/2)),E.q1=o.interp(ut,.5),E.q3=o.interp(ft,.5);else E.q1=o.interp(C,.25),E.q3=o.interp(C,.75);E.lf=p(E,C,P),E.uf=d(E,C,P),E.lo=m(E),E.uo=g(E);var ht=v(E,P);E.ln=E.med-ht,E.un=E.med+ht,at=Math.min(at,E.ln),ot=Math.max(ot,E.un),E.pts2=L.filter(j),M.push(E)}e._extremes[y._id]=i.findExtremes(y,e.notched?tt.concat([at,ot]):tt,{padded:!0})}return function(t,e){if(o.isArrayOrTypedArray(e.selectedpoints))for(var r=0;r0?(M[0].t={num:T[S],dPos:N,posLetter:_,valLetter:x,labels:{med:l(t,"median:"),min:l(t,"min:"),q1:l(t,"q1:"),q3:l(t,"q3:"),max:l(t,"max:"),mean:"sd"===e.boxmean?l(t,"mean \xb1 \u03c3:"):l(t,"mean:"),lf:l(t,"lower fence:"),uf:l(t,"upper fence:")}},T[S]++,M):[{t:{empty:!0}}]};var c={text:"tx",hovertext:"htx"};function u(t,e,r){for(var n in c)o.isArrayOrTypedArray(e[n])&&(Array.isArray(r)?o.isArrayOrTypedArray(e[n][r[0]])&&(t[c[n]]=e[n][r[0]][r[1]]):t[c[n]]=e[n][r])}function f(t,e){return t.v-e.v}function h(t){return t.v}function p(t,e,r){return 0===r?t.q1:Math.min(t.q1,e[Math.min(o.findBin(2.5*t.q1-1.5*t.q3,e,!0)+1,r-1)])}function d(t,e,r){return 0===r?t.q3:Math.max(t.q3,e[Math.max(o.findBin(2.5*t.q3-1.5*t.q1,e),0)])}function m(t){return 4*t.q1-3*t.q3}function g(t){return 4*t.q3-3*t.q1}function v(t,e){return 0===e?0:1.57*(t.q3-t.q1)/Math.sqrt(e)}},{"../../constants/numerical":474,"../../lib":498,"../../plots/cartesian/align_period":546,"../../plots/cartesian/axes":549,"fast-isnumeric":185}],670:[function(t,e,r){"use strict";var n=t("../../plots/cartesian/axes"),i=t("../../lib"),a=t("../../plots/cartesian/constraints").getAxisGroup,o=["v","h"];function s(t,e,r,o){var s,l,c,u=e.calcdata,f=e._fullLayout,h=o._id,p=h.charAt(0),d=[],m=0;for(s=0;s1,b=1-f[t+"gap"],_=1-f[t+"groupgap"];for(s=0;s0){var 
q=E.pointpos,G=E.jitter,Y=E.marker.size/2,W=0;q+G>=0&&((W=V*(q+G))>M?(H=!0,j=Y,B=W):W>R&&(j=Y,B=M)),W<=M&&(B=M);var X=0;q-G<=0&&((X=-V*(q-G))>S?(H=!0,U=Y,N=X):X>F&&(U=Y,N=S)),X<=S&&(N=S)}else B=M,N=S;var Z=new Array(c.length);for(l=0;l0?(g="v",v=x>0?Math.min(_,b):Math.min(b)):x>0?(g="h",v=Math.min(_)):v=0;if(v){e._length=v;var S=r("orientation",g);e._hasPreCompStats?"v"===S&&0===x?(r("x0",0),r("dx",1)):"h"===S&&0===y&&(r("y0",0),r("dy",1)):"v"===S&&0===x?r("x0"):"h"===S&&0===y&&r("y0"),i.getComponentMethod("calendars","handleTraceDefaults")(t,e,["x","y"],a)}else e.visible=!1}function f(t,e,r,i){var a=i.prefix,o=n.coerce2(t,e,c,"marker.outliercolor"),s=r("marker.line.outliercolor"),l="outliers";e._hasPreCompStats?l="all":(o||s)&&(l="suspectedoutliers");var u=r(a+"points",l);u?(r("jitter","all"===u?.3:0),r("pointpos","all"===u?-1.5:0),r("marker.symbol"),r("marker.opacity"),r("marker.size"),r("marker.color",e.line.color),r("marker.line.color"),r("marker.line.width"),"suspectedoutliers"===u&&(r("marker.line.outliercolor",e.marker.color),r("marker.line.outlierwidth")),r("selected.marker.color"),r("unselected.marker.color"),r("selected.marker.size"),r("unselected.marker.size"),r("text"),r("hovertext")):delete e.marker;var f=r("hoveron");"all"!==f&&-1===f.indexOf("points")||r("hovertemplate"),n.coerceSelectionMarkerOpacity(e,r)}e.exports={supplyDefaults:function(t,e,r,i){function s(r,i){return n.coerce(t,e,c,r,i)}if(u(t,e,s,i),!1!==e.visible){o(t,e,i,s),s("xhoverformat"),s("yhoverformat");var l=e._hasPreCompStats;l&&(s("lowerfence"),s("upperfence")),s("line.color",(t.marker||{}).color||r),s("line.width"),s("fillcolor",a.addOpacity(e.line.color,.5));var h=!1;if(l){var p=s("mean"),d=s("sd");p&&p.length&&(h=!0,d&&d.length&&(h="sd"))}s("boxmean",h),s("whiskerwidth"),s("width"),s("quartilemethod");var m=!1;if(l){var g=s("notchspan");g&&g.length&&(m=!0)}else 
n.validate(t.notchwidth,c.notchwidth)&&(m=!0);s("notched",m)&&s("notchwidth"),f(t,e,s,{prefix:"box"})}},crossTraceDefaults:function(t,e){var r,i;function a(t){return n.coerce(i._input,i,c,t)}for(var o=0;ot.lo&&(x.so=!0)}return a}));h.enter().append("path").classed("point",!0),h.exit().remove(),h.call(a.translatePoints,o,s)}function l(t,e,r,a){var o,s,l=e.val,c=e.pos,u=!!c.rangebreaks,f=a.bPos,h=a.bPosPxOffset||0,p=r.boxmean||(r.meanline||{}).visible;Array.isArray(a.bdPos)?(o=a.bdPos[0],s=a.bdPos[1]):(o=a.bdPos,s=a.bdPos);var d=t.selectAll("path.mean").data("box"===r.type&&r.boxmean||"violin"===r.type&&r.box.visible&&r.meanline.visible?i.identity:[]);d.enter().append("path").attr("class","mean").style({fill:"none","vector-effect":"non-scaling-stroke"}),d.exit().remove(),d.each((function(t){var e=c.c2l(t.pos+f,!0),i=c.l2p(e-o)+h,a=c.l2p(e+s)+h,d=u?(i+a)/2:c.l2p(e)+h,m=l.c2p(t.mean,!0),g=l.c2p(t.mean-t.sd,!0),v=l.c2p(t.mean+t.sd,!0);"h"===r.orientation?n.select(this).attr("d","M"+m+","+i+"V"+a+("sd"===p?"m0,0L"+g+","+d+"L"+m+","+i+"L"+v+","+d+"Z":"")):n.select(this).attr("d","M"+i+","+m+"H"+a+("sd"===p?"m0,0L"+d+","+g+"L"+i+","+m+"L"+d+","+v+"Z":""))}))}e.exports={plot:function(t,e,r,a){var c=e.xaxis,u=e.yaxis;i.makeTraceGroups(a,r,"trace boxes").each((function(t){var e,r,i=n.select(this),a=t[0],f=a.t,h=a.trace;(f.wdPos=f.bdPos*h.whiskerwidth,!0!==h.visible||f.empty)?i.remove():("h"===h.orientation?(e=u,r=c):(e=c,r=u),o(i,{pos:e,val:r},h,f),s(i,{x:c,y:u},h,f),l(i,{pos:e,val:r},h,f))}))},plotBoxAndWhiskers:o,plotPoints:s,plotBoxMean:l}},{"../../components/drawing":383,"../../lib":498,"@plotly/d3":58}],678:[function(t,e,r){"use strict";e.exports=function(t,e){var r,n,i=t.cd,a=t.xaxis,o=t.yaxis,s=[];if(!1===e)for(r=0;r=10)return null;for(var 
i=1/0,a=-1/0,o=e.length,s=0;s0?Math.floor:Math.ceil,I=L>0?Math.ceil:Math.floor,O=L>0?Math.min:Math.max,z=L>0?Math.max:Math.min,D=P(S+C),R=I(E-C),F=[[f=M(S)]];for(a=D;a*L=0;i--)a[u-i]=t[f][i],o[u-i]=e[f][i];for(s.push({x:a,y:o,bicubic:l}),i=f,a=[],o=[];i>=0;i--)a[f-i]=t[i][0],o[f-i]=e[i][0];return s.push({x:a,y:o,bicubic:c}),s}},{}],692:[function(t,e,r){"use strict";var n=t("../../plots/cartesian/axes"),i=t("../../lib/extend").extendFlat;e.exports=function(t,e,r){var a,o,s,l,c,u,f,h,p,d,m,g,v,y,x=t["_"+e],b=t[e+"axis"],_=b._gridlines=[],w=b._minorgridlines=[],T=b._boundarylines=[],k=t["_"+r],A=t[r+"axis"];"array"===b.tickmode&&(b.tickvals=x.slice());var M=t._xctrl,S=t._yctrl,E=M[0].length,L=M.length,C=t._a.length,P=t._b.length;n.prepTicks(b),"array"===b.tickmode&&delete b.tickvals;var I=b.smoothing?3:1;function O(n){var i,a,o,s,l,c,u,f,p,d,m,g,v=[],y=[],x={};if("b"===e)for(a=t.b2j(n),o=Math.floor(Math.max(0,Math.min(P-2,a))),s=a-o,x.length=P,x.crossLength=C,x.xy=function(e){return t.evalxy([],e,a)},x.dxy=function(e,r){return t.dxydi([],e,o,r,s)},i=0;i0&&(p=t.dxydi([],i-1,o,0,s),v.push(l[0]+p[0]/3),y.push(l[1]+p[1]/3),d=t.dxydi([],i-1,o,1,s),v.push(f[0]-d[0]/3),y.push(f[1]-d[1]/3)),v.push(f[0]),y.push(f[1]),l=f;else for(i=t.a2i(n),c=Math.floor(Math.max(0,Math.min(C-2,i))),u=i-c,x.length=C,x.crossLength=P,x.xy=function(e){return t.evalxy([],i,e)},x.dxy=function(e,r){return t.dxydj([],c,e,u,r)},a=0;a0&&(m=t.dxydj([],c,a-1,u,0),v.push(l[0]+m[0]/3),y.push(l[1]+m[1]/3),g=t.dxydj([],c,a-1,u,1),v.push(f[0]-g[0]/3),y.push(f[1]-g[1]/3)),v.push(f[0]),y.push(f[1]),l=f;return x.axisLetter=e,x.axis=b,x.crossAxis=A,x.value=n,x.constvar=r,x.index=h,x.x=v,x.y=y,x.smoothing=A.smoothing,x}function z(n){var i,a,o,s,l,c=[],u=[],f={};if(f.length=x.length,f.crossLength=k.length,"b"===e)for(o=Math.max(0,Math.min(P-2,n)),l=Math.min(1,Math.max(0,n-o)),f.xy=function(e){return t.evalxy([],e,n)},f.dxy=function(e,r){return 
t.dxydi([],e,o,r,l)},i=0;ix.length-1||_.push(i(z(o),{color:b.gridcolor,width:b.gridwidth}));for(h=u;hx.length-1||m<0||m>x.length-1))for(g=x[s],v=x[m],a=0;ax[x.length-1]||w.push(i(O(d),{color:b.minorgridcolor,width:b.minorgridwidth}));b.startline&&T.push(i(z(0),{color:b.startlinecolor,width:b.startlinewidth})),b.endline&&T.push(i(z(x.length-1),{color:b.endlinecolor,width:b.endlinewidth}))}else{for(l=5e-15,u=(c=[Math.floor((x[x.length-1]-b.tick0)/b.dtick*(1+l)),Math.ceil((x[0]-b.tick0)/b.dtick/(1+l))].sort((function(t,e){return t-e})))[0],f=c[1],h=u;h<=f;h++)p=b.tick0+b.dtick*h,_.push(i(O(p),{color:b.gridcolor,width:b.gridwidth}));for(h=u-1;hx[x.length-1]||w.push(i(O(d),{color:b.minorgridcolor,width:b.minorgridwidth}));b.startline&&T.push(i(O(x[0]),{color:b.startlinecolor,width:b.startlinewidth})),b.endline&&T.push(i(O(x[x.length-1]),{color:b.endlinecolor,width:b.endlinewidth}))}}},{"../../lib/extend":488,"../../plots/cartesian/axes":549}],693:[function(t,e,r){"use strict";var n=t("../../plots/cartesian/axes"),i=t("../../lib/extend").extendFlat;e.exports=function(t,e){var r,a,o,s=e._labels=[],l=e._gridlines;for(r=0;re.length&&(t=t.slice(0,e.length)):t=[],i=0;i90&&(p-=180,l=-l),{angle:p,flip:l,p:t.c2p(n,e,r),offsetMultplier:c}}},{}],707:[function(t,e,r){"use strict";var n=t("@plotly/d3"),i=t("../../components/drawing"),a=t("./map_1d_array"),o=t("./makepath"),s=t("./orient_text"),l=t("../../lib/svg_text_utils"),c=t("../../lib"),u=c.strRotate,f=c.strTranslate,h=t("../../constants/alignment");function p(t,e,r,i,s,l){var c="const-"+s+"-lines",u=r.selectAll("."+c).data(l);u.enter().append("path").classed(c,!0).style("vector-effect","non-scaling-stroke"),u.each((function(r){var i=r,s=i.x,l=i.y,c=a([],s,t.c2p),u=a([],l,e.c2p),f="M"+o(c,u,i.smoothing);n.select(this).attr("d",f).style("stroke-width",i.width).style("stroke",i.color).style("fill","none")})),u.exit().remove()}function d(t,e,r,a,o,c,h,p){var 
d=c.selectAll("text."+p).data(h);d.enter().append("text").classed(p,!0);var m=0,g={};return d.each((function(o,c){var h;if("auto"===o.axis.tickangle)h=s(a,e,r,o.xy,o.dxy);else{var p=(o.axis.tickangle+180)*Math.PI/180;h=s(a,e,r,o.xy,[Math.cos(p),Math.sin(p)])}c||(g={angle:h.angle,flip:h.flip});var d=(o.endAnchor?-1:1)*h.flip,v=n.select(this).attr({"text-anchor":d>0?"start":"end","data-notex":1}).call(i.font,o.font).text(o.text).call(l.convertToTspans,t),y=i.bBox(this);v.attr("transform",f(h.p[0],h.p[1])+u(h.angle)+f(o.axis.labelpadding*d,.3*y.height)),m=Math.max(m,y.width+o.axis.labelpadding)})),d.exit().remove(),g.maxExtent=m,g}e.exports=function(t,e,r,i){var l=e.xaxis,u=e.yaxis,f=t._fullLayout._clips;c.makeTraceGroups(i,r,"trace").each((function(e){var r=n.select(this),i=e[0],h=i.trace,m=h.aaxis,g=h.baxis,y=c.ensureSingle(r,"g","minorlayer"),x=c.ensureSingle(r,"g","majorlayer"),b=c.ensureSingle(r,"g","boundarylayer"),_=c.ensureSingle(r,"g","labellayer");r.style("opacity",h.opacity),p(l,u,x,m,"a",m._gridlines),p(l,u,x,g,"b",g._gridlines),p(l,u,y,m,"a",m._minorgridlines),p(l,u,y,g,"b",g._minorgridlines),p(l,u,b,m,"a-boundary",m._boundarylines),p(l,u,b,g,"b-boundary",g._boundarylines);var w=d(t,l,u,h,i,_,m._labels,"a-label"),T=d(t,l,u,h,i,_,g._labels,"b-label");!function(t,e,r,n,i,a,o,l){var u,f,h,p,d=c.aggNums(Math.min,null,r.a),m=c.aggNums(Math.max,null,r.a),g=c.aggNums(Math.min,null,r.b),y=c.aggNums(Math.max,null,r.b);u=.5*(d+m),f=g,h=r.ab2xy(u,f,!0),p=r.dxyda_rough(u,f),void 0===o.angle&&c.extendFlat(o,s(r,i,a,h,r.dxydb_rough(u,f)));v(t,e,r,n,h,p,r.aaxis,i,a,o,"a-title"),u=d,f=.5*(g+y),h=r.ab2xy(u,f,!0),p=r.dxydb_rough(u,f),void 0===l.angle&&c.extendFlat(l,s(r,i,a,h,r.dxyda_rough(u,f)));v(t,e,r,n,h,p,r.baxis,i,a,l,"b-title")}(t,_,h,i,l,u,w,T),function(t,e,r,n,i){var s,l,u,f,h=r.select("#"+t._clipPathId);h.size()||(h=r.append("clipPath").classed("carpetclip",!0));var 
p=c.ensureSingle(h,"path","carpetboundary"),d=e.clipsegments,m=[];for(f=0;f90&&y<270,b=n.select(this);b.text(h.title.text).call(l.convertToTspans,t),x&&(_=(-l.lineCount(b)+g)*m*a-_),b.attr("transform",f(e.p[0],e.p[1])+u(e.angle)+f(0,_)).attr("text-anchor","middle").call(i.font,h.title.font)})),b.exit().remove()}},{"../../components/drawing":383,"../../constants/alignment":466,"../../lib":498,"../../lib/svg_text_utils":524,"./makepath":704,"./map_1d_array":705,"./orient_text":706,"@plotly/d3":58}],708:[function(t,e,r){"use strict";var n=t("./constants"),i=t("../../lib/search").findBin,a=t("./compute_control_points"),o=t("./create_spline_evaluator"),s=t("./create_i_derivative_evaluator"),l=t("./create_j_derivative_evaluator");e.exports=function(t){var e=t._a,r=t._b,c=e.length,u=r.length,f=t.aaxis,h=t.baxis,p=e[0],d=e[c-1],m=r[0],g=r[u-1],v=e[e.length-1]-e[0],y=r[r.length-1]-r[0],x=v*n.RELATIVE_CULL_TOLERANCE,b=y*n.RELATIVE_CULL_TOLERANCE;p-=x,d+=x,m-=b,g+=b,t.isVisible=function(t,e){return t>p&&tm&&ed||eg},t.setScale=function(){var e=t._x,r=t._y,n=a(t._xctrl,t._yctrl,e,r,f.smoothing,h.smoothing);t._xctrl=n[0],t._yctrl=n[1],t.evalxy=o([t._xctrl,t._yctrl],c,u,f.smoothing,h.smoothing),t.dxydi=s([t._xctrl,t._yctrl],f.smoothing,h.smoothing),t.dxydj=l([t._xctrl,t._yctrl],f.smoothing,h.smoothing)},t.i2a=function(t){var r=Math.max(0,Math.floor(t[0]),c-2),n=t[0]-r;return(1-n)*e[r]+n*e[r+1]},t.j2b=function(t){var e=Math.max(0,Math.floor(t[1]),c-2),n=t[1]-e;return(1-n)*r[e]+n*r[e+1]},t.ij2ab=function(e){return[t.i2a(e[0]),t.j2b(e[1])]},t.a2i=function(t){var r=Math.max(0,Math.min(i(t,e),c-2)),n=e[r],a=e[r+1];return Math.max(0,Math.min(c-1,r+(t-n)/(a-n)))},t.b2j=function(t){var e=Math.max(0,Math.min(i(t,r),u-2)),n=r[e],a=r[e+1];return Math.max(0,Math.min(u-1,e+(t-n)/(a-n)))},t.ab2ij=function(e){return[t.a2i(e[0]),t.b2j(e[1])]},t.i2c=function(e,r){return t.evalxy([],e,r)},t.ab2xy=function(n,i,a){if(!a&&(ne[c-1]|ir[u-1]))return[!1,!1];var 
o=t.a2i(n),s=t.b2j(i),l=t.evalxy([],o,s);if(a){var f,h,p,d,m=0,g=0,v=[];ne[c-1]?(f=c-2,h=1,m=(n-e[c-1])/(e[c-1]-e[c-2])):h=o-(f=Math.max(0,Math.min(c-2,Math.floor(o)))),ir[u-1]?(p=u-2,d=1,g=(i-r[u-1])/(r[u-1]-r[u-2])):d=s-(p=Math.max(0,Math.min(u-2,Math.floor(s)))),m&&(t.dxydi(v,f,p,h,d),l[0]+=v[0]*m,l[1]+=v[1]*m),g&&(t.dxydj(v,f,p,h,d),l[0]+=v[0]*g,l[1]+=v[1]*g)}return l},t.c2p=function(t,e,r){return[e.c2p(t[0]),r.c2p(t[1])]},t.p2x=function(t,e,r){return[e.p2c(t[0]),r.p2c(t[1])]},t.dadi=function(t){var r=Math.max(0,Math.min(e.length-2,t));return e[r+1]-e[r]},t.dbdj=function(t){var e=Math.max(0,Math.min(r.length-2,t));return r[e+1]-r[e]},t.dxyda=function(e,r,n,i){var a=t.dxydi(null,e,r,n,i),o=t.dadi(e,n);return[a[0]/o,a[1]/o]},t.dxydb=function(e,r,n,i){var a=t.dxydj(null,e,r,n,i),o=t.dbdj(r,i);return[a[0]/o,a[1]/o]},t.dxyda_rough=function(e,r,n){var i=v*(n||.1),a=t.ab2xy(e+i,r,!0),o=t.ab2xy(e-i,r,!0);return[.5*(a[0]-o[0])/i,.5*(a[1]-o[1])/i]},t.dxydb_rough=function(e,r,n){var i=y*(n||.1),a=t.ab2xy(e,r+i,!0),o=t.ab2xy(e,r-i,!0);return[.5*(a[0]-o[0])/i,.5*(a[1]-o[1])/i]},t.dpdx=function(t){return t._m},t.dpdy=function(t){return t._m}}},{"../../lib/search":518,"./compute_control_points":696,"./constants":697,"./create_i_derivative_evaluator":698,"./create_j_derivative_evaluator":699,"./create_spline_evaluator":700}],709:[function(t,e,r){"use strict";var n=t("../../lib");e.exports=function(t,e,r){var i,a,o,s=[],l=[],c=t[0].length,u=t.length;function f(e,r){var n,i=0,a=0;return e>0&&void 0!==(n=t[r][e-1])&&(a++,i+=n),e0&&void 0!==(n=t[r-1][e])&&(a++,i+=n),r0&&a0&&i1e-5);return n.log("Smoother converged to",k,"after",A,"iterations"),t}},{"../../lib":498}],710:[function(t,e,r){"use strict";var n=t("../../lib").isArray1D;e.exports=function(t,e,r){var i=r("x"),a=i&&i.length,o=r("y"),s=o&&o.length;if(!a&&!s)return!1;if(e._cheater=!i,a&&!n(i)||s&&!n(o))e._length=null;else{var 
l=a?i.length:1/0;s&&(l=Math.min(l,o.length)),e.a&&e.a.length&&(l=Math.min(l,e.a.length)),e.b&&e.b.length&&(l=Math.min(l,e.b.length)),e._length=l}return!0}},{"../../lib":498}],711:[function(t,e,r){"use strict";var n=t("../../plots/template_attributes").hovertemplateAttrs,i=t("../scattergeo/attributes"),a=t("../../components/colorscale/attributes"),o=t("../../plots/attributes"),s=t("../../components/color/attributes").defaultLine,l=t("../../lib/extend").extendFlat,c=i.marker.line;e.exports=l({locations:{valType:"data_array",editType:"calc"},locationmode:i.locationmode,z:{valType:"data_array",editType:"calc"},geojson:l({},i.geojson,{}),featureidkey:i.featureidkey,text:l({},i.text,{}),hovertext:l({},i.hovertext,{}),marker:{line:{color:l({},c.color,{dflt:s}),width:l({},c.width,{dflt:1}),editType:"calc"},opacity:{valType:"number",arrayOk:!0,min:0,max:1,dflt:1,editType:"style"},editType:"calc"},selected:{marker:{opacity:i.selected.marker.opacity,editType:"plot"},editType:"plot"},unselected:{marker:{opacity:i.unselected.marker.opacity,editType:"plot"},editType:"plot"},hoverinfo:l({},o.hoverinfo,{editType:"calc",flags:["location","z","text","name"]}),hovertemplate:n(),showlegend:l({},o.showlegend,{dflt:!1})},a("",{cLetter:"z",editTypeOverride:"calc"}))},{"../../components/color/attributes":360,"../../components/colorscale/attributes":368,"../../lib/extend":488,"../../plots/attributes":545,"../../plots/template_attributes":628,"../scattergeo/attributes":964}],712:[function(t,e,r){"use strict";var n=t("fast-isnumeric"),i=t("../../constants/numerical").BADNUM,a=t("../../components/colorscale/calc"),o=t("../scatter/arrays_to_calcdata"),s=t("../scatter/calc_selection");function l(t){return t&&"string"==typeof t}e.exports=function(t,e){var r,c=e._length,u=new Array(c);r=e.geojson?function(t){return l(t)||n(t)}:l;for(var f=0;f")}(t,f,o),[t]}},{"../../lib":498,"../../plots/cartesian/axes":549,"./attributes":711}],716:[function(t,e,r){"use 
strict";e.exports={attributes:t("./attributes"),supplyDefaults:t("./defaults"),colorbar:t("../heatmap/colorbar"),calc:t("./calc"),calcGeoJSON:t("./plot").calcGeoJSON,plot:t("./plot").plot,style:t("./style").style,styleOnSelect:t("./style").styleOnSelect,hoverPoints:t("./hover"),eventData:t("./event_data"),selectPoints:t("./select"),moduleType:"trace",name:"choropleth",basePlotModule:t("../../plots/geo"),categories:["geo","noOpacity","showLegend"],meta:{}}},{"../../plots/geo":584,"../heatmap/colorbar":790,"./attributes":711,"./calc":712,"./defaults":713,"./event_data":714,"./hover":715,"./plot":717,"./select":718,"./style":719}],717:[function(t,e,r){"use strict";var n=t("@plotly/d3"),i=t("../../lib"),a=t("../../lib/geo_location_utils"),o=t("../../lib/topojson_utils").getTopojsonFeatures,s=t("../../plots/cartesian/autorange").findExtremes,l=t("./style").style;e.exports={calcGeoJSON:function(t,e){for(var r=t[0].trace,n=e[r.geo],i=n._subplot,l=r.locationmode,c=r._length,u="geojson-id"===l?a.extractTraceFeature(t):o(r,i.topojson),f=[],h=[],p=0;p=0;n--){var i=r[n].id;if("string"==typeof i&&0===i.indexOf("water"))for(var a=n+1;a=0;r--)t.removeLayer(e[r][1])},s.dispose=function(){var t=this.subplot.map;this._removeLayers(),t.removeSource(this.sourceId)},e.exports=function(t,e){var r=e[0].trace,i=new o(t,r.uid),a=i.sourceId,s=n(e),l=i.below=t.belowLookup["trace-"+r.uid];return t.map.addSource(a,{type:"geojson",data:s.geojson}),i._addLayers(s,l),e[0].trace._glTrace=i,i}},{"../../plots/mapbox/constants":606,"./convert":721}],725:[function(t,e,r){"use strict";var 
n=t("../../components/colorscale/attributes"),i=t("../../plots/cartesian/axis_format_attributes").axisHoverFormat,a=t("../../plots/template_attributes").hovertemplateAttrs,o=t("../mesh3d/attributes"),s=t("../../plots/attributes"),l=t("../../lib/extend").extendFlat,c={x:{valType:"data_array",editType:"calc+clearAxisTypes"},y:{valType:"data_array",editType:"calc+clearAxisTypes"},z:{valType:"data_array",editType:"calc+clearAxisTypes"},u:{valType:"data_array",editType:"calc"},v:{valType:"data_array",editType:"calc"},w:{valType:"data_array",editType:"calc"},sizemode:{valType:"enumerated",values:["scaled","absolute"],editType:"calc",dflt:"scaled"},sizeref:{valType:"number",editType:"calc",min:0},anchor:{valType:"enumerated",editType:"calc",values:["tip","tail","cm","center"],dflt:"cm"},text:{valType:"string",dflt:"",arrayOk:!0,editType:"calc"},hovertext:{valType:"string",dflt:"",arrayOk:!0,editType:"calc"},hovertemplate:a({editType:"calc"},{keys:["norm"]}),uhoverformat:i("u",1),vhoverformat:i("v",1),whoverformat:i("w",1),xhoverformat:i("x"),yhoverformat:i("y"),zhoverformat:i("z"),showlegend:l({},s.showlegend,{dflt:!1})};l(c,n("",{colorAttr:"u/v/w norm",showScaleDflt:!0,editTypeOverride:"calc"}));["opacity","lightposition","lighting"].forEach((function(t){c[t]=o[t]})),c.hoverinfo=l({},s.hoverinfo,{editType:"calc",flags:["x","y","z","u","v","w","norm","text","name"],dflt:"x+y+z+norm+text+name"}),c.transforms=void 0,e.exports=c},{"../../components/colorscale/attributes":368,"../../lib/extend":488,"../../plots/attributes":545,"../../plots/cartesian/axis_format_attributes":552,"../../plots/template_attributes":628,"../mesh3d/attributes":862}],726:[function(t,e,r){"use strict";var n=t("../../components/colorscale/calc");e.exports=function(t,e){for(var r=e.u,i=e.v,a=e.w,o=Math.min(e.x.length,e.y.length,e.z.length,r.length,i.length,a.length),s=-1/0,l=1/0,c=0;co.level||o.starts.length&&a===o.level)}break;case"constraint":if(n.prefixBoundary=!1,n.edgepaths.length)return;var 
s=n.x.length,l=n.y.length,c=-1/0,u=1/0;for(r=0;r":p>c&&(n.prefixBoundary=!0);break;case"<":(pc||n.starts.length&&h===u)&&(n.prefixBoundary=!0);break;case"][":f=Math.min(p[0],p[1]),h=Math.max(p[0],p[1]),fc&&(n.prefixBoundary=!0)}}}},{}],733:[function(t,e,r){"use strict";var n=t("../../components/colorscale"),i=t("./make_color_map"),a=t("./end_plus");e.exports={min:"zmin",max:"zmax",calc:function(t,e,r){var o=e.contours,s=e.line,l=o.size||1,c=o.coloring,u=i(e,{isColorbar:!0});if("heatmap"===c){var f=n.extractOpts(e);r._fillgradient=f.reversescale?n.flipScale(f.colorscale):f.colorscale,r._zrange=[f.min,f.max]}else"fill"===c&&(r._fillcolor=u);r._line={color:"lines"===c?u:s.color,width:!1!==o.showlines?s.width:0,dash:s.dash},r._levels={start:o.start,end:a(o),size:l}}}},{"../../components/colorscale":373,"./end_plus":741,"./make_color_map":746}],734:[function(t,e,r){"use strict";e.exports={BOTTOMSTART:[1,9,13,104,713],TOPSTART:[4,6,7,104,713],LEFTSTART:[8,12,14,208,1114],RIGHTSTART:[2,3,11,208,1114],NEWDELTA:[null,[-1,0],[0,-1],[-1,0],[1,0],null,[0,-1],[-1,0],[0,1],[0,1],null,[0,1],[1,0],[1,0],[0,-1]],CHOOSESADDLE:{104:[4,1],208:[2,8],713:[7,13],1114:[11,14]},SADDLEREMAINDER:{1:4,2:8,4:1,7:13,8:2,11:14,13:7,14:11},LABELDISTANCE:2,LABELINCREASE:10,LABELMIN:3,LABELMAX:10,LABELOPTIMIZER:{EDGECOST:1,ANGLECOST:1,NEIGHBORCOST:5,SAMELEVELFACTOR:10,SAMELEVELDISTANCE:5,MAXCOST:100,INITIALSEARCHPOINTS:10,ITERATIONS:5}}},{}],735:[function(t,e,r){"use strict";var n=t("fast-isnumeric"),i=t("./label_defaults"),a=t("../../components/color"),o=a.addOpacity,s=a.opacity,l=t("../../constants/filter_ops"),c=l.CONSTRAINT_REDUCTION,u=l.COMPARISON_OPS2;e.exports=function(t,e,r,a,l,f){var h,p,d,m=e.contours,g=r("contours.operation");(m._operation=c[g],function(t,e){var 
r;-1===u.indexOf(e.operation)?(t("contours.value",[0,1]),Array.isArray(e.value)?e.value.length>2?e.value=e.value.slice(2):0===e.length?e.value=[0,1]:e.length<2?(r=parseFloat(e.value[0]),e.value=[r,r+1]):e.value=[parseFloat(e.value[0]),parseFloat(e.value[1])]:n(e.value)&&(r=parseFloat(e.value),e.value=[r,r+1])):(t("contours.value",0),n(e.value)||(Array.isArray(e.value)?e.value=parseFloat(e.value[0]):e.value=0))}(r,m),"="===g?h=m.showlines=!0:(h=r("contours.showlines"),d=r("fillcolor",o((t.line||{}).color||l,.5))),h)&&(p=r("line.color",d&&s(d)?o(e.fillcolor,1):l),r("line.width",2),r("line.dash"));r("line.smoothing"),i(r,a,p,f)}},{"../../components/color":361,"../../constants/filter_ops":470,"./label_defaults":745,"fast-isnumeric":185}],736:[function(t,e,r){"use strict";var n=t("../../constants/filter_ops"),i=t("fast-isnumeric");function a(t,e){var r,a=Array.isArray(e);function o(t){return i(t)?+t:null}return-1!==n.COMPARISON_OPS2.indexOf(t)?r=o(a?e[0]:e):-1!==n.INTERVAL_OPS.indexOf(t)?r=a?[o(e[0]),o(e[1])]:[o(e),o(e)]:-1!==n.SET_OPS.indexOf(t)&&(r=a?e.map(o):[o(e)]),r}function o(t){return function(e){e=a(t,e);var r=Math.min(e[0],e[1]),n=Math.max(e[0],e[1]);return{start:r,end:n,size:n-r}}}function s(t){return function(e){return{start:e=a(t,e),end:1/0,size:1/0}}}e.exports={"[]":o("[]"),"][":o("]["),">":s(">"),"<":s("<"),"=":s("=")}},{"../../constants/filter_ops":470,"fast-isnumeric":185}],737:[function(t,e,r){"use strict";e.exports=function(t,e,r,n){var i=n("contours.start"),a=n("contours.end"),o=!1===i||!1===a,s=r("contours.size");!(o?e.autocontour=!0:r("autocontour",!1))&&s||r("ncontours")}},{}],738:[function(t,e,r){"use strict";var n=t("../../lib");function i(t){return n.extendFlat({},t,{edgepaths:n.extendDeep([],t.edgepaths),paths:n.extendDeep([],t.paths),starts:n.extendDeep([],t.starts)})}e.exports=function(t,e){var r,a,o,s=function(t){return t.reverse()},l=function(t){return t};switch(e){case"=":case"<":return t;case">":for(1!==t.length&&n.warn("Contour data 
invalid for the specified inequality operation."),a=t[0],r=0;r1e3){n.warn("Too many contours, clipping at 1000",t);break}return l}},{"../../lib":498,"./constraint_mapping":736,"./end_plus":741}],741:[function(t,e,r){"use strict";e.exports=function(t){return t.end+t.size/1e6}},{}],742:[function(t,e,r){"use strict";var n=t("../../lib"),i=t("./constants");function a(t,e,r,n){return Math.abs(t[0]-e[0])20&&e?208===t||1114===t?n=0===r[0]?1:-1:a=0===r[1]?1:-1:-1!==i.BOTTOMSTART.indexOf(t)?a=1:-1!==i.LEFTSTART.indexOf(t)?n=1:-1!==i.TOPSTART.indexOf(t)?a=-1:n=-1;return[n,a]}(f,r,e),p=[s(t,e,[-h[0],-h[1]])],d=t.z.length,m=t.z[0].length,g=e.slice(),v=h.slice();for(c=0;c<1e4;c++){if(f>20?(f=i.CHOOSESADDLE[f][(h[0]||h[1])<0?0:1],t.crossings[u]=i.SADDLEREMAINDER[f]):delete t.crossings[u],!(h=i.NEWDELTA[f])){n.log("Found bad marching index:",f,e,t.level);break}p.push(s(t,e,h)),e[0]+=h[0],e[1]+=h[1],u=e.join(","),a(p[p.length-1],p[p.length-2],o,l)&&p.pop();var y=h[0]&&(e[0]<0||e[0]>m-2)||h[1]&&(e[1]<0||e[1]>d-2);if(e[0]===g[0]&&e[1]===g[1]&&h[0]===v[0]&&h[1]===v[1]||r&&y)break;f=t.crossings[u]}1e4===c&&n.log("Infinite loop in contour?");var x,b,_,w,T,k,A,M,S,E,L,C,P,I,O,z=a(p[0],p[p.length-1],o,l),D=0,R=.2*t.smoothing,F=[],B=0;for(c=1;c=B;c--)if((x=F[c])=B&&x+F[b]M&&S--,t.edgepaths[S]=L.concat(p,E));break}V||(t.edgepaths[M]=p.concat(E))}for(M=0;Mt?0:1)+(e[0][1]>t?0:2)+(e[1][1]>t?0:4)+(e[1][0]>t?0:8);return 5===r||10===r?t>(e[0][0]+e[0][1]+e[1][0]+e[1][1])/4?5===r?713:1114:5===r?104:208:15===r?0:r}e.exports=function(t){var e,r,a,o,s,l,c,u,f,h=t[0].z,p=h.length,d=h[0].length,m=2===p||2===d;for(r=0;r=0&&(n=y,s=l):Math.abs(r[1]-n[1])<.01?Math.abs(r[1]-y[1])<.01&&(y[0]-r[0])*(n[0]-y[0])>=0&&(n=y,s=l):i.log("endpt to newendpt is not vert. 
or horz.",r,n,y)}if(r=n,s>=0)break;f+="L"+n}if(s===t.edgepaths.length){i.log("unclosed perimeter path");break}h=s,(d=-1===p.indexOf(h))&&(h=p[0],f+="Z")}for(h=0;hn.center?n.right-s:s-n.left)/(u+Math.abs(Math.sin(c)*o)),p=(l>n.middle?n.bottom-l:l-n.top)/(Math.abs(f)+Math.cos(c)*o);if(h<1||p<1)return 1/0;var d=v.EDGECOST*(1/(h-1)+1/(p-1));d+=v.ANGLECOST*c*c;for(var m=s-u,g=l-f,y=s+u,x=l+f,b=0;b2*v.MAXCOST)break;p&&(s/=2),l=(o=c-s/2)+1.5*s}if(h<=v.MAXCOST)return u},r.addLabelData=function(t,e,r,n){var i=e.fontSize,a=e.width+i/3,o=Math.max(0,e.height-i/3),s=t.x,l=t.y,c=t.theta,u=Math.sin(c),f=Math.cos(c),h=function(t,e){return[s+t*f-e*u,l+t*u+e*f]},p=[h(-a/2,-o/2),h(-a/2,o/2),h(a/2,o/2),h(a/2,-o/2)];r.push({text:e.text,x:s,y:l,dy:e.dy,theta:c,level:e.level,width:a,height:o}),n.push(p)},r.drawLabels=function(t,e,r,a,o){var l=t.selectAll("text").data(e,(function(t){return t.text+","+t.x+","+t.y+","+t.theta}));if(l.exit().remove(),l.enter().append("text").attr({"data-notex":1,"text-anchor":"middle"}).each((function(t){var e=t.x+Math.sin(t.theta)*t.dy,i=t.y-Math.cos(t.theta)*t.dy;n.select(this).text(t.text).attr({x:e,y:i,transform:"rotate("+180*t.theta/Math.PI+" "+e+" "+i+")"}).call(s.convertToTspans,r)})),o){for(var c="",u=0;ur.end&&(r.start=r.end=(r.start+r.end)/2),t._input.contours||(t._input.contours={}),i.extendFlat(t._input.contours,{start:r.start,end:r.end,size:r.size}),t._input.autocontour=!0}else if("constraint"!==r.type){var c,u=r.start,f=r.end,h=t._input.contours;if(u>f&&(r.start=h.start=f,f=r.end=h.end=u,u=r.start),!(r.size>0))c=u===f?1:a(u,f,t.ncontours).dtick,h.size=r.size=c}}},{"../../lib":498,"../../plots/cartesian/axes":549}],750:[function(t,e,r){"use strict";var n=t("@plotly/d3"),i=t("../../components/drawing"),a=t("../heatmap/style"),o=t("./make_color_map");e.exports=function(t){var e=n.select(t).selectAll("g.contour");e.style("opacity",(function(t){return t[0].trace.opacity})),e.each((function(t){var 
e=n.select(this),r=t[0].trace,a=r.contours,s=r.line,l=a.size||1,c=a.start,u="constraint"===a.type,f=!u&&"lines"===a.coloring,h=!u&&"fill"===a.coloring,p=f||h?o(r):null;e.selectAll("g.contourlevel").each((function(t){n.select(this).selectAll("path").call(i.lineGroupStyle,s.width,f?p(t.level):s.color,s.dash)}));var d=a.labelfont;if(e.selectAll("g.contourlabels text").each((function(t){i.font(n.select(this),{family:d.family,size:d.size,color:d.color||(f?p(t.level):s.color)})})),u)e.selectAll("g.contourfill path").style("fill",r.fillcolor);else if(h){var m;e.selectAll("g.contourfill path").style("fill",(function(t){return void 0===m&&(m=t.level),p(t.level+.5*l)})),void 0===m&&(m=c),e.selectAll("g.contourbg path").style("fill",p(m-.5*l))}})),a(t)}},{"../../components/drawing":383,"../heatmap/style":800,"./make_color_map":746,"@plotly/d3":58}],751:[function(t,e,r){"use strict";var n=t("../../components/colorscale/defaults"),i=t("./label_defaults");e.exports=function(t,e,r,a,o){var s,l=r("contours.coloring"),c="";"fill"===l&&(s=r("contours.showlines")),!1!==s&&("lines"!==l&&(c=r("line.color","#000")),r("line.width",.5),r("line.dash")),"none"!==l&&(!0!==t.showlegend&&(e.showlegend=!1),e._dfltShowLegend=!1,n(t,e,a,r,{prefix:"",cLetter:"z"})),r("line.smoothing"),i(r,a,c,o)}},{"../../components/colorscale/defaults":371,"./label_defaults":745}],752:[function(t,e,r){"use strict";var 
n=t("../heatmap/attributes"),i=t("../contour/attributes"),a=t("../../components/colorscale/attributes"),o=t("../../lib/extend").extendFlat,s=i.contours;e.exports=o({carpet:{valType:"string",editType:"calc"},z:n.z,a:n.x,a0:n.x0,da:n.dx,b:n.y,b0:n.y0,db:n.dy,text:n.text,hovertext:n.hovertext,transpose:n.transpose,atype:n.xtype,btype:n.ytype,fillcolor:i.fillcolor,autocontour:i.autocontour,ncontours:i.ncontours,contours:{type:s.type,start:s.start,end:s.end,size:s.size,coloring:{valType:"enumerated",values:["fill","lines","none"],dflt:"fill",editType:"calc"},showlines:s.showlines,showlabels:s.showlabels,labelfont:s.labelfont,labelformat:s.labelformat,operation:s.operation,value:s.value,editType:"calc",impliedEdits:{autocontour:!1}},line:{color:i.line.color,width:i.line.width,dash:i.line.dash,smoothing:i.line.smoothing,editType:"plot"},transforms:void 0},a("",{cLetter:"z",autoColorDflt:!1}))},{"../../components/colorscale/attributes":368,"../../lib/extend":488,"../contour/attributes":730,"../heatmap/attributes":787}],753:[function(t,e,r){"use strict";var n=t("../../components/colorscale/calc"),i=t("../../lib"),a=t("../heatmap/convert_column_xyz"),o=t("../heatmap/clean_2d_array"),s=t("../heatmap/interp2d"),l=t("../heatmap/find_empties"),c=t("../heatmap/make_bound_array"),u=t("./defaults"),f=t("../carpet/lookup_carpetid"),h=t("../contour/set_contours");e.exports=function(t,e){var r=e._carpetTrace=f(t,e);if(r&&r.visible&&"legendonly"!==r.visible){if(!e.a||!e.b){var p=t.data[r.index],d=t.data[e.index];d.a||(d.a=p.a),d.b||(d.b=p.b),u(d,e,e._defaultColor,t._fullLayout)}var m=function(t,e){var r,u,f,h,p,d,m,g=e._carpetTrace,v=g.aaxis,y=g.baxis;v._minDtick=0,y._minDtick=0,i.isArray1D(e.z)&&a(e,v,y,"a","b",["z"]);r=e._a=e._a||e.a,h=e._b=e._b||e.b,r=r?v.makeCalcdata(e,"_a"):[],h=h?y.makeCalcdata(e,"_b"):[],u=e.a0||0,f=e.da||1,p=e.b0||0,d=e.db||1,m=e._z=o(e._z||e.z,e.transpose),e._emptypoints=l(m),s(m,e._emptypoints);var 
x=i.maxRowLength(m),b="scaled"===e.xtype?"":r,_=c(e,b,u,f,x,v),w="scaled"===e.ytype?"":h,T=c(e,w,p,d,m.length,y),k={a:_,b:T,z:m};"levels"===e.contours.type&&"none"!==e.contours.coloring&&n(t,e,{vals:m,containerStr:"",cLetter:"z"});return[k]}(t,e);return h(e,e._z),m}}},{"../../components/colorscale/calc":369,"../../lib":498,"../carpet/lookup_carpetid":703,"../contour/set_contours":749,"../heatmap/clean_2d_array":789,"../heatmap/convert_column_xyz":791,"../heatmap/find_empties":793,"../heatmap/interp2d":796,"../heatmap/make_bound_array":798,"./defaults":754}],754:[function(t,e,r){"use strict";var n=t("../../lib"),i=t("../heatmap/xyz_defaults"),a=t("./attributes"),o=t("../contour/constraint_defaults"),s=t("../contour/contours_defaults"),l=t("../contour/style_defaults");e.exports=function(t,e,r,c){function u(r,i){return n.coerce(t,e,a,r,i)}if(u("carpet"),t.a&&t.b){if(!i(t,e,u,c,"a","b"))return void(e.visible=!1);u("text"),"constraint"===u("contours.type")?o(t,e,u,c,r,{hasHover:!1}):(s(t,e,u,(function(r){return n.coerce2(t,e,a,r)})),l(t,e,u,c,{hasHover:!1}))}else e._defaultColor=r,e._length=null}},{"../../lib":498,"../contour/constraint_defaults":735,"../contour/contours_defaults":737,"../contour/style_defaults":751,"../heatmap/xyz_defaults":802,"./attributes":752}],755:[function(t,e,r){"use strict";e.exports={attributes:t("./attributes"),supplyDefaults:t("./defaults"),colorbar:t("../contour/colorbar"),calc:t("./calc"),plot:t("./plot"),style:t("../contour/style"),moduleType:"trace",name:"contourcarpet",basePlotModule:t("../../plots/cartesian"),categories:["cartesian","svg","carpet","contour","symbols","showLegend","hasLines","carpetDependent","noHover","noSortingByValue"],meta:{}}},{"../../plots/cartesian":563,"../contour/colorbar":733,"../contour/style":750,"./attributes":752,"./calc":753,"./defaults":754,"./plot":756}],756:[function(t,e,r){"use strict";var 
n=t("@plotly/d3"),i=t("../carpet/map_1d_array"),a=t("../carpet/makepath"),o=t("../../components/drawing"),s=t("../../lib"),l=t("../contour/make_crossings"),c=t("../contour/find_all_paths"),u=t("../contour/plot"),f=t("../contour/constants"),h=t("../contour/convert_to_constraints"),p=t("../contour/empty_pathinfo"),d=t("../contour/close_boundaries"),m=t("../carpet/lookup_carpetid"),g=t("../carpet/axis_aligned_line");function v(t,e,r){var n=t.getPointAtLength(e),i=t.getPointAtLength(r),a=i.x-n.x,o=i.y-n.y,s=Math.sqrt(a*a+o*o);return[a/s,o/s]}function y(t){var e=Math.sqrt(t[0]*t[0]+t[1]*t[1]);return[t[0]/e,t[1]/e]}function x(t,e){var r=Math.abs(t[0]*e[0]+t[1]*e[1]);return Math.sqrt(1-r*r)/r}e.exports=function(t,e,r,b){var _=e.xaxis,w=e.yaxis;s.makeTraceGroups(b,r,"contour").each((function(r){var b=n.select(this),T=r[0],k=T.trace,A=k._carpetTrace=m(t,k),M=t.calcdata[A.index][0];if(A.visible&&"legendonly"!==A.visible){var S=T.a,E=T.b,L=k.contours,C=p(L,e,T),P="constraint"===L.type,I=L._operation,O=P?"="===I?"lines":"fill":L.coloring,z=[[S[0],E[E.length-1]],[S[S.length-1],E[E.length-1]],[S[S.length-1],E[0]],[S[0],E[0]]];l(C);var D=1e-8*(S[S.length-1]-S[0]),R=1e-8*(E[E.length-1]-E[0]);c(C,D,R);var F,B,N,j,U=C;"constraint"===L.type&&(U=h(C,I)),function(t,e){var r,n,i,a,o,s,l,c,u;for(r=0;r=0;j--)F=M.clipsegments[j],B=i([],F.x,_.c2p),N=i([],F.y,w.c2p),B.reverse(),N.reverse(),V.push(a(B,N,F.bicubic));var H="M"+V.join("L")+"Z";!function(t,e,r,n,o,l){var c,u,f,h,p=s.ensureSingle(t,"g","contourbg").selectAll("path").data("fill"!==l||o?[]:[0]);p.enter().append("path"),p.exit().remove();var d=[];for(h=0;h=0&&(h=L,d=m):Math.abs(f[1]-h[1])=0&&(h=L,d=m):s.log("endpt to newendpt is not vert. 
or horz.",f,h,L)}if(d>=0)break;y+=S(f,h),f=h}if(d===e.edgepaths.length){s.log("unclosed perimeter path");break}u=d,(b=-1===x.indexOf(u))&&(u=x[0],y+=S(f,h)+"Z",f=null)}for(u=0;ug&&(n.max=g);n.len=n.max-n.min}(this,r,t,n,c,e.height),!(n.len<(e.width+e.height)*f.LABELMIN)))for(var i=Math.min(Math.ceil(n.len/I),f.LABELMAX),a=0;a0?+p[u]:0),f.push({type:"Feature",geometry:{type:"Point",coordinates:v},properties:y})}}var b=o.extractOpts(e),_=b.reversescale?o.flipScale(b.colorscale):b.colorscale,w=_[0][1],T=["interpolate",["linear"],["heatmap-density"],0,a.opacity(w)<1?w:a.addOpacity(w,0)];for(u=1;u<_.length;u++)T.push(_[u][0],_[u][1]);var k=["interpolate",["linear"],["get","z"],b.min,0,b.max,1];return i.extendFlat(c.heatmap.paint,{"heatmap-weight":d?k:1/(b.max-b.min),"heatmap-color":T,"heatmap-radius":m?{type:"identity",property:"r"}:e.radius,"heatmap-opacity":e.opacity}),c.geojson={type:"FeatureCollection",features:f},c.heatmap.layout.visibility="visible",c}},{"../../components/color":361,"../../components/colorscale":373,"../../constants/numerical":474,"../../lib":498,"../../lib/geojson_utils":492,"fast-isnumeric":185}],760:[function(t,e,r){"use strict";var n=t("../../lib"),i=t("../../components/colorscale/defaults"),a=t("./attributes");e.exports=function(t,e,r,o){function s(r,i){return n.coerce(t,e,a,r,i)}var l=s("lon")||[],c=s("lat")||[],u=Math.min(l.length,c.length);u?(e._length=u,s("z"),s("radius"),s("below"),s("text"),s("hovertext"),s("hovertemplate"),i(t,e,o,s,{prefix:"",cLetter:"z"})):e.visible=!1}},{"../../components/colorscale/defaults":371,"../../lib":498,"./attributes":757}],761:[function(t,e,r){"use strict";e.exports=function(t,e){return t.lon=e.lon,t.lat=e.lat,t.z=e.z,t}},{}],762:[function(t,e,r){"use strict";var n=t("../../plots/cartesian/axes"),i=t("../scattermapbox/hover").hoverPoints,a=t("../scattermapbox/hover").getExtraText;e.exports=function(t,e,r){var o=i(t,e,r);if(o){var s=o[0],l=s.cd,c=l[0].trace,u=l[s.index];if(delete s.color,"z"in u){var 
f=s.subplot.mockAxis;s.z=u.z,s.zLabel=n.tickText(f,f.c2l(u.z),"hover").text}return s.extraText=a(c,u,l[0].t.labels),[s]}}},{"../../plots/cartesian/axes":549,"../scattermapbox/hover":993}],763:[function(t,e,r){"use strict";e.exports={attributes:t("./attributes"),supplyDefaults:t("./defaults"),colorbar:t("../heatmap/colorbar"),formatLabels:t("../scattermapbox/format_labels"),calc:t("./calc"),plot:t("./plot"),hoverPoints:t("./hover"),eventData:t("./event_data"),getBelow:function(t,e){for(var r=e.getMapLayers(),n=0;n=0;r--)t.removeLayer(e[r][1])},o.dispose=function(){var t=this.subplot.map;this._removeLayers(),t.removeSource(this.sourceId)},e.exports=function(t,e){var r=e[0].trace,i=new a(t,r.uid),o=i.sourceId,s=n(e),l=i.below=t.belowLookup["trace-"+r.uid];return t.map.addSource(o,{type:"geojson",data:s.geojson}),i._addLayers(s,l),i}},{"../../plots/mapbox/constants":606,"./convert":759}],765:[function(t,e,r){"use strict";var n=t("../../lib");e.exports=function(t,e){for(var r=0;r"),l.color=function(t,e){var r=t.marker,i=e.mc||r.color,a=e.mlc||r.line.color,o=e.mlw||r.line.width;if(n(i))return i;if(n(a)&&o)return a}(u,h),[l]}}},{"../../components/color":361,"../../lib":498,"../bar/hover":650}],773:[function(t,e,r){"use 
strict";e.exports={attributes:t("./attributes"),layoutAttributes:t("./layout_attributes"),supplyDefaults:t("./defaults").supplyDefaults,crossTraceDefaults:t("./defaults").crossTraceDefaults,supplyLayoutDefaults:t("./layout_defaults"),calc:t("./calc"),crossTraceCalc:t("./cross_trace_calc"),plot:t("./plot"),style:t("./style").style,hoverPoints:t("./hover"),eventData:t("./event_data"),selectPoints:t("../bar/select"),moduleType:"trace",name:"funnel",basePlotModule:t("../../plots/cartesian"),categories:["bar-like","cartesian","svg","oriented","showLegend","zoomScale"],meta:{}}},{"../../plots/cartesian":563,"../bar/select":655,"./attributes":766,"./calc":767,"./cross_trace_calc":769,"./defaults":770,"./event_data":771,"./hover":772,"./layout_attributes":774,"./layout_defaults":775,"./plot":776,"./style":777}],774:[function(t,e,r){"use strict";e.exports={funnelmode:{valType:"enumerated",values:["stack","group","overlay"],dflt:"stack",editType:"calc"},funnelgap:{valType:"number",min:0,max:1,editType:"calc"},funnelgroupgap:{valType:"number",min:0,max:1,dflt:0,editType:"calc"}}},{}],775:[function(t,e,r){"use strict";var n=t("../../lib"),i=t("./layout_attributes");e.exports=function(t,e,r){var a=!1;function o(r,a){return n.coerce(t,e,i,r,a)}for(var s=0;s path").each((function(t){if(!t.isBlank){var e=s.marker;n.select(this).call(a.fill,t.mc||e.color).call(a.stroke,t.mlc||e.line.color).call(i.dashLine,e.line.dash,t.mlw||e.line.width).style("opacity",s.selectedpoints&&!t.selected?o:1)}})),c(r,s,t),r.selectAll(".regions").each((function(){n.select(this).selectAll("path").style("stroke-width",0).call(a.fill,s.connector.fillcolor)})),r.selectAll(".lines").each((function(){var t=s.connector.line;i.lineGroupStyle(n.select(this).selectAll("path"),t.width,t.color,t.dash)}))}))}}},{"../../components/color":361,"../../components/drawing":383,"../../constants/interactions":473,"../bar/style":657,"../bar/uniform_text":659,"@plotly/d3":58}],778:[function(t,e,r){"use strict";var 
n=t("../pie/attributes"),i=t("../../plots/attributes"),a=t("../../plots/domain").attributes,o=t("../../plots/template_attributes").hovertemplateAttrs,s=t("../../plots/template_attributes").texttemplateAttrs,l=t("../../lib/extend").extendFlat;e.exports={labels:n.labels,label0:n.label0,dlabel:n.dlabel,values:n.values,marker:{colors:n.marker.colors,line:{color:l({},n.marker.line.color,{dflt:null}),width:l({},n.marker.line.width,{dflt:1}),editType:"calc"},editType:"calc"},text:n.text,hovertext:n.hovertext,scalegroup:l({},n.scalegroup,{}),textinfo:l({},n.textinfo,{flags:["label","text","value","percent"]}),texttemplate:s({editType:"plot"},{keys:["label","color","value","text","percent"]}),hoverinfo:l({},i.hoverinfo,{flags:["label","text","value","percent","name"]}),hovertemplate:o({},{keys:["label","color","value","text","percent"]}),textposition:l({},n.textposition,{values:["inside","none"],dflt:"inside"}),textfont:n.textfont,insidetextfont:n.insidetextfont,title:{text:n.title.text,font:n.title.font,position:l({},n.title.position,{values:["top left","top center","top right"],dflt:"top center"}),editType:"plot"},domain:a({name:"funnelarea",trace:!0,editType:"calc"}),aspectratio:{valType:"number",min:0,dflt:1,editType:"plot"},baseratio:{valType:"number",min:0,max:1,dflt:.333,editType:"plot"}}},{"../../lib/extend":488,"../../plots/attributes":545,"../../plots/domain":579,"../../plots/template_attributes":628,"../pie/attributes":896}],779:[function(t,e,r){"use strict";var n=t("../../plots/plots");r.name="funnelarea",r.plot=function(t,e,i,a){n.plotBasePlot(r.name,t,e,i,a)},r.clean=function(t,e,i,a){n.cleanBasePlot(r.name,t,e,i,a)}},{"../../plots/plots":614}],780:[function(t,e,r){"use strict";var n=t("../pie/calc");e.exports={calc:function(t,e){return n.calc(t,e)},crossTraceCalc:function(t){n.crossTraceCalc(t,{type:"funnelarea"})}}},{"../pie/calc":898}],781:[function(t,e,r){"use strict";var 
n=t("../../lib"),i=t("./attributes"),a=t("../../plots/domain").defaults,o=t("../bar/defaults").handleText,s=t("../pie/defaults").handleLabelsAndValues;e.exports=function(t,e,r,l){function c(r,a){return n.coerce(t,e,i,r,a)}var u=c("labels"),f=c("values"),h=s(u,f),p=h.len;if(e._hasLabels=h.hasLabels,e._hasValues=h.hasValues,!e._hasLabels&&e._hasValues&&(c("label0"),c("dlabel")),p){e._length=p,c("marker.line.width")&&c("marker.line.color",l.paper_bgcolor),c("marker.colors"),c("scalegroup");var d,m=c("text"),g=c("texttemplate");if(g||(d=c("textinfo",Array.isArray(m)?"text+percent":"percent")),c("hovertext"),c("hovertemplate"),g||d&&"none"!==d){var v=c("textposition");o(t,e,l,c,v,{moduleHasSelected:!1,moduleHasUnselected:!1,moduleHasConstrain:!1,moduleHasCliponaxis:!1,moduleHasTextangle:!1,moduleHasInsideanchor:!1})}a(e,l,c),c("title.text")&&(c("title.position"),n.coerceFont(c,"title.font",l.font)),c("aspectratio"),c("baseratio")}else e.visible=!1}},{"../../lib":498,"../../plots/domain":579,"../bar/defaults":647,"../pie/defaults":899,"./attributes":778}],782:[function(t,e,r){"use strict";e.exports={moduleType:"trace",name:"funnelarea",basePlotModule:t("./base_plot"),categories:["pie-like","funnelarea","showLegend"],attributes:t("./attributes"),layoutAttributes:t("./layout_attributes"),supplyDefaults:t("./defaults"),supplyLayoutDefaults:t("./layout_defaults"),calc:t("./calc").calc,crossTraceCalc:t("./calc").crossTraceCalc,plot:t("./plot"),style:t("./style"),styleOne:t("../pie/style_one"),meta:{}}},{"../pie/style_one":907,"./attributes":778,"./base_plot":779,"./calc":780,"./defaults":781,"./layout_attributes":783,"./layout_defaults":784,"./plot":785,"./style":786}],783:[function(t,e,r){"use strict";var n=t("../pie/layout_attributes").hiddenlabels;e.exports={hiddenlabels:n,funnelareacolorway:{valType:"colorlist",editType:"calc"},extendfunnelareacolors:{valType:"boolean",dflt:!0,editType:"calc"}}},{"../pie/layout_attributes":903}],784:[function(t,e,r){"use strict";var 
n=t("../../lib"),i=t("./layout_attributes");e.exports=function(t,e){function r(r,a){return n.coerce(t,e,i,r,a)}r("hiddenlabels"),r("funnelareacolorway",e.colorway),r("extendfunnelareacolors")}},{"../../lib":498,"./layout_attributes":783}],785:[function(t,e,r){"use strict";var n=t("@plotly/d3"),i=t("../../components/drawing"),a=t("../../lib"),o=a.strScale,s=a.strTranslate,l=t("../../lib/svg_text_utils"),c=t("../bar/plot").toMoveInsideBar,u=t("../bar/uniform_text"),f=u.recordMinTextSize,h=u.clearMinTextSize,p=t("../pie/helpers"),d=t("../pie/plot"),m=d.attachFxHandlers,g=d.determineInsideTextFont,v=d.layoutAreas,y=d.prerenderTitles,x=d.positionTitleOutside,b=d.formatSliceLabel;function _(t,e){return"l"+(e[0]-t[0])+","+(e[1]-t[1])}e.exports=function(t,e){var r=t._fullLayout;h("funnelarea",r),y(e,t),v(e,r._size),a.makeTraceGroups(r._funnelarealayer,e,"trace").each((function(e){var u=n.select(this),h=e[0],d=h.trace;!function(t){if(!t.length)return;var e=t[0],r=e.trace,n=r.aspectratio,i=r.baseratio;i>.999&&(i=.999);var a,o=Math.pow(i,2),s=e.vTotal,l=s,c=s*o/(1-o)/s;function u(){var t,e={x:t=Math.sqrt(c),y:-t};return[e.x,e.y]}var f,h,p=[];for(p.push(u()),f=t.length-1;f>-1;f--)if(!(h=t[f]).hidden){var d=h.v/l;c+=d,p.push(u())}var m=1/0,g=-1/0;for(f=0;f-1;f--)if(!(h=t[f]).hidden){var A=p[k+=1][0],M=p[k][1];h.TL=[-A,M],h.TR=[A,M],h.BL=w,h.BR=T,h.pxmid=(S=h.TR,E=h.BR,[.5*(S[0]+E[0]),.5*(S[1]+E[1])]),w=h.TL,T=h.TR}var S,E}(e),u.each((function(){var u=n.select(this).selectAll("g.slice").data(e);u.enter().append("g").classed("slice",!0),u.exit().remove(),u.each((function(o,s){if(o.hidden)n.select(this).selectAll("path,g").remove();else{o.pointNumber=o.i,o.curveNumber=d.index;var u=h.cx,v=h.cy,y=n.select(this),x=y.selectAll("path.surface").data([o]);x.enter().append("path").classed("surface",!0).style({"pointer-events":"all"}),y.call(m,t,e);var w="M"+(u+o.TR[0])+","+(v+o.TR[1])+_(o.TR,o.BR)+_(o.BR,o.BL)+_(o.BL,o.TL)+"Z";x.attr("d",w),b(t,o,h);var 
T=p.castOption(d.textposition,o.pts),k=y.selectAll("g.slicetext").data(o.text&&"none"!==T?[0]:[]);k.enter().append("g").classed("slicetext",!0),k.exit().remove(),k.each((function(){var h=a.ensureSingle(n.select(this),"text","",(function(t){t.attr("data-notex",1)})),p=a.ensureUniformFontSize(t,g(d,o,r.font));h.text(o.text).attr({class:"slicetext",transform:"","text-anchor":"middle"}).call(i.font,p).call(l.convertToTspans,t);var m,y,x,b=i.bBox(h.node()),_=Math.min(o.BL[1],o.BR[1])+v,w=Math.max(o.TL[1],o.TR[1])+v;y=Math.max(o.TL[0],o.BL[0])+u,x=Math.min(o.TR[0],o.BR[0])+u,(m=c(y,x,_,w,b,{isHorizontal:!0,constrained:!0,angle:0,anchor:"middle"})).fontSize=p.size,f(d.type,m,r),e[s].transform=m,h.attr("transform",a.getTextTransform(m))}))}}));var v=n.select(this).selectAll("g.titletext").data(d.title.text?[0]:[]);v.enter().append("g").classed("titletext",!0),v.exit().remove(),v.each((function(){var e=a.ensureSingle(n.select(this),"text","",(function(t){t.attr("data-notex",1)})),c=d.title.text;d._meta&&(c=a.templateString(c,d._meta)),e.text(c).attr({class:"titletext",transform:"","text-anchor":"middle"}).call(i.font,d.title.font).call(l.convertToTspans,t);var u=x(h,r._size);e.attr("transform",s(u.x,u.y)+o(Math.min(1,u.scale))+s(u.tx,u.ty))}))}))}))}},{"../../components/drawing":383,"../../lib":498,"../../lib/svg_text_utils":524,"../bar/plot":654,"../bar/uniform_text":659,"../pie/helpers":901,"../pie/plot":905,"@plotly/d3":58}],786:[function(t,e,r){"use strict";var n=t("@plotly/d3"),i=t("../pie/style_one"),a=t("../bar/uniform_text").resizeText;e.exports=function(t){var e=t._fullLayout._funnelarealayer.selectAll(".trace");a(t,e,"funnelarea"),e.each((function(t){var e=t[0].trace,r=n.select(this);r.style({opacity:e.opacity}),r.selectAll("path.surface").each((function(t){n.select(this).call(i,t,e)}))}))}},{"../bar/uniform_text":659,"../pie/style_one":907,"@plotly/d3":58}],787:[function(t,e,r){"use strict";var 
n=t("../scatter/attributes"),i=t("../../plots/attributes"),a=t("../../plots/font_attributes"),o=t("../../plots/cartesian/axis_format_attributes").axisHoverFormat,s=t("../../plots/template_attributes").hovertemplateAttrs,l=t("../../plots/template_attributes").texttemplateAttrs,c=t("../../components/colorscale/attributes"),u=t("../../lib/extend").extendFlat;e.exports=u({z:{valType:"data_array",editType:"calc"},x:u({},n.x,{impliedEdits:{xtype:"array"}}),x0:u({},n.x0,{impliedEdits:{xtype:"scaled"}}),dx:u({},n.dx,{impliedEdits:{xtype:"scaled"}}),y:u({},n.y,{impliedEdits:{ytype:"array"}}),y0:u({},n.y0,{impliedEdits:{ytype:"scaled"}}),dy:u({},n.dy,{impliedEdits:{ytype:"scaled"}}),xperiod:u({},n.xperiod,{impliedEdits:{xtype:"scaled"}}),yperiod:u({},n.yperiod,{impliedEdits:{ytype:"scaled"}}),xperiod0:u({},n.xperiod0,{impliedEdits:{xtype:"scaled"}}),yperiod0:u({},n.yperiod0,{impliedEdits:{ytype:"scaled"}}),xperiodalignment:u({},n.xperiodalignment,{impliedEdits:{xtype:"scaled"}}),yperiodalignment:u({},n.yperiodalignment,{impliedEdits:{ytype:"scaled"}}),text:{valType:"data_array",editType:"calc"},hovertext:{valType:"data_array",editType:"calc"},transpose:{valType:"boolean",dflt:!1,editType:"calc"},xtype:{valType:"enumerated",values:["array","scaled"],editType:"calc+clearAxisTypes"},ytype:{valType:"enumerated",values:["array","scaled"],editType:"calc+clearAxisTypes"},zsmooth:{valType:"enumerated",values:["fast","best",!1],dflt:!1,editType:"calc"},hoverongaps:{valType:"boolean",dflt:!0,editType:"none"},connectgaps:{valType:"boolean",editType:"calc"},xgap:{valType:"number",dflt:0,min:0,editType:"plot"},ygap:{valType:"number",dflt:0,min:0,editType:"plot"},xhoverformat:o("x"),yhoverformat:o("y"),zhoverformat:o("z",1),hovertemplate:s(),texttemplate:l({arrayOk:!1,editType:"plot"},{keys:["x","y","z","text"]}),textfont:a({editType:"plot",autoSize:!0,autoColor:!0,colorEditType:"style"}),showlegend:u({},i.showlegend,{dflt:!1})},{transforms:void 
0},c("",{cLetter:"z",autoColorDflt:!1}))},{"../../components/colorscale/attributes":368,"../../lib/extend":488,"../../plots/attributes":545,"../../plots/cartesian/axis_format_attributes":552,"../../plots/font_attributes":580,"../../plots/template_attributes":628,"../scatter/attributes":922}],788:[function(t,e,r){"use strict";var n=t("../../registry"),i=t("../../lib"),a=t("../../plots/cartesian/axes"),o=t("../../plots/cartesian/align_period"),s=t("../histogram2d/calc"),l=t("../../components/colorscale/calc"),c=t("./convert_column_xyz"),u=t("./clean_2d_array"),f=t("./interp2d"),h=t("./find_empties"),p=t("./make_bound_array"),d=t("../../constants/numerical").BADNUM;function m(t){for(var e=[],r=t.length,n=0;nD){O("x scale is not linear");break}}if(x.length&&"fast"===P){var R=(x[x.length-1]-x[0])/(x.length-1),F=Math.abs(R/100);for(k=0;kF){O("y scale is not linear");break}}}var B=i.maxRowLength(T),N="scaled"===e.xtype?"":r,j=p(e,N,g,v,B,M),U="scaled"===e.ytype?"":x,V=p(e,U,b,_,T.length,S);C||(e._extremes[M._id]=a.findExtremes(M,j),e._extremes[S._id]=a.findExtremes(S,V));var H={x:j,y:V,z:T,text:e._text||e.text,hovertext:e._hovertext||e.hovertext};if(e.xperiodalignment&&y&&(H.orig_x=y),e.yperiodalignment&&w&&(H.orig_y=w),N&&N.length===j.length-1&&(H.xCenter=N),U&&U.length===V.length-1&&(H.yCenter=U),L&&(H.xRanges=A.xRanges,H.yRanges=A.yRanges,H.pts=A.pts),E||l(t,e,{vals:T,cLetter:"z"}),E&&e.contours&&"heatmap"===e.contours.coloring){var q={type:"contour"===e.type?"heatmap":"histogram2d",xcalendar:e.xcalendar,ycalendar:e.ycalendar};H.xfill=p(q,N,g,v,B,M),H.yfill=p(q,U,b,_,T.length,S)}return[H]}},{"../../components/colorscale/calc":369,"../../constants/numerical":474,"../../lib":498,"../../plots/cartesian/align_period":546,"../../plots/cartesian/axes":549,"../../registry":633,"../histogram2d/calc":821,"./clean_2d_array":789,"./convert_column_xyz":791,"./find_empties":793,"./interp2d":796,"./make_bound_array":798}],789:[function(t,e,r){"use strict";var 
n=t("fast-isnumeric"),i=t("../../lib"),a=t("../../constants/numerical").BADNUM;e.exports=function(t,e,r,o){var s,l,c,u,f,h;function p(t){if(n(t))return+t}if(e&&e.transpose){for(s=0,f=0;f=0;o--)(s=((f[[(r=(a=h[o])[0])-1,i=a[1]]]||m)[2]+(f[[r+1,i]]||m)[2]+(f[[r,i-1]]||m)[2]+(f[[r,i+1]]||m)[2])/20)&&(l[a]=[r,i,s],h.splice(o,1),c=!0);if(!c)throw"findEmpties iterated with no new neighbors";for(a in l)f[a]=l[a],u.push(l[a])}return u.sort((function(t,e){return e[2]-t[2]}))}},{"../../lib":498}],794:[function(t,e,r){"use strict";var n=t("../../components/fx"),i=t("../../lib"),a=t("../../plots/cartesian/axes"),o=t("../../components/colorscale").extractOpts;e.exports=function(t,e,r,s,l){l||(l={});var c,u,f,h,p=l.isContour,d=t.cd[0],m=d.trace,g=t.xa,v=t.ya,y=d.x,x=d.y,b=d.z,_=d.xCenter,w=d.yCenter,T=d.zmask,k=m.zhoverformat,A=y,M=x;if(!1!==t.index){try{f=Math.round(t.index[1]),h=Math.round(t.index[0])}catch(e){return void i.error("Error hovering on heatmap, pointNumber must be [row,col], found:",t.index)}if(f<0||f>=b[0].length||h<0||h>b.length)return}else{if(n.inbox(e-y[0],e-y[y.length-1],0)>0||n.inbox(r-x[0],r-x[x.length-1],0)>0)return;if(p){var S;for(A=[2*y[0]-y[1]],S=1;Sm&&(v=Math.max(v,Math.abs(t[a][o]-d)/(g-m))))}return v}e.exports=function(t,e){var r,i=1;for(o(t,e),r=0;r.01;r++)i=o(t,e,a(i));return i>.01&&n.log("interp2d didn't converge quickly",i),t}},{"../../lib":498}],797:[function(t,e,r){"use strict";var n=t("../../lib");e.exports=function(t,e){t("texttemplate");var r=n.extendFlat({},e.font,{color:"auto",size:"auto"});n.coerceFont(t,"textfont",r)}},{"../../lib":498}],798:[function(t,e,r){"use strict";var n=t("../../registry"),i=t("../../lib").isArrayOrTypedArray;e.exports=function(t,e,r,a,o,s){var l,c,u,f=[],h=n.traceIs(t,"contour"),p=n.traceIs(t,"histogram"),d=n.traceIs(t,"gl2d");if(i(e)&&e.length>1&&!p&&"category"!==s.type){var m=e.length;if(!(m<=o))return h?e.slice(0,o):e.slice(0,o+1);if(h||d)f=e.slice(0,o);else 
if(1===o)f=[e[0]-.5,e[0]+.5];else{for(f=[1.5*e[0]-.5*e[1]],u=1;u0;)_=w.c2p(R[S]),S--;for(_0;)M=T.c2p(F[S]),S--;if(MGt||Gt>T._length))for(E=Ut;EWt||Wt>w._length)){var Xt=u({x:Yt,y:qt},I,t._fullLayout);Xt.x=Yt,Xt.y=qt;var Zt=P.z[S][E];void 0===Zt?(Xt.z="",Xt.zLabel=""):(Xt.z=Zt,Xt.zLabel=s.tickText(Ft,Zt,"hover").text);var Jt=P.text&&P.text[S]&&P.text[S][E];void 0!==Jt&&!1!==Jt||(Jt=""),Xt.text=Jt;var Kt=l.texttemplateString(Dt,Xt,t._fullLayout._d3locale,Xt,I._meta||{});if(Kt){var Qt=Kt.split("
"),$t=Qt.length,te=0;for(L=0;L<$t;L++)te=Math.max(te,Qt[L].length);Ht.push({l:$t,c:te,t:Kt,x:Wt,y:Gt,z:Zt})}}}}var ee=I.textfont,re=ee.family,ne=ee.size,ie=t._fullLayout.font.size;if(!ne||"auto"===ne){var ae=1/0,oe=1/0,se=0,le=0;for(L=0;L0&&(a=!0);for(var l=0;la){var o=a-r[t];return r[t]=a,o}}return 0},max:function(t,e,r,i){var a=i[e];if(n(a)){if(a=Number(a),!n(r[t]))return r[t]=a,a;if(r[t]c?t>o?t>1.1*i?i:t>1.1*a?a:o:t>s?s:t>l?l:c:Math.pow(10,Math.floor(Math.log(t)/Math.LN10))}function p(t,e,r,n,a,s){if(n&&t>o){var l=d(e,a,s),c=d(r,a,s),u=t===i?0:1;return l[u]!==c[u]}return Math.floor(r/t)-Math.floor(e/t)>.1}function d(t,e,r){var n=e.c2d(t,i,r).split("-");return""===n[0]&&(n.unshift(),n[0]="-"+n[0]),n}e.exports=function(t,e,r,n,a){var s,l,c=-1.1*e,h=-.1*e,p=t-h,d=r[0],m=r[1],g=Math.min(f(d+h,d+p,n,a),f(m+h,m+p,n,a)),v=Math.min(f(d+c,d+h,n,a),f(m+c,m+h,n,a));if(g>v&&vo){var y=s===i?1:6,x=s===i?"M12":"M1";return function(e,r){var o=n.c2d(e,i,a),s=o.indexOf("-",y);s>0&&(o=o.substr(0,s));var c=n.d2c(o,0,a);if(cr.r2l(B)&&(j=o.tickIncrement(j,b.size,!0,p)),z.start=r.l2r(j),F||i.nestedProperty(e,v+".start").set(z.start)}var U=b.end,V=r.r2l(O.end),H=void 0!==V;if((b.endFound||H)&&V!==r.r2l(U)){var q=H?V:i.aggNums(Math.max,null,d);z.end=r.l2r(q),H||i.nestedProperty(e,v+".start").set(z.end)}var G="autobin"+s;return!1===e._input[G]&&(e._input[v]=i.extendFlat({},e[v]||{}),delete e._input[G],delete e[G]),[z,d]}e.exports={calc:function(t,e){var r,a,p,d,m=[],g=[],v="h"===e.orientation,y=o.getFromId(t,v?e.yaxis:e.xaxis),x=v?"y":"x",b={x:"y",y:"x"}[x],_=e[x+"calendar"],w=e.cumulative,T=h(t,e,y,x),k=T[0],A=T[1],M="string"==typeof k.size,S=[],E=M?S:k,L=[],C=[],P=[],I=0,O=e.histnorm,z=e.histfunc,D=-1!==O.indexOf("density");w.enabled&&D&&(O=O.replace(/ ?density$/,""),D=!1);var R,F="max"===z||"min"===z?null:0,B=l.count,N=c[O],j=!1,U=function(t){return 
y.r2c(t,0,_)};for(i.isArrayOrTypedArray(e[b])&&"count"!==z&&(R=e[b],j="avg"===z,B=l[z]),r=U(k.start),p=U(k.end)+(r-o.tickIncrement(r,k.size,!1,_))/1e6;r=0&&d=0;n--)s(n);else if("increasing"===e){for(n=1;n=0;n--)t[n]+=t[n+1];"exclude"===r&&(t.push(0),t.shift())}}(g,w.direction,w.currentbin);var K=Math.min(m.length,g.length),Q=[],$=0,tt=K-1;for(r=0;r=$;r--)if(g[r]){tt=r;break}for(r=$;r<=tt;r++)if(n(m[r])&&n(g[r])){var et={p:m[r],s:g[r],b:0};w.enabled||(et.pts=P[r],Y?et.ph0=et.ph1=P[r].length?A[P[r][0]]:m[r]:(e._computePh=!0,et.ph0=q(S[r]),et.ph1=q(S[r+1],!0))),Q.push(et)}return 1===Q.length&&(Q[0].width1=o.tickIncrement(Q[0].p,k.size,!1,_)-Q[0].p),s(Q,e),i.isArrayOrTypedArray(e.selectedpoints)&&i.tagSelected(Q,e,Z),Q},calcAllAutoBins:h}},{"../../lib":498,"../../plots/cartesian/axes":549,"../../registry":633,"../bar/arrays_to_calcdata":642,"./average":808,"./bin_functions":810,"./bin_label_vals":811,"./norm_functions":819,"fast-isnumeric":185}],813:[function(t,e,r){"use strict";e.exports={eventDataKeys:["binNumber"]}},{}],814:[function(t,e,r){"use strict";var n=t("../../lib"),i=t("../../plots/cartesian/axis_ids"),a=t("../../registry").traceIs,o=t("../bar/defaults").handleGroupingDefaults,s=n.nestedProperty,l=t("../../plots/cartesian/constraints").getAxisGroup,c=[{aStr:{x:"xbins.start",y:"ybins.start"},name:"start"},{aStr:{x:"xbins.end",y:"ybins.end"},name:"end"},{aStr:{x:"xbins.size",y:"ybins.size"},name:"size"},{aStr:{x:"nbinsx",y:"nbinsy"},name:"nbins"}],u=["x","y"];e.exports=function(t,e){var r,f,h,p,d,m,g,v=e._histogramBinOpts={},y=[],x={},b=[];function _(t,e){return n.coerce(r._input,r,r._module.attributes,t,e)}function w(t){return"v"===t.orientation?"x":"y"}function T(t,r,a){var o=t.uid+"__"+a;r||(r=o);var s=function(t,r){return i.getFromTrace({_fullLayout:e},t,r).type}(t,a),l=t[a+"calendar"]||"",c=v[r],u=!0;c&&(s===c.axType&&l===c.calendar?(u=!1,c.traces.push(t),c.dirs.push(a)):(r=o,s!==c.axType&&n.warn(["Attempted to group the bins of trace",t.index,"set on 
a","type:"+s,"axis","with bins on","type:"+c.axType,"axis."].join(" ")),l!==c.calendar&&n.warn(["Attempted to group the bins of trace",t.index,"set with a",l,"calendar","with bins",c.calendar?"on a "+c.calendar+" calendar":"w/o a set calendar"].join(" ")))),u&&(v[r]={traces:[t],dirs:[a],axType:s,calendar:t[a+"calendar"]||""}),t["_"+a+"bingroup"]=r}for(d=0;dS&&T.splice(S,T.length-S),M.length>S&&M.splice(S,M.length-S);var E=[],L=[],C=[],P="string"==typeof w.size,I="string"==typeof A.size,O=[],z=[],D=P?O:w,R=I?z:A,F=0,B=[],N=[],j=e.histnorm,U=e.histfunc,V=-1!==j.indexOf("density"),H="max"===U||"min"===U?null:0,q=a.count,G=o[j],Y=!1,W=[],X=[],Z="z"in e?e.z:"marker"in e&&Array.isArray(e.marker.color)?e.marker.color:"";Z&&"count"!==U&&(Y="avg"===U,q=a[U]);var J=w.size,K=x(w.start),Q=x(w.end)+(K-i.tickIncrement(K,J,!1,v))/1e6;for(r=K;r=0&&p=0&&d-1,flipY:E.tiling.flip.indexOf("y")>-1,orientation:E.tiling.orientation,pad:{inner:E.tiling.pad},maxDepth:E._maxDepth}).descendants(),O=1/0,z=-1/0;I.forEach((function(t){var e=t.depth;e>=E._maxDepth?(t.x0=t.x1=(t.x0+t.x1)/2,t.y0=t.y1=(t.y0+t.y1)/2):(O=Math.min(O,e),z=Math.max(z,e))})),p=p.data(I,u.getPtId),E._maxVisibleLayers=isFinite(z)?z-O+1:0,p.enter().append("g").classed("slice",!0),T(p,!1,{},[m,g],x),p.order();var D=null;if(w&&M){var R=u.getPtId(M);p.each((function(t){null===D&&u.getPtId(t)===R&&(D={x0:t.x0,x1:t.x1,y0:t.y0,y1:t.y1})}))}var F=function(){return D||{x0:0,x1:m,y0:0,y1:g}},B=p;return w&&(B=B.transition().each("end",(function(){var e=n.select(this);u.setSliceCursor(e,t,{hideOnRoot:!0,hideOnLeaves:!1,isTransitioning:!1})}))),B.each((function(s){s._x0=v(s.x0),s._x1=v(s.x1),s._y0=y(s.y0),s._y1=y(s.y1),s._hoverX=v(s.x1-E.tiling.pad),s._hoverY=y(P?s.y1-E.tiling.pad/2:s.y0+E.tiling.pad/2);var p=n.select(this),d=i.ensureSingle(p,"path","surface",(function(t){t.style("pointer-events","all")}));w?d.transition().attrTween("d",(function(t){var 
e=k(t,!1,F(),[m,g],{orientation:E.tiling.orientation,flipX:E.tiling.flip.indexOf("x")>-1,flipY:E.tiling.flip.indexOf("y")>-1});return function(t){return x(e(t))}})):d.attr("d",x),p.call(f,r,t,e,{styleOne:l,eventDataKeys:c.eventDataKeys,transitionTime:c.CLICK_TRANSITION_TIME,transitionEasing:c.CLICK_TRANSITION_EASING}).call(u.setSliceCursor,t,{isTransitioning:t._transitioning}),d.call(l,s,E,{hovered:!1}),s.x0===s.x1||s.y0===s.y1?s._text="":s._text=h(s,r,E,e,S)||"";var T=i.ensureSingle(p,"g","slicetext"),M=i.ensureSingle(T,"text","",(function(t){t.attr("data-notex",1)})),I=i.ensureUniformFontSize(t,u.determineTextFont(E,s,S.font));M.text(s._text||" ").classed("slicetext",!0).attr("text-anchor",C?"end":L?"start":"middle").call(a.font,I).call(o.convertToTspans,t),s.textBB=a.bBox(M.node()),s.transform=b(s,{fontSize:I.size}),s.transform.fontSize=I.size,w?M.transition().attrTween("transform",(function(t){var e=A(t,!1,F(),[m,g]);return function(t){return _(e(t))}})):M.attr("transform",_(s))})),D}},{"../../components/drawing":383,"../../lib":498,"../../lib/svg_text_utils":524,"../sunburst/fx":1049,"../sunburst/helpers":1050,"../sunburst/plot":1054,"../treemap/constants":1073,"./partition":837,"./style":839,"@plotly/d3":58}],834:[function(t,e,r){"use strict";e.exports={moduleType:"trace",name:"icicle",basePlotModule:t("./base_plot"),categories:[],animatable:!0,attributes:t("./attributes"),layoutAttributes:t("./layout_attributes"),supplyDefaults:t("./defaults"),supplyLayoutDefaults:t("./layout_defaults"),calc:t("./calc").calc,crossTraceCalc:t("./calc").crossTraceCalc,plot:t("./plot"),style:t("./style").style,colorbar:t("../scatter/marker_colorbar"),meta:{}}},{"../scatter/marker_colorbar":940,"./attributes":829,"./base_plot":830,"./calc":831,"./defaults":832,"./layout_attributes":835,"./layout_defaults":836,"./plot":838,"./style":839}],835:[function(t,e,r){"use 
strict";e.exports={iciclecolorway:{valType:"colorlist",editType:"calc"},extendiciclecolors:{valType:"boolean",dflt:!0,editType:"calc"}}},{}],836:[function(t,e,r){"use strict";var n=t("../../lib"),i=t("./layout_attributes");e.exports=function(t,e){function r(r,a){return n.coerce(t,e,i,r,a)}r("iciclecolorway",e.colorway),r("extendiciclecolors")}},{"../../lib":498,"./layout_attributes":835}],837:[function(t,e,r){"use strict";var n=t("d3-hierarchy"),i=t("../treemap/flip_tree");e.exports=function(t,e,r){var a=r.flipX,o=r.flipY,s="h"===r.orientation,l=r.maxDepth,c=e[0],u=e[1];l&&(c=(t.height+1)*e[0]/Math.min(t.height+1,l),u=(t.height+1)*e[1]/Math.min(t.height+1,l));var f=n.partition().padding(r.pad.inner).size(s?[e[1],c]:[e[0],u])(t);return(s||a||o)&&i(f,e,{swapXY:s,flipX:a,flipY:o}),f}},{"../treemap/flip_tree":1078,"d3-hierarchy":110}],838:[function(t,e,r){"use strict";var n=t("../treemap/draw"),i=t("./draw_descendants");e.exports=function(t,e,r,a){return n(t,e,r,a,{type:"icicle",drawDescendants:i})}},{"../treemap/draw":1075,"./draw_descendants":833}],839:[function(t,e,r){"use strict";var n=t("@plotly/d3"),i=t("../../components/color"),a=t("../../lib"),o=t("../bar/uniform_text").resizeText;function s(t,e,r){var n=e.data.data,o=!e.children,s=n.i,l=a.castOption(r,s,"marker.line.color")||i.defaultLine,c=a.castOption(r,s,"marker.line.width")||0;t.style("stroke-width",c).call(i.fill,n.color).call(i.stroke,l).style("opacity",o?r.leaf.opacity:null)}e.exports={style:function(t){var e=t._fullLayout._iciclelayer.selectAll(".trace");o(t,e,"icicle"),e.each((function(t){var e=n.select(this),r=t[0].trace;e.style("opacity",r.opacity),e.selectAll("path.surface").each((function(t){n.select(this).call(s,t,r)}))}))},styleOne:s}},{"../../components/color":361,"../../lib":498,"../bar/uniform_text":659,"@plotly/d3":58}],840:[function(t,e,r){"use strict";for(var 
n=t("../../plots/attributes"),i=t("../../plots/template_attributes").hovertemplateAttrs,a=t("../../lib/extend").extendFlat,o=t("./constants").colormodel,s=["rgb","rgba","rgba256","hsl","hsla"],l=[],c=[],u=0;u0||n.inbox(r-o.y0,r-(o.y0+o.h*s.dy),0)>0)){var u,f=Math.floor((e-o.x0)/s.dx),h=Math.floor(Math.abs(r-o.y0)/s.dy);if(s._hasZ?u=o.z[h][f]:s._hasSource&&(u=s._canvas.el.getContext("2d").getImageData(f,h,1,1).data),u){var p,d=o.hi||s.hoverinfo;if(d){var m=d.split("+");-1!==m.indexOf("all")&&(m=["color"]),-1!==m.indexOf("color")&&(p=!0)}var g,v=a.colormodel[s.colormodel],y=v.colormodel||s.colormodel,x=y.length,b=s._scaler(u),_=v.suffix,w=[];(s.hovertemplate||p)&&(w.push("["+[b[0]+_[0],b[1]+_[1],b[2]+_[2]].join(", ")),4===x&&w.push(", "+b[3]+_[3]),w.push("]"),w=w.join(""),t.extraText=y.toUpperCase()+": "+w),Array.isArray(s.hovertext)&&Array.isArray(s.hovertext[h])?g=s.hovertext[h][f]:Array.isArray(s.text)&&Array.isArray(s.text[h])&&(g=s.text[h][f]);var T=c.c2p(o.y0+(h+.5)*s.dy),k=o.x0+(f+.5)*s.dx,A=o.y0+(h+.5)*s.dy,M="["+u.slice(0,s.colormodel.length).join(", ")+"]";return[i.extendFlat(t,{index:[h,f],x0:l.c2p(o.x0+f*s.dx),x1:l.c2p(o.x0+(f+1)*s.dx),y0:T,y1:T,color:b,xVal:k,xLabelVal:k,yVal:A,yLabelVal:A,zLabelVal:M,text:g,hovertemplateLabels:{zLabel:M,colorLabel:w,"color[0]Label":b[0]+_[0],"color[1]Label":b[1]+_[1],"color[2]Label":b[2]+_[2],"color[3]Label":b[3]+_[3]}})]}}}},{"../../components/fx":401,"../../lib":498,"./constants":842}],847:[function(t,e,r){"use strict";e.exports={attributes:t("./attributes"),supplyDefaults:t("./defaults"),calc:t("./calc"),plot:t("./plot"),style:t("./style"),hoverPoints:t("./hover"),eventData:t("./event_data"),moduleType:"trace",name:"image",basePlotModule:t("../../plots/cartesian"),categories:["cartesian","svg","2dMap","noSortingByValue"],animatable:!1,meta:{}}},{"../../plots/cartesian":563,"./attributes":840,"./calc":841,"./defaults":843,"./event_data":844,"./hover":846,"./plot":848,"./style":849}],848:[function(t,e,r){"use 
strict";var n=t("@plotly/d3"),i=t("../../lib"),a=i.strTranslate,o=t("../../constants/xmlns_namespaces"),s=t("./constants"),l=i.isIOS()||i.isSafari()||i.isIE();e.exports=function(t,e,r,c){var u=e.xaxis,f=e.yaxis,h=!(l||t._context._exportedPlot);i.makeTraceGroups(c,r,"im").each((function(e){var r=n.select(this),l=e[0],c=l.trace,p=("fast"===c.zsmooth||!1===c.zsmooth&&h)&&!c._hasZ&&c._hasSource&&"linear"===u.type&&"linear"===f.type;c._realImage=p;var d,m,g,v,y,x,b=l.z,_=l.x0,w=l.y0,T=l.w,k=l.h,A=c.dx,M=c.dy;for(x=0;void 0===d&&x0;)m=u.c2p(_+x*A),x--;for(x=0;void 0===v&&x0;)y=f.c2p(w+x*M),x--;if(mI[0];if(O||z){var D=d+S/2,R=v+E/2;C+="transform:"+a(D+"px",R+"px")+"scale("+(O?-1:1)+","+(z?-1:1)+")"+a(-D+"px",-R+"px")+";"}}L.attr("style",C);var F=new Promise((function(t){if(c._hasZ)t();else if(c._hasSource)if(c._canvas&&c._canvas.el.width===T&&c._canvas.el.height===k&&c._canvas.source===c.source)t();else{var e=document.createElement("canvas");e.width=T,e.height=k;var r=e.getContext("2d");c._image=c._image||new Image;var n=c._image;n.onload=function(){r.drawImage(n,0,0),c._canvas={el:e,source:c.source},t()},n.setAttribute("src",c.source)}})).then((function(){var t;if(c._hasZ)t=B((function(t,e){return b[e][t]})).toDataURL("image/png");else if(c._hasSource)if(p)t=c.source;else{var e=c._canvas.el.getContext("2d").getImageData(0,0,T,k).data;t=B((function(t,r){var n=4*(r*T+t);return[e[n],e[n+1],e[n+2],e[n+3]]})).toDataURL("image/png")}L.attr({"xlink:href":t,height:E,width:S,x:d,y:v})}));t._promises.push(F)}function B(t){var e=document.createElement("canvas");e.width=S,e.height=E;var r,n=e.getContext("2d"),a=function(t){return i.constrain(Math.round(u.c2p(_+t*A)-d),0,S)},o=function(t){return i.constrain(Math.round(f.c2p(w+t*M)-v),0,E)},h=s.colormodel[c.colormodel],p=h.colormodel||c.colormodel,m=h.fmt;for(x=0;x0}function T(t){t.each((function(t){y.stroke(n.select(this),t.line.color)})).each((function(t){y.fill(n.select(this),t.color)})).style("stroke-width",(function(t){return 
t.line.width}))}function k(t,e,r){var n=t._fullLayout,i=o.extendFlat({type:"linear",ticks:"outside",range:r,showline:!0},e),a={type:"linear",_id:"x"+e._id},s={letter:"x",font:n.font,noHover:!0,noTickson:!0};function l(t,e){return o.coerce(i,a,v,t,e)}return m(i,a,l,s,n),g(i,a,l,s),a}function A(t,e,r){return[Math.min(e/t.width,r/t.height),t,e+"x"+r]}function M(t,e,r,i){var a=document.createElementNS("http://www.w3.org/2000/svg","text"),o=n.select(a);return o.text(t).attr("x",0).attr("y",0).attr("text-anchor",r).attr("data-unformatted",t).call(p.convertToTspans,i).call(f.font,e),f.bBox(o.node())}function S(t,e,r,n,i,a){var s="_cache"+e;t[s]&&t[s].key===i||(t[s]={key:i,value:r});var l=o.aggNums(a,null,[t[s].value,n],2);return t[s].value=l,l}e.exports=function(t,e,r,m){var g,v=t._fullLayout;w(r)&&m&&(g=m()),o.makeTraceGroups(v._indicatorlayer,e,"trace").each((function(e){var m,E,L,C,P,I=e[0].trace,O=n.select(this),z=I._hasGauge,D=I._isAngular,R=I._isBullet,F=I.domain,B={w:v._size.w*(F.x[1]-F.x[0]),h:v._size.h*(F.y[1]-F.y[0]),l:v._size.l+v._size.w*F.x[0],r:v._size.r+v._size.w*(1-F.x[1]),t:v._size.t+v._size.h*(1-F.y[1]),b:v._size.b+v._size.h*F.y[0]},N=B.l+B.w/2,j=B.t+B.h/2,U=Math.min(B.w/2,B.h),V=h.innerRadius*U,H=I.align||"center";if(E=j,z){if(D&&(m=N,E=j+U/2,L=function(t){return function(t,e){var r=Math.sqrt(t.width/2*(t.width/2)+t.height*t.height);return[e/r,t,e]}(t,.9*V)}),R){var q=h.bulletPadding,G=1-h.bulletNumberDomainSize+q;m=B.l+(G+(1-G)*b[H])*B.w,L=function(t){return A(t,(h.bulletNumberDomainSize-q)*B.w,B.h)}}}else m=B.l+b[H]*B.w,L=function(t){return A(t,B.w,B.h)};!function(t,e,r,i){var c,u,h,m=r[0].trace,g=i.numbersX,v=i.numbersY,T=m.align||"center",A=x[T],E=i.transitionOpts,L=i.onComplete,C=o.ensureSingle(e,"g","numbers"),P=[];m._hasNumber&&P.push("number");m._hasDelta&&(P.push("delta"),"left"===m.delta.position&&P.reverse());var I=C.selectAll("text").data(P);function 
O(e,r,n,i){if(!e.match("s")||n>=0==i>=0||r(n).slice(-1).match(_)||r(i).slice(-1).match(_))return r;var a=e.slice().replace("s","f").replace(/\d+/,(function(t){return parseInt(t)-1})),o=k(t,{tickformat:a});return function(t){return Math.abs(t)<1?d.tickText(o,t).text:r(t)}}I.enter().append("text"),I.attr("text-anchor",(function(){return A})).attr("class",(function(t){return t})).attr("x",null).attr("y",null).attr("dx",null).attr("dy",null),I.exit().remove();var z,D=m.mode+m.align;m._hasDelta&&(z=function(){var e=k(t,{tickformat:m.delta.valueformat},m._range);e.setScale(),d.prepTicks(e);var i=function(t){return d.tickText(e,t).text},o=function(t){return m.delta.relative?t.relativeDelta:t.delta},s=function(t,e){return 0===t||"number"!=typeof t||isNaN(t)?"-":(t>0?m.delta.increasing.symbol:m.delta.decreasing.symbol)+e(t)},l=function(t){return t.delta>=0?m.delta.increasing.color:m.delta.decreasing.color};void 0===m._deltaLastValue&&(m._deltaLastValue=o(r[0]));var c=C.select("text.delta");function h(){c.text(s(o(r[0]),i)).call(y.fill,l(r[0])).call(p.convertToTspans,t)}return c.call(f.font,m.delta.font).call(y.fill,l({delta:m._deltaLastValue})),w(E)?c.transition().duration(E.duration).ease(E.easing).tween("text",(function(){var t=n.select(this),e=o(r[0]),c=m._deltaLastValue,u=O(m.delta.valueformat,i,c,e),f=a(c,e);return m._deltaLastValue=e,function(e){t.text(s(f(e),u)),t.call(y.fill,l({delta:f(e)}))}})).each("end",(function(){h(),L&&L()})).each("interrupt",(function(){h(),L&&L()})):h(),u=M(s(o(r[0]),i),m.delta.font,A,t),c}(),D+=m.delta.position+m.delta.font.size+m.delta.font.family+m.delta.valueformat,D+=m.delta.increasing.symbol+m.delta.decreasing.symbol,h=u);m._hasNumber&&(!function(){var e=k(t,{tickformat:m.number.valueformat},m._range);e.setScale(),d.prepTicks(e);var i=function(t){return d.tickText(e,t).text},o=m.number.suffix,s=m.number.prefix,l=C.select("text.number");function u(){var e="number"==typeof 
r[0].y?s+i(r[0].y)+o:"-";l.text(e).call(f.font,m.number.font).call(p.convertToTspans,t)}w(E)?l.transition().duration(E.duration).ease(E.easing).each("end",(function(){u(),L&&L()})).each("interrupt",(function(){u(),L&&L()})).attrTween("text",(function(){var t=n.select(this),e=a(r[0].lastY,r[0].y);m._lastValue=r[0].y;var l=O(m.number.valueformat,i,r[0].lastY,r[0].y);return function(r){t.text(s+l(e(r))+o)}})):u(),c=M(s+i(r[0].y)+o,m.number.font,A,t)}(),D+=m.number.font.size+m.number.font.family+m.number.valueformat+m.number.suffix+m.number.prefix,h=c);if(m._hasDelta&&m._hasNumber){var R,F,B=[(c.left+c.right)/2,(c.top+c.bottom)/2],N=[(u.left+u.right)/2,(u.top+u.bottom)/2],j=.75*m.delta.font.size;"left"===m.delta.position&&(R=S(m,"deltaPos",0,-1*(c.width*b[m.align]+u.width*(1-b[m.align])+j),D,Math.min),F=B[1]-N[1],h={width:c.width+u.width+j,height:Math.max(c.height,u.height),left:u.left+R,right:c.right,top:Math.min(c.top,u.top+F),bottom:Math.max(c.bottom,u.bottom+F)}),"right"===m.delta.position&&(R=S(m,"deltaPos",0,c.width*(1-b[m.align])+u.width*b[m.align]+j,D,Math.max),F=B[1]-N[1],h={width:c.width+u.width+j,height:Math.max(c.height,u.height),left:c.left,right:u.right+R,top:Math.min(c.top,u.top+F),bottom:Math.max(c.bottom,u.bottom+F)}),"bottom"===m.delta.position&&(R=null,F=u.height,h={width:Math.max(c.width,u.width),height:c.height+u.height,left:Math.min(c.left,u.left),right:Math.max(c.right,u.right),top:c.bottom-c.height,bottom:c.bottom+u.height}),"top"===m.delta.position&&(R=null,F=c.top,h={width:Math.max(c.width,u.width),height:c.height+u.height,left:Math.min(c.left,u.left),right:Math.max(c.right,u.right),top:c.bottom-c.height-u.height,bottom:c.bottom}),z.attr({dx:R,dy:F})}(m._hasNumber||m._hasDelta)&&C.attr("transform",(function(){var t=i.numbersScaler(h);D+=t[2];var e,r=S(m,"numbersScale",1,t[0],D,Math.min);m._scaleNumbers||(r=1),e=m._isAngular?v-r*h.bottom:v-r*(h.top+h.bottom)/2,m._numbersTop=r*h.top+e;var n=h[T];"center"===T&&(n=(h.left+h.right)/2);var 
a=g-r*n;return a=S(m,"numbersTranslate",0,a,D,Math.max),l(a,e)+s(r)}))}(t,O,e,{numbersX:m,numbersY:E,numbersScaler:L,transitionOpts:r,onComplete:g}),z&&(C={range:I.gauge.axis.range,color:I.gauge.bgcolor,line:{color:I.gauge.bordercolor,width:0},thickness:1},P={range:I.gauge.axis.range,color:"rgba(0, 0, 0, 0)",line:{color:I.gauge.bordercolor,width:I.gauge.borderwidth},thickness:1});var Y=O.selectAll("g.angular").data(D?e:[]);Y.exit().remove();var W=O.selectAll("g.angularaxis").data(D?e:[]);W.exit().remove(),D&&function(t,e,r,a){var o,s,f,h,p=r[0].trace,m=a.size,g=a.radius,v=a.innerRadius,y=a.gaugeBg,x=a.gaugeOutline,b=[m.l+m.w/2,m.t+m.h/2+g/2],_=a.gauge,A=a.layer,M=a.transitionOpts,S=a.onComplete,E=Math.PI/2;function L(t){var e=p.gauge.axis.range[0],r=(t-e)/(p.gauge.axis.range[1]-e)*Math.PI-E;return r<-E?-E:r>E?E:r}function C(t){return n.svg.arc().innerRadius((v+g)/2-t/2*(g-v)).outerRadius((v+g)/2+t/2*(g-v)).startAngle(-E)}function P(t){t.attr("d",(function(t){return C(t.thickness).startAngle(L(t.range[0])).endAngle(L(t.range[1]))()}))}_.enter().append("g").classed("angular",!0),_.attr("transform",l(b[0],b[1])),A.enter().append("g").classed("angularaxis",!0).classed("crisp",!0),A.selectAll("g.xangularaxistick,path,text").remove(),(o=k(t,p.gauge.axis)).type="linear",o.range=p.gauge.axis.range,o._id="xangularaxis",o.ticklabeloverflow="allow",o.setScale();var I=function(t){return(o.range[0]-t.x)/(o.range[1]-o.range[0])*Math.PI+Math.PI},O={},z=d.makeLabelFns(o,0).labelStandoff;O.xFn=function(t){var e=I(t);return Math.cos(e)*z},O.yFn=function(t){var e=I(t),r=Math.sin(e)>0?.2:1;return-Math.sin(e)*(z+t.fontSize*r)+Math.abs(Math.cos(e))*(t.fontSize*u)},O.anchorFn=function(t){var e=I(t),r=Math.cos(e);return Math.abs(r)<.1?"middle":r>0?"start":"end"},O.heightFn=function(t,e,r){var n=I(t);return-.5*(1+Math.sin(n))*r};var D=function(t){return l(b[0]+g*Math.cos(t),b[1]-g*Math.sin(t))};f=function(t){return 
D(I(t))};if(s=d.calcTicks(o),h=d.getTickSigns(o)[2],o.visible){h="inside"===o.ticks?-1:1;var R=(o.linewidth||1)/2;d.drawTicks(t,o,{vals:s,layer:A,path:"M"+h*R+",0h"+h*o.ticklen,transFn:function(t){var e=I(t);return D(e)+"rotate("+-c(e)+")"}}),d.drawLabels(t,o,{vals:s,layer:A,transFn:f,labelFns:O})}var F=[y].concat(p.gauge.steps),B=_.selectAll("g.bg-arc").data(F);B.enter().append("g").classed("bg-arc",!0).append("path"),B.select("path").call(P).call(T),B.exit().remove();var N=C(p.gauge.bar.thickness),j=_.selectAll("g.value-arc").data([p.gauge.bar]);j.enter().append("g").classed("value-arc",!0).append("path");var U=j.select("path");w(M)?(U.transition().duration(M.duration).ease(M.easing).each("end",(function(){S&&S()})).each("interrupt",(function(){S&&S()})).attrTween("d",(V=N,H=L(r[0].lastY),q=L(r[0].y),function(){var t=i(H,q);return function(e){return V.endAngle(t(e))()}})),p._lastValue=r[0].y):U.attr("d","number"==typeof r[0].y?N.endAngle(L(r[0].y)):"M0,0Z");var V,H,q;U.call(T),j.exit().remove(),F=[];var G=p.gauge.threshold.value;(G||0===G)&&F.push({range:[G,G],color:p.gauge.threshold.color,line:{color:p.gauge.threshold.line.color,width:p.gauge.threshold.line.width},thickness:p.gauge.threshold.thickness});var Y=_.selectAll("g.threshold-arc").data(F);Y.enter().append("g").classed("threshold-arc",!0).append("path"),Y.select("path").call(P).call(T),Y.exit().remove();var W=_.selectAll("g.gauge-outline").data([x]);W.enter().append("g").classed("gauge-outline",!0).append("path"),W.select("path").call(P).call(T),W.exit().remove()}(t,0,e,{radius:U,innerRadius:V,gauge:Y,layer:W,size:B,gaugeBg:C,gaugeOutline:P,transitionOpts:r,onComplete:g});var X=O.selectAll("g.bullet").data(R?e:[]);X.exit().remove();var Z=O.selectAll("g.bulletaxis").data(R?e:[]);Z.exit().remove(),R&&function(t,e,r,n){var 
i,a,o,s,c,u=r[0].trace,f=n.gauge,p=n.layer,m=n.gaugeBg,g=n.gaugeOutline,v=n.size,x=u.domain,b=n.transitionOpts,_=n.onComplete;f.enter().append("g").classed("bullet",!0),f.attr("transform",l(v.l,v.t)),p.enter().append("g").classed("bulletaxis",!0).classed("crisp",!0),p.selectAll("g.xbulletaxistick,path,text").remove();var A=v.h,M=u.gauge.bar.thickness*A,S=x.x[0],E=x.x[0]+(x.x[1]-x.x[0])*(u._hasNumber||u._hasDelta?1-h.bulletNumberDomainSize:1);(i=k(t,u.gauge.axis))._id="xbulletaxis",i.domain=[S,E],i.setScale(),a=d.calcTicks(i),o=d.makeTransTickFn(i),s=d.getTickSigns(i)[2],c=v.t+v.h,i.visible&&(d.drawTicks(t,i,{vals:"inside"===i.ticks?d.clipEnds(i,a):a,layer:p,path:d.makeTickPath(i,c,s),transFn:o}),d.drawLabels(t,i,{vals:a,layer:p,transFn:o,labelFns:d.makeLabelFns(i,c)}));function L(t){t.attr("width",(function(t){return Math.max(0,i.c2p(t.range[1])-i.c2p(t.range[0]))})).attr("x",(function(t){return i.c2p(t.range[0])})).attr("y",(function(t){return.5*(1-t.thickness)*A})).attr("height",(function(t){return t.thickness*A}))}var C=[m].concat(u.gauge.steps),P=f.selectAll("g.bg-bullet").data(C);P.enter().append("g").classed("bg-bullet",!0).append("rect"),P.select("rect").call(L).call(T),P.exit().remove();var I=f.selectAll("g.value-bullet").data([u.gauge.bar]);I.enter().append("g").classed("value-bullet",!0).append("rect"),I.select("rect").attr("height",M).attr("y",(A-M)/2).call(T),w(b)?I.select("rect").transition().duration(b.duration).ease(b.easing).each("end",(function(){_&&_()})).each("interrupt",(function(){_&&_()})).attr("width",Math.max(0,i.c2p(Math.min(u.gauge.axis.range[1],r[0].y)))):I.select("rect").attr("width","number"==typeof r[0].y?Math.max(0,i.c2p(Math.min(u.gauge.axis.range[1],r[0].y))):0);I.exit().remove();var O=r.filter((function(){return 
u.gauge.threshold.value||0===u.gauge.threshold.value})),z=f.selectAll("g.threshold-bullet").data(O);z.enter().append("g").classed("threshold-bullet",!0).append("line"),z.select("line").attr("x1",i.c2p(u.gauge.threshold.value)).attr("x2",i.c2p(u.gauge.threshold.value)).attr("y1",(1-u.gauge.threshold.thickness)/2*A).attr("y2",(1-(1-u.gauge.threshold.thickness)/2)*A).call(y.stroke,u.gauge.threshold.line.color).style("stroke-width",u.gauge.threshold.line.width),z.exit().remove();var D=f.selectAll("g.gauge-outline").data([g]);D.enter().append("g").classed("gauge-outline",!0).append("rect"),D.select("rect").call(L).call(T),D.exit().remove()}(t,0,e,{gauge:X,layer:Z,size:B,gaugeBg:C,gaugeOutline:P,transitionOpts:r,onComplete:g});var J=O.selectAll("text.title").data(e);J.exit().remove(),J.enter().append("text").classed("title",!0),J.attr("text-anchor",(function(){return R?x.right:x[I.title.align]})).text(I.title.text).call(f.font,I.title.font).call(p.convertToTspans,t),J.attr("transform",(function(){var t,e=B.l+B.w*b[I.title.align],r=h.titlePadding,n=f.bBox(J.node());if(z){if(D)if(I.gauge.axis.visible)t=f.bBox(W.node()).top-r-n.bottom;else t=B.t+B.h/2-U/2-n.bottom-r;R&&(t=E-(n.top+n.bottom)/2,e=B.l-h.bulletPadding*B.w)}else t=I._numbersTop-r-n.bottom;return l(e,t)}))}))}},{"../../components/color":361,"../../components/drawing":383,"../../constants/alignment":466,"../../lib":498,"../../lib/svg_text_utils":524,"../../plots/cartesian/axes":549,"../../plots/cartesian/axis_defaults":551,"../../plots/cartesian/layout_attributes":564,"../../plots/cartesian/position_defaults":567,"./constants":853,"@plotly/d3":58,"d3-interpolate":111}],857:[function(t,e,r){"use strict";var n=t("../../components/colorscale/attributes"),i=t("../../plots/cartesian/axis_format_attributes").axisHoverFormat,a=t("../../plots/template_attributes").hovertemplateAttrs,o=t("../mesh3d/attributes"),s=t("../../plots/attributes"),l=t("../../lib/extend").extendFlat,c=t("../../plot_api/edit_types").overrideAll;var 
u=e.exports=c(l({x:{valType:"data_array"},y:{valType:"data_array"},z:{valType:"data_array"},value:{valType:"data_array"},isomin:{valType:"number"},isomax:{valType:"number"},surface:{show:{valType:"boolean",dflt:!0},count:{valType:"integer",dflt:2,min:1},fill:{valType:"number",min:0,max:1,dflt:1},pattern:{valType:"flaglist",flags:["A","B","C","D","E"],extras:["all","odd","even"],dflt:"all"}},spaceframe:{show:{valType:"boolean",dflt:!1},fill:{valType:"number",min:0,max:1,dflt:.15}},slices:{x:{show:{valType:"boolean",dflt:!1},locations:{valType:"data_array",dflt:[]},fill:{valType:"number",min:0,max:1,dflt:1}},y:{show:{valType:"boolean",dflt:!1},locations:{valType:"data_array",dflt:[]},fill:{valType:"number",min:0,max:1,dflt:1}},z:{show:{valType:"boolean",dflt:!1},locations:{valType:"data_array",dflt:[]},fill:{valType:"number",min:0,max:1,dflt:1}}},caps:{x:{show:{valType:"boolean",dflt:!0},fill:{valType:"number",min:0,max:1,dflt:1}},y:{show:{valType:"boolean",dflt:!0},fill:{valType:"number",min:0,max:1,dflt:1}},z:{show:{valType:"boolean",dflt:!0},fill:{valType:"number",min:0,max:1,dflt:1}}},text:{valType:"string",dflt:"",arrayOk:!0},hovertext:{valType:"string",dflt:"",arrayOk:!0},hovertemplate:a(),xhoverformat:i("x"),yhoverformat:i("y"),zhoverformat:i("z"),valuehoverformat:i("value",1),showlegend:l({},s.showlegend,{dflt:!1})},n("",{colorAttr:"`value`",showScaleDflt:!0,editTypeOverride:"calc"}),{opacity:o.opacity,lightposition:o.lightposition,lighting:o.lighting,flatshading:o.flatshading,contour:o.contour,hoverinfo:l({},s.hoverinfo)}),"calc","nested");u.flatshading.dflt=!0,u.lighting.facenormalsepsilon.dflt=0,u.x.editType=u.y.editType=u.z.editType=u.value.editType="calc+clearAxisTypes",u.transforms=void 
0},{"../../components/colorscale/attributes":368,"../../lib/extend":488,"../../plot_api/edit_types":531,"../../plots/attributes":545,"../../plots/cartesian/axis_format_attributes":552,"../../plots/template_attributes":628,"../mesh3d/attributes":862}],858:[function(t,e,r){"use strict";var n=t("../../components/colorscale/calc"),i=t("../streamtube/calc").processGrid,a=t("../streamtube/calc").filter;e.exports=function(t,e){e._len=Math.min(e.x.length,e.y.length,e.z.length,e.value.length),e._x=a(e.x,e._len),e._y=a(e.y,e._len),e._z=a(e.z,e._len),e._value=a(e.value,e._len);var r=i(e);e._gridFill=r.fill,e._Xs=r.Xs,e._Ys=r.Ys,e._Zs=r.Zs,e._len=r.len;for(var o=1/0,s=-1/0,l=0;l0;r--){var n=Math.min(e[r],e[r-1]),i=Math.max(e[r],e[r-1]);if(i>n&&n-1}function R(t,e){return null===t?e:t}function F(e,r,n){C();var i,a,o,l=[r],c=[n];if(s>=1)l=[r],c=[n];else if(s>0){var u=function(t,e){var r=t[0],n=t[1],i=t[2],a=function(t,e,r){for(var n=[],i=0;i-1?n[p]:L(d,m,v);h[p]=x>-1?x:I(d,m,v,R(e,y))}i=h[0],a=h[1],o=h[2],t._meshI.push(i),t._meshJ.push(a),t._meshK.push(o),++g}}function B(t,e,r,n){var i=t[3];in&&(i=n);for(var a=(t[3]-i)/(t[3]-e[3]+1e-9),o=[],s=0;s<4;s++)o[s]=(1-a)*t[s]+a*e[s];return o}function N(t,e,r){return t>=e&&t<=r}function j(t){var e=.001*(E-S);return t>=S-e&&t<=E+e}function U(e){for(var r=[],n=0;n<4;n++){var i=e[n];r.push([t._x[i],t._y[i],t._z[i],t._value[i]])}return r}function V(t,e,r,n,i,a){a||(a=1),r=[-1,-1,-1];var o=!1,s=[N(e[0][3],n,i),N(e[1][3],n,i),N(e[2][3],n,i)];if(!s[0]&&!s[1]&&!s[2])return!1;var l=function(t,e,r){return j(e[0][3])&&j(e[1][3])&&j(e[2][3])?(F(t,e,r),!0):a<3&&V(t,e,r,S,E,++a)};if(s[0]&&s[1]&&s[2])return l(t,e,r)||o;var c=!1;return[[0,1,2],[2,0,1],[1,2,0]].forEach((function(a){if(s[a[0]]&&s[a[1]]&&!s[a[2]]){var u=e[a[0]],f=e[a[1]],h=e[a[2]],p=B(h,u,n,i),d=B(h,f,n,i);o=l(t,[d,p,u],[-1,-1,r[a[0]]])||o,o=l(t,[u,f,d],[r[a[0]],r[a[1]],-1])||o,c=!0}})),c||[[0,1,2],[1,2,0],[2,0,1]].forEach((function(a){if(s[a[0]]&&!s[a[1]]&&!s[a[2]]){var 
u=e[a[0]],f=e[a[1]],h=e[a[2]],p=B(f,u,n,i),d=B(h,u,n,i);o=l(t,[d,p,u],[-1,-1,r[a[0]]])||o,c=!0}})),o}function H(t,e,r,n){var i=!1,a=U(e),o=[N(a[0][3],r,n),N(a[1][3],r,n),N(a[2][3],r,n),N(a[3][3],r,n)];if(!(o[0]||o[1]||o[2]||o[3]))return i;if(o[0]&&o[1]&&o[2]&&o[3])return m&&(i=function(t,e,r){var n=function(n,i,a){F(t,[e[n],e[i],e[a]],[r[n],r[i],r[a]])};n(0,1,2),n(3,0,1),n(2,3,0),n(1,2,3)}(t,a,e)||i),i;var s=!1;return[[0,1,2,3],[3,0,1,2],[2,3,0,1],[1,2,3,0]].forEach((function(l){if(o[l[0]]&&o[l[1]]&&o[l[2]]&&!o[l[3]]){var c=a[l[0]],u=a[l[1]],f=a[l[2]],h=a[l[3]];if(m)i=F(t,[c,u,f],[e[l[0]],e[l[1]],e[l[2]]])||i;else{var p=B(h,c,r,n),d=B(h,u,r,n),g=B(h,f,r,n);i=F(null,[p,d,g],[-1,-1,-1])||i}s=!0}})),s?i:([[0,1,2,3],[1,2,3,0],[2,3,0,1],[3,0,1,2],[0,2,3,1],[1,3,2,0]].forEach((function(l){if(o[l[0]]&&o[l[1]]&&!o[l[2]]&&!o[l[3]]){var c=a[l[0]],u=a[l[1]],f=a[l[2]],h=a[l[3]],p=B(f,c,r,n),d=B(f,u,r,n),g=B(h,u,r,n),v=B(h,c,r,n);m?(i=F(t,[c,v,p],[e[l[0]],-1,-1])||i,i=F(t,[u,d,g],[e[l[1]],-1,-1])||i):i=function(t,e,r){var n=function(n,i,a){F(t,[e[n],e[i],e[a]],[r[n],r[i],r[a]])};n(0,1,2),n(2,3,0)}(null,[p,d,g,v],[-1,-1,-1,-1])||i,s=!0}})),s||[[0,1,2,3],[1,2,3,0],[2,3,0,1],[3,0,1,2]].forEach((function(l){if(o[l[0]]&&!o[l[1]]&&!o[l[2]]&&!o[l[3]]){var c=a[l[0]],u=a[l[1]],f=a[l[2]],h=a[l[3]],p=B(u,c,r,n),d=B(f,c,r,n),g=B(h,c,r,n);m?(i=F(t,[c,p,d],[e[l[0]],-1,-1])||i,i=F(t,[c,d,g],[e[l[0]],-1,-1])||i,i=F(t,[c,g,p],[e[l[0]],-1,-1])||i):i=F(null,[p,d,g],[-1,-1,-1])||i,s=!0}})),i)}function q(t,e,r,n,i,a,o,s,l,c,u){var f=!1;return d&&(D(t,"A")&&(f=H(null,[e,r,n,a],c,u)||f),D(t,"B")&&(f=H(null,[r,n,i,l],c,u)||f),D(t,"C")&&(f=H(null,[r,a,o,l],c,u)||f),D(t,"D")&&(f=H(null,[n,a,s,l],c,u)||f),D(t,"E")&&(f=H(null,[r,n,a,l],c,u)||f)),m&&(f=H(t,[r,n,a,l],c,u)||f),f}function G(t,e,r,n,i,a,o,s){return[!0===s[0]||V(t,U([e,r,n]),[e,r,n],a,o),!0===s[1]||V(t,U([n,i,e]),[n,i,e],a,o)]}function Y(t,e,r,n,i,a,o,s,l){return s?G(t,e,r,i,n,a,o,l):G(t,r,i,n,e,a,o,l)}function W(t,e,r,n,i,a,o){var 
s,l,c,u,f=!1,h=function(){f=V(t,[s,l,c],[-1,-1,-1],i,a)||f,f=V(t,[c,u,s],[-1,-1,-1],i,a)||f},p=o[0],d=o[1],m=o[2];return p&&(s=O(U([k(e,r-0,n-0)])[0],U([k(e-1,r-0,n-0)])[0],p),l=O(U([k(e,r-0,n-1)])[0],U([k(e-1,r-0,n-1)])[0],p),c=O(U([k(e,r-1,n-1)])[0],U([k(e-1,r-1,n-1)])[0],p),u=O(U([k(e,r-1,n-0)])[0],U([k(e-1,r-1,n-0)])[0],p),h()),d&&(s=O(U([k(e-0,r,n-0)])[0],U([k(e-0,r-1,n-0)])[0],d),l=O(U([k(e-0,r,n-1)])[0],U([k(e-0,r-1,n-1)])[0],d),c=O(U([k(e-1,r,n-1)])[0],U([k(e-1,r-1,n-1)])[0],d),u=O(U([k(e-1,r,n-0)])[0],U([k(e-1,r-1,n-0)])[0],d),h()),m&&(s=O(U([k(e-0,r-0,n)])[0],U([k(e-0,r-0,n-1)])[0],m),l=O(U([k(e-0,r-1,n)])[0],U([k(e-0,r-1,n-1)])[0],m),c=O(U([k(e-1,r-1,n)])[0],U([k(e-1,r-1,n-1)])[0],m),u=O(U([k(e-1,r-0,n)])[0],U([k(e-1,r-0,n-1)])[0],m),h()),f}function X(t,e,r,n,i,a,o,s,l,c,u,f){var h=t;return f?(d&&"even"===t&&(h=null),q(h,e,r,n,i,a,o,s,l,c,u)):(d&&"odd"===t&&(h=null),q(h,l,s,o,a,i,n,r,e,c,u))}function Z(t,e,r,n,i){for(var a=[],o=0,s=0;sMath.abs(d-M)?[A,d]:[d,M];$(e,T[0],T[1])}}var L=[[Math.min(S,M),Math.max(S,M)],[Math.min(A,E),Math.max(A,E)]];["x","y","z"].forEach((function(e){for(var r=[],n=0;n0&&(u.push(p.id),"x"===e?f.push([p.distRatio,0,0]):"y"===e?f.push([0,p.distRatio,0]):f.push([0,0,p.distRatio]))}else c=nt(1,"x"===e?b-1:"y"===e?_-1:w-1);u.length>0&&(r[i]="x"===e?tt(null,u,a,o,f,r[i]):"y"===e?et(null,u,a,o,f,r[i]):rt(null,u,a,o,f,r[i]),i++),c.length>0&&(r[i]="x"===e?Z(null,c,a,o,r[i]):"y"===e?J(null,c,a,o,r[i]):K(null,c,a,o,r[i]),i++)}var d=t.caps[e];d.show&&d.fill&&(z(d.fill),r[i]="x"===e?Z(null,[0,b-1],a,o,r[i]):"y"===e?J(null,[0,_-1],a,o,r[i]):K(null,[0,w-1],a,o,r[i]),i++)}})),0===g&&P(),t._meshX=n,t._meshY=i,t._meshZ=a,t._meshIntensity=o,t._Xs=v,t._Ys=y,t._Zs=x}(),t}e.exports={findNearestOnAxis:l,generateIsoMeshes:h,createIsosurfaceTrace:function(t,e){var r=t.glplot.gl,i=n({gl:r}),a=new c(t,i,e.uid);return 
i._trace=a,a.update(e),t.glplot.add(i),a}}},{"../../../stackgl_modules":1119,"../../components/colorscale":373,"../../lib/gl_format_color":494,"../../lib/str2rgbarray":523,"../../plots/gl3d/zip3":604}],860:[function(t,e,r){"use strict";var n=t("../../lib"),i=t("../../registry"),a=t("./attributes"),o=t("../../components/colorscale/defaults");function s(t,e,r,n,a){var s=a("isomin"),l=a("isomax");null!=l&&null!=s&&s>l&&(e.isomin=null,e.isomax=null);var c=a("x"),u=a("y"),f=a("z"),h=a("value");c&&c.length&&u&&u.length&&f&&f.length&&h&&h.length?(i.getComponentMethod("calendars","handleTraceDefaults")(t,e,["x","y","z"],n),a("valuehoverformat"),["x","y","z"].forEach((function(t){a(t+"hoverformat");var e="caps."+t;a(e+".show")&&a(e+".fill");var r="slices."+t;a(r+".show")&&(a(r+".fill"),a(r+".locations"))})),a("spaceframe.show")&&a("spaceframe.fill"),a("surface.show")&&(a("surface.count"),a("surface.fill"),a("surface.pattern")),a("contour.show")&&(a("contour.color"),a("contour.width")),["text","hovertext","hovertemplate","lighting.ambient","lighting.diffuse","lighting.specular","lighting.roughness","lighting.fresnel","lighting.vertexnormalsepsilon","lighting.facenormalsepsilon","lightposition.x","lightposition.y","lightposition.z","flatshading","opacity"].forEach((function(t){a(t)})),o(t,e,n,a,{prefix:"",cLetter:"c"}),e._length=null):e.visible=!1}e.exports={supplyDefaults:function(t,e,r,i){s(t,e,r,i,(function(r,i){return n.coerce(t,e,a,r,i)}))},supplyIsoDefaults:s}},{"../../components/colorscale/defaults":371,"../../lib":498,"../../registry":633,"./attributes":857}],861:[function(t,e,r){"use 
strict";e.exports={attributes:t("./attributes"),supplyDefaults:t("./defaults").supplyDefaults,calc:t("./calc"),colorbar:{min:"cmin",max:"cmax"},plot:t("./convert").createIsosurfaceTrace,moduleType:"trace",name:"isosurface",basePlotModule:t("../../plots/gl3d"),categories:["gl3d","showLegend"],meta:{}}},{"../../plots/gl3d":593,"./attributes":857,"./calc":858,"./convert":859,"./defaults":860}],862:[function(t,e,r){"use strict";var n=t("../../components/colorscale/attributes"),i=t("../../plots/cartesian/axis_format_attributes").axisHoverFormat,a=t("../../plots/template_attributes").hovertemplateAttrs,o=t("../surface/attributes"),s=t("../../plots/attributes"),l=t("../../lib/extend").extendFlat;e.exports=l({x:{valType:"data_array",editType:"calc+clearAxisTypes"},y:{valType:"data_array",editType:"calc+clearAxisTypes"},z:{valType:"data_array",editType:"calc+clearAxisTypes"},i:{valType:"data_array",editType:"calc"},j:{valType:"data_array",editType:"calc"},k:{valType:"data_array",editType:"calc"},text:{valType:"string",dflt:"",arrayOk:!0,editType:"calc"},hovertext:{valType:"string",dflt:"",arrayOk:!0,editType:"calc"},hovertemplate:a({editType:"calc"}),xhoverformat:i("x"),yhoverformat:i("y"),zhoverformat:i("z"),delaunayaxis:{valType:"enumerated",values:["x","y","z"],dflt:"z",editType:"calc"},alphahull:{valType:"number",dflt:-1,editType:"calc"},intensity:{valType:"data_array",editType:"calc"},intensitymode:{valType:"enumerated",values:["vertex","cell"],dflt:"vertex",editType:"calc"},color:{valType:"color",editType:"calc"},vertexcolor:{valType:"data_array",editType:"calc"},facecolor:{valType:"data_array",editType:"calc"},transforms:void 
0},n("",{colorAttr:"`intensity`",showScaleDflt:!0,editTypeOverride:"calc"}),{opacity:o.opacity,flatshading:{valType:"boolean",dflt:!1,editType:"calc"},contour:{show:l({},o.contours.x.show,{}),color:o.contours.x.color,width:o.contours.x.width,editType:"calc"},lightposition:{x:l({},o.lightposition.x,{dflt:1e5}),y:l({},o.lightposition.y,{dflt:1e5}),z:l({},o.lightposition.z,{dflt:0}),editType:"calc"},lighting:l({vertexnormalsepsilon:{valType:"number",min:0,max:1,dflt:1e-12,editType:"calc"},facenormalsepsilon:{valType:"number",min:0,max:1,dflt:1e-6,editType:"calc"},editType:"calc"},o.lighting),hoverinfo:l({},s.hoverinfo,{editType:"calc"}),showlegend:l({},s.showlegend,{dflt:!1})})},{"../../components/colorscale/attributes":368,"../../lib/extend":488,"../../plots/attributes":545,"../../plots/cartesian/axis_format_attributes":552,"../../plots/template_attributes":628,"../surface/attributes":1056}],863:[function(t,e,r){"use strict";var n=t("../../components/colorscale/calc");e.exports=function(t,e){e.intensity&&n(t,e,{vals:e.intensity,containerStr:"",cLetter:"c"})}},{"../../components/colorscale/calc":369}],864:[function(t,e,r){"use strict";var n=t("../../../stackgl_modules").gl_mesh3d,i=t("../../../stackgl_modules").delaunay_triangulate,a=t("../../../stackgl_modules").alpha_shape,o=t("../../../stackgl_modules").convex_hull,s=t("../../lib/gl_format_color").parseColorScale,l=t("../../lib/str2rgbarray"),c=t("../../components/colorscale").extractOpts,u=t("../../plots/gl3d/zip3");function f(t,e,r){this.scene=t,this.uid=r,this.mesh=e,this.name="",this.color="#fff",this.data=null,this.showContour=!1}var h=f.prototype;function p(t){for(var e=[],r=t.length,n=0;n=e-.5)return!1;return!0}h.handlePick=function(t){if(t.object===this.mesh){var e=t.index=t.data.index;t.data._cellCenter?t.traceCoordinate=t.data.dataCoordinate:t.traceCoordinate=[this.data.x[e],this.data.y[e],this.data.z[e]];var r=this.data.hovertext||this.data.text;return Array.isArray(r)&&void 
0!==r[e]?t.textLabel=r[e]:r&&(t.textLabel=r),!0}},h.update=function(t){var e=this.scene,r=e.fullSceneLayout;this.data=t;var n,f=t.x.length,h=u(d(r.xaxis,t.x,e.dataScale[0],t.xcalendar),d(r.yaxis,t.y,e.dataScale[1],t.ycalendar),d(r.zaxis,t.z,e.dataScale[2],t.zcalendar));if(t.i&&t.j&&t.k){if(t.i.length!==t.j.length||t.j.length!==t.k.length||!g(t.i,f)||!g(t.j,f)||!g(t.k,f))return;n=u(m(t.i),m(t.j),m(t.k))}else n=0===t.alphahull?o(h):t.alphahull>0?a(t.alphahull,h):function(t,e){for(var r=["x","y","z"].indexOf(t),n=[],a=e.length,o=0;ov):g=A>w,v=A;var M=c(w,T,k,A);M.pos=_,M.yc=(w+A)/2,M.i=b,M.dir=g?"increasing":"decreasing",M.x=M.pos,M.y=[k,T],y&&(M.orig_p=r[b]),d&&(M.tx=e.text[b]),m&&(M.htx=e.hovertext[b]),x.push(M)}else x.push({pos:_,empty:!0})}return e._extremes[l._id]=a.findExtremes(l,n.concat(h,f),{padded:!0}),x.length&&(x[0].t={labels:{open:i(t,"open:")+" ",high:i(t,"high:")+" ",low:i(t,"low:")+" ",close:i(t,"close:")+" "}}),x}e.exports={calc:function(t,e){var r=a.getFromId(t,e.xaxis),i=a.getFromId(t,e.yaxis),s=function(t,e,r){var i=r._minDiff;if(!i){var a,s=t._fullData,l=[];for(i=1/0,a=0;a"+c.labels[x]+n.hoverLabelText(s,b,l.yhoverformat):((y=i.extendFlat({},h)).y0=y.y1=_,y.yLabelVal=b,y.yLabel=c.labels[x]+n.hoverLabelText(s,b,l.yhoverformat),y.name="",f.push(y),g[b]=y)}return f}function h(t,e,r,i){var a=t.cd,o=t.ya,l=a[0].trace,f=a[0].t,h=u(t,e,r,i);if(!h)return[];var p=a[h.index],d=h.index=p.i,m=p.dir;function g(t){return f.labels[t]+n.hoverLabelText(o,l[t][d],l.yhoverformat)}var v=p.hi||l.hoverinfo,y=v.split("+"),x="all"===v,b=x||-1!==y.indexOf("y"),_=x||-1!==y.indexOf("text"),w=b?[g("open"),g("high"),g("low"),g("close")+" "+c[m]]:[];return _&&s(p,l,w),h.extraText=w.join("
"),h.y0=h.y1=o.c2p(p.yc,!0),[h]}e.exports={hoverPoints:function(t,e,r,n){return t.cd[0].trace.hoverlabel.split?f(t,e,r,n):h(t,e,r,n)},hoverSplit:f,hoverOnPoints:h}},{"../../components/color":361,"../../components/fx":401,"../../constants/delta.js":468,"../../lib":498,"../../plots/cartesian/axes":549}],871:[function(t,e,r){"use strict";e.exports={moduleType:"trace",name:"ohlc",basePlotModule:t("../../plots/cartesian"),categories:["cartesian","svg","showLegend"],meta:{},attributes:t("./attributes"),supplyDefaults:t("./defaults"),calc:t("./calc").calc,plot:t("./plot"),style:t("./style"),hoverPoints:t("./hover").hoverPoints,selectPoints:t("./select")}},{"../../plots/cartesian":563,"./attributes":867,"./calc":868,"./defaults":869,"./hover":870,"./plot":873,"./select":874,"./style":875}],872:[function(t,e,r){"use strict";var n=t("../../registry"),i=t("../../lib");e.exports=function(t,e,r,a){var o=r("x"),s=r("open"),l=r("high"),c=r("low"),u=r("close");if(r("hoverlabel.split"),n.getComponentMethod("calendars","handleTraceDefaults")(t,e,["x"],a),s&&l&&c&&u){var f=Math.min(s.length,l.length,c.length,u.length);return o&&(f=Math.min(f,i.minRowLength(o))),e._length=f,f}}},{"../../lib":498,"../../registry":633}],873:[function(t,e,r){"use strict";var n=t("@plotly/d3"),i=t("../../lib");e.exports=function(t,e,r,a){var o=e.yaxis,s=e.xaxis,l=!!s.rangebreaks;i.makeTraceGroups(a,r,"trace ohlc").each((function(t){var e=n.select(this),r=t[0],a=r.t;if(!0!==r.trace.visible||a.empty)e.remove();else{var c=a.tickLen,u=e.selectAll("path").data(i.identity);u.enter().append("path"),u.exit().remove(),u.attr("d",(function(t){if(t.empty)return"M0,0Z";var e=s.c2p(t.pos-c,!0),r=s.c2p(t.pos+c,!0),n=l?(e+r)/2:s.c2p(t.pos,!0);return"M"+e+","+o.c2p(t.o,!0)+"H"+n+"M"+n+","+o.c2p(t.h,!0)+"V"+o.c2p(t.l,!0)+"M"+r+","+o.c2p(t.c,!0)+"H"+n}))}}))}},{"../../lib":498,"@plotly/d3":58}],874:[function(t,e,r){"use strict";e.exports=function(t,e){var 
r,n=t.cd,i=t.xaxis,a=t.yaxis,o=[],s=n[0].t.bPos||0;if(!1===e)for(r=0;r=t.length)return!1;if(void 0!==e[t[r]])return!1;e[t[r]]=!0}return!0}(t.map((function(t){return t.displayindex}))))for(e=0;e0;c&&(o="array");var u=r("categoryorder",o);"array"===u?(r("categoryarray"),r("ticktext")):(delete t.categoryarray,delete t.ticktext),c||"array"!==u||(e.categoryorder="trace")}}e.exports=function(t,e,r,f){function h(r,i){return n.coerce(t,e,l,r,i)}var p=s(t,e,{name:"dimensions",handleItemDefaults:u}),d=function(t,e,r,o,s){s("line.shape"),s("line.hovertemplate");var l=s("line.color",o.colorway[0]);if(i(t,"line")&&n.isArrayOrTypedArray(l)){if(l.length)return s("line.colorscale"),a(t,e,o,s,{prefix:"line.",cLetter:"c"}),l.length;e.line.color=r}return 1/0}(t,e,r,f,h);o(e,f,h),Array.isArray(p)&&p.length||(e.visible=!1),c(e,p,"values",d),h("hoveron"),h("hovertemplate"),h("arrangement"),h("bundlecolors"),h("sortpaths"),h("counts");var m={family:f.font.family,size:Math.round(f.font.size),color:f.font.color};n.coerceFont(h,"labelfont",m);var g={family:f.font.family,size:Math.round(f.font.size/1.2),color:f.font.color};n.coerceFont(h,"tickfont",g)}},{"../../components/colorscale/defaults":371,"../../components/colorscale/helpers":372,"../../lib":498,"../../plots/array_container_defaults":544,"../../plots/domain":579,"../parcoords/merge_length":893,"./attributes":876}],880:[function(t,e,r){"use strict";e.exports={attributes:t("./attributes"),supplyDefaults:t("./defaults"),calc:t("./calc"),plot:t("./plot"),colorbar:{container:"line",min:"cmin",max:"cmax"},moduleType:"trace",name:"parcats",basePlotModule:t("./base_plot"),categories:["noOpacity"],meta:{}}},{"./attributes":876,"./base_plot":877,"./calc":878,"./defaults":879,"./plot":882}],881:[function(t,e,r){"use strict";var 
n=t("@plotly/d3"),i=t("d3-interpolate").interpolateNumber,a=t("../../plot_api/plot_api"),o=t("../../components/fx"),s=t("../../lib"),l=s.strTranslate,c=t("../../components/drawing"),u=t("tinycolor2"),f=t("../../lib/svg_text_utils");function h(t,e,r,i){var a=t.map(F.bind(0,e,r)),o=i.selectAll("g.parcatslayer").data([null]);o.enter().append("g").attr("class","parcatslayer").style("pointer-events","all");var u=o.selectAll("g.trace.parcats").data(a,p),h=u.enter().append("g").attr("class","trace parcats");u.attr("transform",(function(t){return l(t.x,t.y)})),h.append("g").attr("class","paths");var y=u.select("g.paths").selectAll("path.path").data((function(t){return t.paths}),p);y.attr("fill",(function(t){return t.model.color}));var x=y.enter().append("path").attr("class","path").attr("stroke-opacity",0).attr("fill",(function(t){return t.model.color})).attr("fill-opacity",0);_(x),y.attr("d",(function(t){return t.svgD})),x.empty()||y.sort(m),y.exit().remove(),y.on("mouseover",g).on("mouseout",v).on("click",b),h.append("g").attr("class","dimensions");var w=u.select("g.dimensions").selectAll("g.dimension").data((function(t){return t.dimensions}),p);w.enter().append("g").attr("class","dimension"),w.attr("transform",(function(t){return l(t.x,0)})),w.exit().remove();var A=w.selectAll("g.category").data((function(t){return t.categories}),p),M=A.enter().append("g").attr("class","category");A.attr("transform",(function(t){return l(0,t.y)})),M.append("rect").attr("class","catrect").attr("pointer-events","none"),A.select("rect.catrect").attr("fill","none").attr("width",(function(t){return t.width})).attr("height",(function(t){return t.height})),T(M);var S=A.selectAll("rect.bandrect").data((function(t){return t.bands}),p);S.each((function(){s.raiseToTop(this)})),S.attr("fill",(function(t){return t.color}));var E=S.enter().append("rect").attr("class","bandrect").attr("stroke-opacity",0).attr("fill",(function(t){return 
t.color})).attr("fill-opacity",0);S.attr("fill",(function(t){return t.color})).attr("width",(function(t){return t.width})).attr("height",(function(t){return t.height})).attr("y",(function(t){return t.y})).attr("cursor",(function(t){return"fixed"===t.parcatsViewModel.arrangement?"default":"perpendicular"===t.parcatsViewModel.arrangement?"ns-resize":"move"})),k(E),S.exit().remove(),M.append("text").attr("class","catlabel").attr("pointer-events","none");var z=e._fullLayout.paper_bgcolor;A.select("text.catlabel").attr("text-anchor",(function(t){return d(t)?"start":"end"})).attr("alignment-baseline","middle").style("text-shadow",f.makeTextShadow(z)).style("fill","rgb(0, 0, 0)").attr("x",(function(t){return d(t)?t.width+5:-5})).attr("y",(function(t){return t.height/2})).text((function(t){return t.model.categoryLabel})).each((function(t){c.font(n.select(this),t.parcatsViewModel.categorylabelfont),f.convertToTspans(n.select(this),e)})),M.append("text").attr("class","dimlabel"),A.select("text.dimlabel").attr("text-anchor","middle").attr("alignment-baseline","baseline").attr("cursor",(function(t){return"fixed"===t.parcatsViewModel.arrangement?"default":"ew-resize"})).attr("x",(function(t){return t.width/2})).attr("y",-5).text((function(t,e){return 0===e?t.parcatsViewModel.model.dimensions[t.model.dimensionInd].dimensionLabel:null})).each((function(t){c.font(n.select(this),t.parcatsViewModel.labelfont)})),A.selectAll("rect.bandrect").on("mouseover",L).on("mouseout",C),A.exit().remove(),w.call(n.behavior.drag().origin((function(t){return{x:t.x,y:0}})).on("dragstart",P).on("drag",I).on("dragend",O)),u.each((function(t){t.traceSelection=n.select(this),t.pathSelection=n.select(this).selectAll("g.paths").selectAll("path.path"),t.dimensionSelection=n.select(this).selectAll("g.dimensions").selectAll("g.dimension")})),u.exit().remove()}function p(t){return t.key}function d(t){var e=t.parcatsViewModel.dimensions.length,r=t.parcatsViewModel.dimensions[e-1].model.dimensionInd;return 
t.model.dimensionInd===r}function m(t,e){return t.model.rawColor>e.model.rawColor?1:t.model.rawColor"),L=n.mouse(f)[0];o.loneHover({trace:h,x:b-d.left+m.left,y:_-d.top+m.top,text:E,color:t.model.color,borderColor:"black",fontFamily:'Monaco, "Courier New", monospace',fontSize:10,fontColor:T,idealAlign:L1&&h.displayInd===f.dimensions.length-1?(i=c.left,a="left"):(i=c.left+c.width,a="right");var m=u.model.count,g=u.model.categoryLabel,v=m/u.parcatsViewModel.model.count,y={countLabel:m,categoryLabel:g,probabilityLabel:v.toFixed(3)},x=[];-1!==u.parcatsViewModel.hoverinfoItems.indexOf("count")&&x.push(["Count:",y.countLabel].join(" ")),-1!==u.parcatsViewModel.hoverinfoItems.indexOf("probability")&&x.push(["P("+y.categoryLabel+"):",y.probabilityLabel].join(" "));var b=x.join("
");return{trace:p,x:o*(i-e.left),y:s*(d-e.top),text:b,color:"lightgray",borderColor:"black",fontFamily:'Monaco, "Courier New", monospace',fontSize:12,fontColor:"black",idealAlign:a,hovertemplate:p.hovertemplate,hovertemplateLabels:y,eventData:[{data:p._input,fullData:p,count:m,category:g,probability:v}]}}function L(t){if(!t.parcatsViewModel.dragDimension&&-1===t.parcatsViewModel.hoverinfoItems.indexOf("skip")){if(n.mouse(this)[1]<-1)return;var e,r=t.parcatsViewModel.graphDiv,i=r._fullLayout,a=i._paperdiv.node().getBoundingClientRect(),l=t.parcatsViewModel.hoveron;if("color"===l?(!function(t){var e=n.select(t).datum(),r=A(e);w(r),r.each((function(){s.raiseToTop(this)})),n.select(t.parentNode).selectAll("rect.bandrect").filter((function(t){return t.color===e.color})).each((function(){s.raiseToTop(this),n.select(this).attr("stroke","black").attr("stroke-width",1.5)}))}(this),S(this,"plotly_hover",n.event)):(!function(t){n.select(t.parentNode).selectAll("rect.bandrect").each((function(t){var e=A(t);w(e),e.each((function(){s.raiseToTop(this)}))})),n.select(t.parentNode).select("rect.catrect").attr("stroke","black").attr("stroke-width",2.5)}(this),M(this,"plotly_hover",n.event)),-1===t.parcatsViewModel.hoverinfoItems.indexOf("none"))"category"===l?e=E(r,a,this):"color"===l?e=function(t,e,r){t._fullLayout._calcInverseTransform(t);var i,a,o=t._fullLayout._invScaleX,s=t._fullLayout._invScaleY,l=r.getBoundingClientRect(),c=n.select(r).datum(),f=c.categoryViewModel,h=f.parcatsViewModel,p=h.model.dimensions[f.model.dimensionInd],d=h.trace,m=l.y+l.height/2;h.dimensions.length>1&&p.displayInd===h.dimensions.length-1?(i=l.left,a="left"):(i=l.left+l.width,a="right");var g=f.model.categoryLabel,v=c.parcatsViewModel.model.count,y=0;c.categoryViewModel.bands.forEach((function(t){t.color===c.color&&(y+=t.count)}));var x=f.model.count,b=0;h.pathSelection.each((function(t){t.model.color===c.color&&(b+=t.model.count)}));var 
_=y/v,w=y/b,T=y/x,k={countLabel:v,categoryLabel:g,probabilityLabel:_.toFixed(3)},A=[];-1!==f.parcatsViewModel.hoverinfoItems.indexOf("count")&&A.push(["Count:",k.countLabel].join(" ")),-1!==f.parcatsViewModel.hoverinfoItems.indexOf("probability")&&(A.push("P(color \u2229 "+g+"): "+k.probabilityLabel),A.push("P("+g+" | color): "+w.toFixed(3)),A.push("P(color | "+g+"): "+T.toFixed(3)));var M=A.join("
"),S=u.mostReadable(c.color,["black","white"]);return{trace:d,x:o*(i-e.left),y:s*(m-e.top),text:M,color:c.color,borderColor:"black",fontFamily:'Monaco, "Courier New", monospace',fontColor:S,fontSize:10,idealAlign:a,hovertemplate:d.hovertemplate,hovertemplateLabels:k,eventData:[{data:d._input,fullData:d,category:g,count:v,probability:_,categorycount:x,colorcount:b,bandcolorcount:y}]}}(r,a,this):"dimension"===l&&(e=function(t,e,r){var i=[];return n.select(r.parentNode.parentNode).selectAll("g.category").select("rect.catrect").each((function(){i.push(E(t,e,this))})),i}(r,a,this)),e&&o.loneHover(e,{container:i._hoverlayer.node(),outerContainer:i._paper.node(),gd:r})}}function C(t){var e=t.parcatsViewModel;if(!e.dragDimension&&(_(e.pathSelection),T(e.dimensionSelection.selectAll("g.category")),k(e.dimensionSelection.selectAll("g.category").selectAll("rect.bandrect")),o.loneUnhover(e.graphDiv._fullLayout._hoverlayer.node()),e.pathSelection.sort(m),-1===e.hoverinfoItems.indexOf("skip"))){"color"===t.parcatsViewModel.hoveron?S(this,"plotly_unhover",n.event):M(this,"plotly_unhover",n.event)}}function P(t){"fixed"!==t.parcatsViewModel.arrangement&&(t.dragDimensionDisplayInd=t.model.displayInd,t.initialDragDimensionDisplayInds=t.parcatsViewModel.model.dimensions.map((function(t){return t.displayInd})),t.dragHasMoved=!1,t.dragCategoryDisplayInd=null,n.select(this).selectAll("g.category").select("rect.catrect").each((function(e){var r=n.mouse(this)[0],i=n.mouse(this)[1];-2<=r&&r<=e.width+2&&-2<=i&&i<=e.height+2&&(t.dragCategoryDisplayInd=e.model.displayInd,t.initialDragCategoryDisplayInds=t.model.categories.map((function(t){return 
t.displayInd})),e.model.dragY=e.y,s.raiseToTop(this.parentNode),n.select(this.parentNode).selectAll("rect.bandrect").each((function(e){e.yf.y+f.height/2&&(o.model.displayInd=f.model.displayInd,f.model.displayInd=l),t.dragCategoryDisplayInd=o.model.displayInd}if(null===t.dragCategoryDisplayInd||"freeform"===t.parcatsViewModel.arrangement){a.model.dragX=n.event.x;var h=t.parcatsViewModel.dimensions[r],p=t.parcatsViewModel.dimensions[i];void 0!==h&&a.model.dragXp.x&&(a.model.displayInd=p.model.displayInd,p.model.displayInd=t.dragDimensionDisplayInd),t.dragDimensionDisplayInd=a.model.displayInd}j(t.parcatsViewModel),N(t.parcatsViewModel),R(t.parcatsViewModel),D(t.parcatsViewModel)}}function O(t){if("fixed"!==t.parcatsViewModel.arrangement&&null!==t.dragDimensionDisplayInd){n.select(this).selectAll("text").attr("font-weight","normal");var e={},r=z(t.parcatsViewModel),i=t.parcatsViewModel.model.dimensions.map((function(t){return t.displayInd})),o=t.initialDragDimensionDisplayInds.some((function(t,e){return t!==i[e]}));o&&i.forEach((function(r,n){var i=t.parcatsViewModel.model.dimensions[n].containerInd;e["dimensions["+i+"].displayindex"]=r}));var s=!1;if(null!==t.dragCategoryDisplayInd){var l=t.model.categories.map((function(t){return t.displayInd}));if(s=t.initialDragCategoryDisplayInds.some((function(t,e){return t!==l[e]}))){var c=t.model.categories.slice().sort((function(t,e){return t.displayInd-e.displayInd})),u=c.map((function(t){return t.categoryValue})),f=c.map((function(t){return 
t.categoryLabel}));e["dimensions["+t.model.containerInd+"].categoryarray"]=[u],e["dimensions["+t.model.containerInd+"].ticktext"]=[f],e["dimensions["+t.model.containerInd+"].categoryorder"]="array"}}if(-1===t.parcatsViewModel.hoverinfoItems.indexOf("skip")&&!t.dragHasMoved&&t.potentialClickBand&&("color"===t.parcatsViewModel.hoveron?S(t.potentialClickBand,"plotly_click",n.event.sourceEvent):M(t.potentialClickBand,"plotly_click",n.event.sourceEvent)),t.model.dragX=null,null!==t.dragCategoryDisplayInd)t.parcatsViewModel.dimensions[t.dragDimensionDisplayInd].categories[t.dragCategoryDisplayInd].model.dragY=null,t.dragCategoryDisplayInd=null;t.dragDimensionDisplayInd=null,t.parcatsViewModel.dragDimension=null,t.dragHasMoved=null,t.potentialClickBand=null,j(t.parcatsViewModel),N(t.parcatsViewModel),n.transition().duration(300).ease("cubic-in-out").each((function(){R(t.parcatsViewModel,!0),D(t.parcatsViewModel,!0)})).each("end",(function(){(o||s)&&a.restyle(t.parcatsViewModel.graphDiv,e,[r])}))}}function z(t){for(var e,r=t.graphDiv._fullData,n=0;n=0;s--)u+="C"+c[s]+","+(e[s+1]+n)+" "+l[s]+","+(e[s]+n)+" "+(t[s]+r[s])+","+(e[s]+n),u+="l-"+r[s]+",0 ";return u+="Z"}function N(t){var e=t.dimensions,r=t.model,n=e.map((function(t){return t.categories.map((function(t){return t.y}))})),i=t.model.dimensions.map((function(t){return t.categories.map((function(t){return t.displayInd}))})),a=t.model.dimensions.map((function(t){return t.displayInd})),o=t.dimensions.map((function(t){return t.model.dimensionInd})),s=e.map((function(t){return t.x})),l=e.map((function(t){return t.width})),c=[];for(var u in r.paths)r.paths.hasOwnProperty(u)&&c.push(r.paths[u]);function f(t){var e=t.categoryInds.map((function(t,e){return i[e][t]}));return o.map((function(t){return e[t]}))}c.sort((function(e,r){var 
n=f(e),i=f(r);return"backward"===t.sortpaths&&(n.reverse(),i.reverse()),n.push(e.valueInds[0]),i.push(r.valueInds[0]),t.bundlecolors&&(n.unshift(e.rawColor),i.unshift(r.rawColor)),ni?1:0}));for(var h=new Array(c.length),p=e[0].model.count,d=e[0].categories.map((function(t){return t.height})).reduce((function(t,e){return t+e})),m=0;m0?d*(v.count/p):0;for(var y,x=new Array(n.length),b=0;b1?(t.width-80-16)/(n-1):0)*i;var a,o,s,l,c,u=[],f=t.model.maxCats,h=e.categories.length,p=e.count,d=t.height-8*(f-1),m=8*(f-h)/2,g=e.categories.map((function(t){return{displayInd:t.displayInd,categoryInd:t.categoryInd}}));for(g.sort((function(t,e){return t.displayInd-e.displayInd})),c=0;c0?o.count/p*d:0,s={key:o.valueInds[0],model:o,width:16,height:a,y:null!==o.dragY?o.dragY:m,bands:[],parcatsViewModel:t},m=m+a+8,u.push(s);return{key:e.dimensionInd,x:null!==e.dragX?e.dragX:r,y:0,width:16,model:e,categories:u,parcatsViewModel:t,dragCategoryDisplayInd:null,dragDimensionDisplayInd:null,initialDragDimensionDisplayInds:null,initialDragCategoryDisplayInds:null,dragHasMoved:null,potentialClickBand:null}}e.exports=function(t,e,r,n){h(r,t,n,e)}},{"../../components/drawing":383,"../../components/fx":401,"../../lib":498,"../../lib/svg_text_utils":524,"../../plot_api/plot_api":535,"@plotly/d3":58,"d3-interpolate":111,tinycolor2:307}],882:[function(t,e,r){"use strict";var n=t("./parcats");e.exports=function(t,e,r,i){var a=t._fullLayout,o=a._paper,s=a._size;n(t,o,e,{width:s.w,height:s.h,margin:{t:s.t,r:s.r,b:s.b,l:s.l}},r,i)}},{"./parcats":881}],883:[function(t,e,r){"use strict";var 
n=t("../../components/colorscale/attributes"),i=t("../../plots/cartesian/layout_attributes"),a=t("../../plots/font_attributes"),o=t("../../plots/domain").attributes,s=t("../../lib/extend").extendFlat,l=t("../../plot_api/plot_template").templatedArray;e.exports={domain:o({name:"parcoords",trace:!0,editType:"plot"}),labelangle:{valType:"angle",dflt:0,editType:"plot"},labelside:{valType:"enumerated",values:["top","bottom"],dflt:"top",editType:"plot"},labelfont:a({editType:"plot"}),tickfont:a({editType:"plot"}),rangefont:a({editType:"plot"}),dimensions:l("dimension",{label:{valType:"string",editType:"plot"},tickvals:s({},i.tickvals,{editType:"plot"}),ticktext:s({},i.ticktext,{editType:"plot"}),tickformat:s({},i.tickformat,{editType:"plot"}),visible:{valType:"boolean",dflt:!0,editType:"plot"},range:{valType:"info_array",items:[{valType:"number",editType:"plot"},{valType:"number",editType:"plot"}],editType:"plot"},constraintrange:{valType:"info_array",freeLength:!0,dimensions:"1-2",items:[{valType:"any",editType:"plot"},{valType:"any",editType:"plot"}],editType:"plot"},multiselect:{valType:"boolean",dflt:!0,editType:"plot"},values:{valType:"data_array",editType:"calc"},editType:"calc"}),line:s({editType:"calc"},n("line",{colorscaleDflt:"Viridis",autoColorDflt:!1,editTypeOverride:"calc"}))}},{"../../components/colorscale/attributes":368,"../../lib/extend":488,"../../plot_api/plot_template":538,"../../plots/cartesian/layout_attributes":564,"../../plots/domain":579,"../../plots/font_attributes":580}],884:[function(t,e,r){"use strict";var n=t("./constants"),i=t("@plotly/d3"),a=t("../../lib/gup").keyFun,o=t("../../lib/gup").repeat,s=t("../../lib").sorterAsc,l=t("../../lib").strTranslate,c=n.bar.snapRatio;function u(t,e){return t*(1-c)+e*c}var f=n.bar.snapClose;function h(t,e){return t*(1-f)+e*f}function p(t,e,r,n){if(function(t,e){for(var r=0;r=e[r][0]&&t<=e[r][1])return!0;return!1}(r,n))return r;var i=t?-1:1,a=0,o=e.length-1;if(i<0){var s=a;a=o,o=s}for(var 
l=e[a],c=l,f=a;i*fe){h=r;break}}if(a=u,isNaN(a)&&(a=isNaN(f)||isNaN(h)?isNaN(f)?h:f:e-c[f][1]t[1]+r||e=.9*t[1]+.1*t[0]?"n":e<=.9*t[0]+.1*t[1]?"s":"ns"}(d,e);m&&(o.interval=l[a],o.intervalPix=d,o.region=m)}}if(t.ordinal&&!o.region){var g=t.unitTickvals,y=t.unitToPaddedPx.invert(e);for(r=0;r=x[0]&&y<=x[1]){o.clickableOrdinalRange=x;break}}}return o}function w(t,e){i.event.sourceEvent.stopPropagation();var r=e.height-i.mouse(t)[1]-2*n.verticalPadding,a=e.brush.svgBrush;a.wasDragged=!0,a._dragging=!0,a.grabbingBar?a.newExtent=[r-a.grabPoint,r+a.barLength-a.grabPoint].map(e.unitToPaddedPx.invert):a.newExtent=[a.startExtent,e.unitToPaddedPx.invert(r)].sort(s),e.brush.filterSpecified=!0,a.extent=a.stayingIntervals.concat([a.newExtent]),a.brushCallback(e),b(t.parentNode)}function T(t,e){var r=_(e,e.height-i.mouse(t)[1]-2*n.verticalPadding),a="crosshair";r.clickableOrdinalRange?a="pointer":r.region&&(a=r.region+"-resize"),i.select(document.body).style("cursor",a)}function k(t){t.on("mousemove",(function(t){i.event.preventDefault(),t.parent.inBrushDrag||T(this,t)})).on("mouseleave",(function(t){t.parent.inBrushDrag||y()})).call(i.behavior.drag().on("dragstart",(function(t){!function(t,e){i.event.sourceEvent.stopPropagation();var r=e.height-i.mouse(t)[1]-2*n.verticalPadding,a=e.unitToPaddedPx.invert(r),o=e.brush,s=_(e,r),l=s.interval,c=o.svgBrush;if(c.wasDragged=!1,c.grabbingBar="ns"===s.region,c.grabbingBar){var u=l.map(e.unitToPaddedPx);c.grabPoint=r-u[0]-n.verticalPadding,c.barLength=u[1]-u[0]}c.clickableOrdinalRange=s.clickableOrdinalRange,c.stayingIntervals=e.multiselect&&o.filterSpecified?o.filter.getConsolidated():[],l&&(c.stayingIntervals=c.stayingIntervals.filter((function(t){return t[0]!==l[0]&&t[1]!==l[1]}))),c.startExtent=s.region?l["s"===s.region?1:0]:a,e.parent.inBrushDrag=!0,c.brushStartCallback()}(this,t)})).on("drag",(function(t){w(this,t)})).on("dragend",(function(t){!function(t,e){var 
r=e.brush,n=r.filter,a=r.svgBrush;a._dragging||(T(t,e),w(t,e),e.brush.svgBrush.wasDragged=!1),a._dragging=!1,i.event.sourceEvent.stopPropagation();var o=a.grabbingBar;if(a.grabbingBar=!1,a.grabLocation=void 0,e.parent.inBrushDrag=!1,y(),!a.wasDragged)return a.wasDragged=void 0,a.clickableOrdinalRange?r.filterSpecified&&e.multiselect?a.extent.push(a.clickableOrdinalRange):(a.extent=[a.clickableOrdinalRange],r.filterSpecified=!0):o?(a.extent=a.stayingIntervals,0===a.extent.length&&M(r)):M(r),a.brushCallback(e),b(t.parentNode),void a.brushEndCallback(r.filterSpecified?n.getConsolidated():[]);var s=function(){n.set(n.getConsolidated())};if(e.ordinal){var l=e.unitTickvals;l[l.length-1]a.newExtent[0];a.extent=a.stayingIntervals.concat(c?[a.newExtent]:[]),a.extent.length||M(r),a.brushCallback(e),c?b(t.parentNode,s):(s(),b(t.parentNode))}else s();a.brushEndCallback(r.filterSpecified?n.getConsolidated():[])}(this,t)})))}function A(t,e){return t[0]-e[0]}function M(t){t.filterSpecified=!1,t.svgBrush.extent=[[-1/0,1/0]]}function S(t){for(var e,r=t.slice(),n=[],i=r.shift();i;){for(e=i.slice();(i=r.shift())&&i[0]<=e[1];)e[1]=Math.max(e[1],i[1]);n.push(e)}return 1===n.length&&n[0][0]>n[0][1]&&(n=[]),n}e.exports={makeBrush:function(t,e,r,n,i,a){var o,l=function(){var t,e,r=[];return{set:function(n){1===(r=n.map((function(t){return t.slice().sort(s)})).sort(A)).length&&r[0][0]===-1/0&&r[0][1]===1/0&&(r=[[0,-1]]),t=S(r),e=r.reduce((function(t,e){return[Math.min(t[0],e[0]),Math.max(t[1],e[1])]}),[1/0,-1/0])},get:function(){return r.slice()},getConsolidated:function(){return t},getBounds:function(){return e}}}();return l.set(r),{filter:l,filterSpecified:e,svgBrush:{extent:[],brushStartCallback:n,brushCallback:(o=i,function(t){var e=t.brush,r=function(t){return t.svgBrush.extent.map((function(t){return t.slice()}))}(e).slice();e.filter.set(r),o()}),brushEndCallback:a}}},ensureAxisBrush:function(t,e){var 
r=t.selectAll("."+n.cn.axisBrush).data(o,a);r.enter().append("g").classed(n.cn.axisBrush,!0),function(t,e){var r=t.selectAll(".background").data(o);r.enter().append("rect").classed("background",!0).call(d).call(m).style("pointer-events","auto").attr("transform",l(0,n.verticalPadding)),r.call(k).attr("height",(function(t){return t.height-n.verticalPadding}));var i=t.selectAll(".highlight-shadow").data(o);i.enter().append("line").classed("highlight-shadow",!0).attr("x",-n.bar.width/2).attr("stroke-width",n.bar.width+n.bar.strokeWidth).attr("stroke",e).attr("opacity",n.bar.strokeOpacity).attr("stroke-linecap","butt"),i.attr("y1",(function(t){return t.height})).call(x);var a=t.selectAll(".highlight").data(o);a.enter().append("line").classed("highlight",!0).attr("x",-n.bar.width/2).attr("stroke-width",n.bar.width-n.bar.strokeWidth).attr("stroke",n.bar.fillColor).attr("opacity",n.bar.fillOpacity).attr("stroke-linecap","butt"),a.attr("y1",(function(t){return t.height})).call(x)}(r,e)},cleanRanges:function(t,e){if(Array.isArray(t[0])?(t=t.map((function(t){return t.sort(s)})),t=e.multiselect?S(t.sort(A)):[t[0]]):t=[t.sort(s)],e.tickvals){var r=e.tickvals.slice().sort(s);if(!(t=t.map((function(t){var e=[p(0,r,t[0],[]),p(1,r,t[1],[])];if(e[1]>e[0])return e})).filter((function(t){return t}))).length)return}return t.length>1?t:t[0]}}},{"../../lib":498,"../../lib/gup":495,"./constants":888,"@plotly/d3":58}],885:[function(t,e,r){"use strict";e.exports={attributes:t("./attributes"),supplyDefaults:t("./defaults"),calc:t("./calc"),colorbar:{container:"line",min:"cmin",max:"cmax"},moduleType:"trace",name:"parcoords",basePlotModule:t("./base_plot"),categories:["gl","regl","noOpacity","noHover"],meta:{}}},{"./attributes":883,"./base_plot":886,"./calc":887,"./defaults":889}],886:[function(t,e,r){"use strict";var n=t("@plotly/d3"),i=t("../../plots/get_data").getModuleCalcData,a=t("./plot"),o=t("../../constants/xmlns_namespaces");r.name="parcoords",r.plot=function(t){var 
e=i(t.calcdata,"parcoords")[0];e.length&&a(t,e)},r.clean=function(t,e,r,n){var i=n._has&&n._has("parcoords"),a=e._has&&e._has("parcoords");i&&!a&&(n._paperdiv.selectAll(".parcoords").remove(),n._glimages.selectAll("*").remove())},r.toSVG=function(t){var e=t._fullLayout._glimages,r=n.select(t).selectAll(".svg-container");r.filter((function(t,e){return e===r.size()-1})).selectAll(".gl-canvas-context, .gl-canvas-focus").each((function(){var t=this.toDataURL("image/png");e.append("svg:image").attr({xmlns:o.svg,"xlink:href":t,preserveAspectRatio:"none",x:0,y:0,width:this.style.width,height:this.style.height})})),window.setTimeout((function(){n.selectAll("#filterBarPattern").attr("id","filterBarPattern")}),60)}},{"../../constants/xmlns_namespaces":475,"../../plots/get_data":588,"./plot":895,"@plotly/d3":58}],887:[function(t,e,r){"use strict";var n=t("../../lib").isArrayOrTypedArray,i=t("../../components/colorscale"),a=t("../../lib/gup").wrap;e.exports=function(t,e){var r,o;return i.hasColorscale(e,"line")&&n(e.line.color)?(r=e.line.color,o=i.extractOpts(e.line).colorscale,i.calc(t,e,{vals:r,containerStr:"line",cLetter:"c"})):(r=function(t){for(var e=new Array(t),r=0;rf&&(n.log("parcoords traces support up to "+f+" dimensions at the moment"),d.splice(f));var m=s(t,e,{name:"dimensions",layout:l,handleItemDefaults:p}),g=function(t,e,r,o,s){var l=s("line.color",r);if(i(t,"line")&&n.isArrayOrTypedArray(l)){if(l.length)return s("line.colorscale"),a(t,e,o,s,{prefix:"line.",cLetter:"c"}),l.length;e.line.color=r}return 1/0}(t,e,r,l,u);o(e,l,u),Array.isArray(m)&&m.length||(e.visible=!1),h(e,m,"values",g);var 
v={family:l.font.family,size:Math.round(l.font.size/1.2),color:l.font.color};n.coerceFont(u,"labelfont",v),n.coerceFont(u,"tickfont",v),n.coerceFont(u,"rangefont",v),u("labelangle"),u("labelside")}},{"../../components/colorscale/defaults":371,"../../components/colorscale/helpers":372,"../../lib":498,"../../plots/array_container_defaults":544,"../../plots/cartesian/axes":549,"../../plots/domain":579,"./attributes":883,"./axisbrush":884,"./constants":888,"./merge_length":893}],890:[function(t,e,r){"use strict";var n=t("../../lib").isTypedArray;r.convertTypedArray=function(t){return n(t)?Array.prototype.slice.call(t):t},r.isOrdinal=function(t){return!!t.tickvals},r.isVisible=function(t){return t.visible||!("visible"in t)}},{"../../lib":498}],891:[function(t,e,r){"use strict";var n=t("./base_index");n.plot=t("./plot"),e.exports=n},{"./base_index":885,"./plot":895}],892:[function(t,e,r){"use strict";var n=t("glslify"),i=n(["precision highp float;\n#define GLSLIFY 1\n\nvarying vec4 fragColor;\n\nattribute vec4 p01_04, p05_08, p09_12, p13_16,\n p17_20, p21_24, p25_28, p29_32,\n p33_36, p37_40, p41_44, p45_48,\n p49_52, p53_56, p57_60, colors;\n\nuniform mat4 dim0A, dim1A, dim0B, dim1B, dim0C, dim1C, dim0D, dim1D,\n loA, hiA, loB, hiB, loC, hiC, loD, hiD;\n\nuniform vec2 resolution, viewBoxPos, viewBoxSize;\nuniform float maskHeight;\nuniform float drwLayer; // 0: context, 1: focus, 2: pick\nuniform vec4 contextColor;\nuniform sampler2D maskTexture, palette;\n\nbool isPick = (drwLayer > 1.5);\nbool isContext = (drwLayer < 0.5);\n\nconst vec4 ZEROS = vec4(0.0, 0.0, 0.0, 0.0);\nconst vec4 UNITS = vec4(1.0, 1.0, 1.0, 1.0);\n\nfloat val(mat4 p, mat4 v) {\n return dot(matrixCompMult(p, v) * UNITS, UNITS);\n}\n\nfloat axisY(float ratio, mat4 A, mat4 B, mat4 C, mat4 D) {\n float y1 = val(A, dim0A) + val(B, dim0B) + val(C, dim0C) + val(D, dim0D);\n float y2 = val(A, dim1A) + val(B, dim1B) + val(C, dim1C) + val(D, dim1D);\n return y1 * (1.0 - ratio) + y2 * ratio;\n}\n\nint iMod(int 
a, int b) {\n return a - b * (a / b);\n}\n\nbool fOutside(float p, float lo, float hi) {\n return (lo < hi) && (lo > p || p > hi);\n}\n\nbool vOutside(vec4 p, vec4 lo, vec4 hi) {\n return (\n fOutside(p[0], lo[0], hi[0]) ||\n fOutside(p[1], lo[1], hi[1]) ||\n fOutside(p[2], lo[2], hi[2]) ||\n fOutside(p[3], lo[3], hi[3])\n );\n}\n\nbool mOutside(mat4 p, mat4 lo, mat4 hi) {\n return (\n vOutside(p[0], lo[0], hi[0]) ||\n vOutside(p[1], lo[1], hi[1]) ||\n vOutside(p[2], lo[2], hi[2]) ||\n vOutside(p[3], lo[3], hi[3])\n );\n}\n\nbool outsideBoundingBox(mat4 A, mat4 B, mat4 C, mat4 D) {\n return mOutside(A, loA, hiA) ||\n mOutside(B, loB, hiB) ||\n mOutside(C, loC, hiC) ||\n mOutside(D, loD, hiD);\n}\n\nbool outsideRasterMask(mat4 A, mat4 B, mat4 C, mat4 D) {\n mat4 pnts[4];\n pnts[0] = A;\n pnts[1] = B;\n pnts[2] = C;\n pnts[3] = D;\n\n for(int i = 0; i < 4; ++i) {\n for(int j = 0; j < 4; ++j) {\n for(int k = 0; k < 4; ++k) {\n if(0 == iMod(\n int(255.0 * texture2D(maskTexture,\n vec2(\n (float(i * 2 + j / 2) + 0.5) / 8.0,\n (pnts[i][j][k] * (maskHeight - 1.0) + 1.0) / maskHeight\n ))[3]\n ) / int(pow(2.0, float(iMod(j * 4 + k, 8)))),\n 2\n )) return true;\n }\n }\n }\n return false;\n}\n\nvec4 position(bool isContext, float v, mat4 A, mat4 B, mat4 C, mat4 D) {\n float x = 0.5 * sign(v) + 0.5;\n float y = axisY(x, A, B, C, D);\n float z = 1.0 - abs(v);\n\n z += isContext ? 0.0 : 2.0 * float(\n outsideBoundingBox(A, B, C, D) ||\n outsideRasterMask(A, B, C, D)\n );\n\n return vec4(\n 2.0 * (vec2(x, y) * viewBoxSize + viewBoxPos) / resolution - 1.0,\n z,\n 1.0\n );\n}\n\nvoid main() {\n mat4 A = mat4(p01_04, p05_08, p09_12, p13_16);\n mat4 B = mat4(p17_20, p21_24, p25_28, p29_32);\n mat4 C = mat4(p33_36, p37_40, p41_44, p45_48);\n mat4 D = mat4(p49_52, p53_56, p57_60, ZEROS);\n\n float v = colors[3];\n\n gl_Position = position(isContext, v, A, B, C, D);\n\n fragColor =\n isContext ? vec4(contextColor) :\n isPick ? 
vec4(colors.rgb, 1.0) : texture2D(palette, vec2(abs(v), 0.5));\n}\n"]),a=n(["precision highp float;\n#define GLSLIFY 1\n\nvarying vec4 fragColor;\n\nvoid main() {\n gl_FragColor = fragColor;\n}\n"]),o=t("./constants").maxDimensionCount,s=t("../../lib"),l=new Uint8Array(4),c=new Uint8Array(4),u={shape:[256,1],format:"rgba",type:"uint8",mag:"nearest",min:"nearest"};function f(t,e,r,n,i){var a=t._gl;a.enable(a.SCISSOR_TEST),a.scissor(e,r,n,i),t.clear({color:[0,0,0,0],depth:1})}function h(t,e,r,n,i,a){var o=a.key;r.drawCompleted||(!function(t){t.read({x:0,y:0,width:1,height:1,data:l})}(t),r.drawCompleted=!0),function s(l){var c=Math.min(n,i-l*n);0===l&&(window.cancelAnimationFrame(r.currentRafs[o]),delete r.currentRafs[o],f(t,a.scissorX,a.scissorY,a.scissorWidth,a.viewBoxSize[1])),r.clearOnly||(a.count=2*c,a.offset=2*l*n,e(a),l*n+c>>8*e)%256/255}function m(t,e,r){for(var n=new Array(8*e),i=0,a=0;au&&(u=t[i].dim1.canvasX,o=i);0===s&&f(T,0,0,r.canvasWidth,r.canvasHeight);var p=function(t){var e,r,n,i=[[],[]];for(n=0;n<64;n++){var a=!t&&no._length&&(S=S.slice(0,o._length));var L,C=o.tickvals;function P(t,e){return{val:t,text:L[e]}}function I(t,e){return t.val-e.val}if(Array.isArray(C)&&C.length){L=o.ticktext,Array.isArray(L)&&L.length?L.length>C.length?L=L.slice(0,C.length):C.length>L.length&&(C=C.slice(0,L.length)):L=C.map(a(o.tickformat));for(var O=1;O=r||l>=i)return;var c=t.lineLayer.readPixel(s,i-1-l),u=0!==c[3],f=u?c[2]+256*(c[1]+256*c[0]):null,h={x:s,y:l,clientX:e.clientX,clientY:e.clientY,dataIndex:t.model.key,curveNumber:f};f!==B&&(u?a.hover(h):a.unhover&&a.unhover(h),B=f)}})),F.style("opacity",(function(t){return t.pick?0:1})),h.style("background","rgba(255, 255, 255, 0)");var N=h.selectAll("."+y.cn.parcoords).data(R,d);N.exit().remove(),N.enter().append("g").classed(y.cn.parcoords,!0).style("shape-rendering","crispEdges").style("pointer-events","none"),N.attr("transform",(function(t){return c(t.model.translateX,t.model.translateY)}));var 
j=N.selectAll("."+y.cn.parcoordsControlView).data(m,d);j.enter().append("g").classed(y.cn.parcoordsControlView,!0),j.attr("transform",(function(t){return c(t.model.pad.l,t.model.pad.t)}));var U=j.selectAll("."+y.cn.yAxis).data((function(t){return t.dimensions}),d);U.enter().append("g").classed(y.cn.yAxis,!0),j.each((function(t){O(U,t,_)})),F.each((function(t){if(t.viewModel){!t.lineLayer||a?t.lineLayer=b(this,t):t.lineLayer.update(t),(t.key||0===t.key)&&(t.viewModel[t.key]=t.lineLayer);var e=!t.context||a;t.lineLayer.render(t.viewModel.panels,e)}})),U.attr("transform",(function(t){return c(t.xScale(t.xIndex),0)})),U.call(n.behavior.drag().origin((function(t){return t})).on("drag",(function(t){var e=t.parent;S.linePickActive(!1),t.x=Math.max(-y.overdrag,Math.min(t.model.width+y.overdrag,n.event.x)),t.canvasX=t.x*t.model.canvasPixelRatio,U.sort((function(t,e){return t.x-e.x})).each((function(e,r){e.xIndex=r,e.x=t===e?e.x:e.xScale(e.xIndex),e.canvasX=e.x*e.model.canvasPixelRatio})),O(U,e,_),U.filter((function(e){return 0!==Math.abs(t.xIndex-e.xIndex)})).attr("transform",(function(t){return c(t.xScale(t.xIndex),0)})),n.select(this).attr("transform",c(t.x,0)),U.each((function(r,n,i){i===t.parent.key&&(e.dimensions[n]=r)})),e.contextLayer&&e.contextLayer.render(e.panels,!1,!E(e)),e.focusLayer.render&&e.focusLayer.render(e.panels)})).on("dragend",(function(t){var e=t.parent;t.x=t.xScale(t.xIndex),t.canvasX=t.x*t.model.canvasPixelRatio,O(U,e,_),n.select(this).attr("transform",(function(t){return c(t.x,0)})),e.contextLayer&&e.contextLayer.render(e.panels,!1,!E(e)),e.focusLayer&&e.focusLayer.render(e.panels),e.pickLayer&&e.pickLayer.render(e.panels,!0),S.linePickActive(!0),a&&a.axesMoved&&a.axesMoved(e.key,e.dimensions.map((function(t){return t.crossfilterDimensionIndex})))}))),U.exit().remove();var V=U.selectAll("."+y.cn.axisOverlays).data(m,d);V.enter().append("g").classed(y.cn.axisOverlays,!0),V.selectAll("."+y.cn.axis).remove();var 
H=V.selectAll("."+y.cn.axis).data(m,d);H.enter().append("g").classed(y.cn.axis,!0),H.each((function(t){var e=t.model.height/t.model.tickDistance,r=t.domainScale,i=r.domain();n.select(this).call(n.svg.axis().orient("left").tickSize(4).outerTickSize(2).ticks(e,t.tickFormat).tickValues(t.ordinal?i:null).tickFormat((function(e){return v.isOrdinal(t)?e:z(t.model.dimensions[t.visibleIndex],e)})).scale(r)),f.font(H.selectAll("text"),t.model.tickFont)})),H.selectAll(".domain, .tick>line").attr("fill","none").attr("stroke","black").attr("stroke-opacity",.25).attr("stroke-width","1px"),H.selectAll("text").style("text-shadow",u.makeTextShadow(T)).style("cursor","default");var q=V.selectAll("."+y.cn.axisHeading).data(m,d);q.enter().append("g").classed(y.cn.axisHeading,!0);var G=q.selectAll("."+y.cn.axisTitle).data(m,d);G.enter().append("text").classed(y.cn.axisTitle,!0).attr("text-anchor","middle").style("cursor","ew-resize").style("pointer-events","auto"),G.text((function(t){return t.label})).each((function(e){var r=n.select(this);f.font(r,e.model.labelFont),u.convertToTspans(r,t)})).attr("transform",(function(t){var e=I(t.model.labelAngle,t.model.labelSide),r=y.axisTitleOffset;return(e.dir>0?"":c(0,2*r+t.model.height))+l(e.degrees)+c(-r*e.dx,-r*e.dy)})).attr("text-anchor",(function(t){var e=I(t.model.labelAngle,t.model.labelSide);return 2*Math.abs(e.dx)>Math.abs(e.dy)?e.dir*e.dx<0?"start":"end":"middle"}));var Y=V.selectAll("."+y.cn.axisExtent).data(m,d);Y.enter().append("g").classed(y.cn.axisExtent,!0);var W=Y.selectAll("."+y.cn.axisExtentTop).data(m,d);W.enter().append("g").classed(y.cn.axisExtentTop,!0),W.attr("transform",c(0,-y.axisExtentOffset));var X=W.selectAll("."+y.cn.axisExtentTopText).data(m,d);X.enter().append("text").classed(y.cn.axisExtentTopText,!0).call(P),X.text((function(t){return D(t,!0)})).each((function(t){f.font(n.select(this),t.model.rangeFont)}));var 
Z=Y.selectAll("."+y.cn.axisExtentBottom).data(m,d);Z.enter().append("g").classed(y.cn.axisExtentBottom,!0),Z.attr("transform",(function(t){return c(0,t.model.height+y.axisExtentOffset)}));var J=Z.selectAll("."+y.cn.axisExtentBottomText).data(m,d);J.enter().append("text").classed(y.cn.axisExtentBottomText,!0).attr("dy","0.75em").call(P),J.text((function(t){return D(t,!1)})).each((function(t){f.font(n.select(this),t.model.rangeFont)})),x.ensureAxisBrush(V,T)}},{"../../components/colorscale":373,"../../components/drawing":383,"../../lib":498,"../../lib/gup":495,"../../lib/svg_text_utils":524,"../../plots/cartesian/axes":549,"./axisbrush":884,"./constants":888,"./helpers":890,"./lines":892,"@plotly/d3":58,"color-rgba":86}],895:[function(t,e,r){"use strict";var n=t("./parcoords"),i=t("../../lib/prepare_regl"),a=t("./helpers").isVisible,o={};function s(t,e,r){var n=e.indexOf(r),i=t.indexOf(n);return-1===i&&(i+=e.length),i}(e.exports=function(t,e){var r=t._fullLayout;if(i(t,[],o)){var l={},c={},u={},f={},h=r._size;e.forEach((function(e,r){var n=e[0].trace;u[r]=n.index;var i=f[r]=n._fullInput.index;l[r]=t.data[i].dimensions,c[r]=t.data[i].dimensions.slice()}));n(t,e,{width:h.w,height:h.h,margin:{t:h.t,r:h.r,b:h.b,l:h.l}},{filterChanged:function(e,n,i){var a=c[e][n],o=i.map((function(t){return t.slice()})),s="dimensions["+n+"].constraintrange",l=r._tracePreGUI[t._fullData[u[e]]._fullInput.uid];if(void 0===l[s]){var h=a.constraintrange;l[s]=h||null}var p=t._fullData[u[e]].dimensions[n];o.length?(1===o.length&&(o=o[0]),a.constraintrange=o,p.constraintrange=o.slice(),o=[o]):(delete a.constraintrange,delete p.constraintrange,o=null);var d={};d[s]=o,t.emit("plotly_restyle",[d,[f[e]]])},hover:function(e){t.emit("plotly_hover",e)},unhover:function(e){t.emit("plotly_unhover",e)},axesMoved:function(e,r){var n=function(t,e){return function(r,n){return s(t,e,r)-s(t,e,n)}}(r,c[e].filter(a));l[e].sort(n),c[e].filter((function(t){return!a(t)})).sort((function(t){return 
c[e].indexOf(t)})).forEach((function(t){l[e].splice(l[e].indexOf(t),1),l[e].splice(c[e].indexOf(t),0,t)})),t.emit("plotly_restyle",[{dimensions:[l[e]]},[f[e]]])}})}}).reglPrecompiled=o},{"../../lib/prepare_regl":511,"./helpers":890,"./parcoords":894}],896:[function(t,e,r){"use strict";var n=t("../../plots/attributes"),i=t("../../plots/domain").attributes,a=t("../../plots/font_attributes"),o=t("../../components/color/attributes"),s=t("../../plots/template_attributes").hovertemplateAttrs,l=t("../../plots/template_attributes").texttemplateAttrs,c=t("../../lib/extend").extendFlat,u=a({editType:"plot",arrayOk:!0,colorEditType:"plot"});e.exports={labels:{valType:"data_array",editType:"calc"},label0:{valType:"number",dflt:0,editType:"calc"},dlabel:{valType:"number",dflt:1,editType:"calc"},values:{valType:"data_array",editType:"calc"},marker:{colors:{valType:"data_array",editType:"calc"},line:{color:{valType:"color",dflt:o.defaultLine,arrayOk:!0,editType:"style"},width:{valType:"number",min:0,dflt:0,arrayOk:!0,editType:"style"},editType:"calc"},editType:"calc"},text:{valType:"data_array",editType:"plot"},hovertext:{valType:"string",dflt:"",arrayOk:!0,editType:"style"},scalegroup:{valType:"string",dflt:"",editType:"calc"},textinfo:{valType:"flaglist",flags:["label","text","value","percent"],extras:["none"],editType:"calc"},hoverinfo:c({},n.hoverinfo,{flags:["label","text","value","percent","name"]}),hovertemplate:s({},{keys:["label","color","value","percent","text"]}),texttemplate:l({editType:"plot"},{keys:["label","color","value","percent","text"]}),textposition:{valType:"enumerated",values:["inside","outside","auto","none"],dflt:"auto",arrayOk:!0,editType:"plot"},textfont:c({},u,{}),insidetextorientation:{valType:"enumerated",values:["horizontal","radial","tangential","auto"],dflt:"auto",editType:"plot"},insidetextfont:c({},u,{}),outsidetextfont:c({},u,{}),automargin:{valType:"boolean",dflt:!1,editType:"plot"},title:{text:{valType:"string",dflt:"",editType:"plot"},font:c({
},u,{}),position:{valType:"enumerated",values:["top left","top center","top right","middle center","bottom left","bottom center","bottom right"],editType:"plot"},editType:"plot"},domain:i({name:"pie",trace:!0,editType:"calc"}),hole:{valType:"number",min:0,max:1,dflt:0,editType:"calc"},sort:{valType:"boolean",dflt:!0,editType:"calc"},direction:{valType:"enumerated",values:["clockwise","counterclockwise"],dflt:"counterclockwise",editType:"calc"},rotation:{valType:"number",min:-360,max:360,dflt:0,editType:"calc"},pull:{valType:"number",min:0,max:1,dflt:0,arrayOk:!0,editType:"calc"},_deprecated:{title:{valType:"string",dflt:"",editType:"calc"},titlefont:c({},u,{}),titleposition:{valType:"enumerated",values:["top left","top center","top right","middle center","bottom left","bottom center","bottom right"],editType:"calc"}}}},{"../../components/color/attributes":360,"../../lib/extend":488,"../../plots/attributes":545,"../../plots/domain":579,"../../plots/font_attributes":580,"../../plots/template_attributes":628}],897:[function(t,e,r){"use strict";var n=t("../../plots/plots");r.name="pie",r.plot=function(t,e,i,a){n.plotBasePlot(r.name,t,e,i,a)},r.clean=function(t,e,i,a){n.cleanBasePlot(r.name,t,e,i,a)}},{"../../plots/plots":614}],898:[function(t,e,r){"use strict";var n=t("fast-isnumeric"),i=t("tinycolor2"),a=t("../../components/color"),o={};function s(t){return function(e,r){return!!e&&(!!(e=i(e)).isValid()&&(e=a.addOpacity(e,e.getAlpha()),t[r]||(t[r]=e),e))}}function l(t,e){var r,n=JSON.stringify(t),a=e[n];if(!a){for(a=t.slice(),r=0;r=0})),("funnelarea"===e.type?v:e.sort)&&a.sort((function(t,e){return e.v-t.v})),a[0]&&(a[0].vTotal=g),a},crossTraceCalc:function(t,e){var r=(e||{}).type;r||(r="pie");var n=t._fullLayout,i=t.calcdata,a=n[r+"colorway"],s=n["_"+r+"colormap"];n["extend"+r+"colors"]&&(a=l(a,o));for(var c=0,u=0;u0){s=!0;break}}s||(o=0)}return{hasLabels:r,hasValues:a,len:o}}e.exports={handleLabelsAndValues:l,supplyDefaults:function(t,e,r,n){function c(r,n){return 
i.coerce(t,e,a,r,n)}var u=l(c("labels"),c("values")),f=u.len;if(e._hasLabels=u.hasLabels,e._hasValues=u.hasValues,!e._hasLabels&&e._hasValues&&(c("label0"),c("dlabel")),f){e._length=f,c("marker.line.width")&&c("marker.line.color"),c("marker.colors"),c("scalegroup");var h,p=c("text"),d=c("texttemplate");if(d||(h=c("textinfo",Array.isArray(p)?"text+percent":"percent")),c("hovertext"),c("hovertemplate"),d||h&&"none"!==h){var m=c("textposition");s(t,e,n,c,m,{moduleHasSelected:!1,moduleHasUnselected:!1,moduleHasConstrain:!1,moduleHasCliponaxis:!1,moduleHasTextangle:!1,moduleHasInsideanchor:!1}),(Array.isArray(m)||"auto"===m||"outside"===m)&&c("automargin"),("inside"===m||"auto"===m||Array.isArray(m))&&c("insidetextorientation")}o(e,n,c);var g=c("hole");if(c("title.text")){var v=c("title.position",g?"middle center":"top center");g||"middle center"!==v||(e.title.position="top center"),i.coerceFont(c,"title.font",n.font)}c("sort"),c("direction"),c("rotation"),c("pull")}else e.visible=!1}}},{"../../lib":498,"../../plots/domain":579,"../bar/defaults":647,"./attributes":896,"fast-isnumeric":185}],900:[function(t,e,r){"use strict";var n=t("../../components/fx/helpers").appendArrayMultiPointValues;e.exports=function(t,e){var r={curveNumber:e.index,pointNumbers:t.pts,data:e._input,fullData:e,label:t.label,color:t.color,value:t.v,percent:t.percent,text:t.text,bbox:t.bbox,v:t.v};return 1===t.pts.length&&(r.pointNumber=r.i=t.pts[0]),n(r,e,t.pts),"funnelarea"===e.type&&(delete r.v,delete r.i),r}},{"../../components/fx/helpers":397}],901:[function(t,e,r){"use strict";var n=t("../../lib");function i(t){return-1!==t.indexOf("e")?t.replace(/[.]?0+e/,"e"):-1!==t.indexOf(".")?t.replace(/[.]?0+$/,""):t}r.formatPiePercent=function(t,e){var r=i((100*t).toPrecision(3));return n.numSeparate(r,e)+"%"},r.formatPieValue=function(t,e){var r=i(t.toPrecision(10));return n.numSeparate(r,e)},r.getFirstFilled=function(t,e){if(Array.isArray(t))for(var 
r=0;r"),name:f.hovertemplate||-1!==h.indexOf("name")?f.name:void 0,idealAlign:t.pxmid[0]<0?"left":"right",color:g.castOption(_.bgcolor,t.pts)||t.color,borderColor:g.castOption(_.bordercolor,t.pts),fontFamily:g.castOption(w.family,t.pts),fontSize:g.castOption(w.size,t.pts),fontColor:g.castOption(w.color,t.pts),nameLength:g.castOption(_.namelength,t.pts),textAlign:g.castOption(_.align,t.pts),hovertemplate:g.castOption(f.hovertemplate,t.pts),hovertemplateLabels:t,eventData:[v(t,f)]},{container:r._hoverlayer.node(),outerContainer:r._paper.node(),gd:e,inOut_bbox:T}),t.bbox=T[0],c._hasHoverLabel=!0}c._hasHoverEvent=!0,e.emit("plotly_hover",{points:[v(t,f)],event:n.event})}})),t.on("mouseout",(function(t){var r=e._fullLayout,i=e._fullData[c.index],o=n.select(this).datum();c._hasHoverEvent&&(t.originalEvent=n.event,e.emit("plotly_unhover",{points:[v(o,i)],event:n.event}),c._hasHoverEvent=!1),c._hasHoverLabel&&(a.loneUnhover(r._hoverlayer.node()),c._hasHoverLabel=!1)})),t.on("click",(function(t){var r=e._fullLayout,i=e._fullData[c.index];e._dragging||!1===r.hovermode||(e._hoverdata=[v(t,i)],a.click(e,n.event))}))}function b(t,e,r){var n=g.castOption(t.insidetextfont.color,e.pts);!n&&t._input.textfont&&(n=g.castOption(t._input.textfont.color,e.pts));var i=g.castOption(t.insidetextfont.family,e.pts)||g.castOption(t.textfont.family,e.pts)||r.family,a=g.castOption(t.insidetextfont.size,e.pts)||g.castOption(t.textfont.size,e.pts)||r.size;return{color:n||o.contrast(e.color),family:i,size:a}}function _(t,e){for(var r,n,i=0;ie&&e>n||r=-4;g-=2)v(Math.PI*g,"tan");for(g=4;g>=-4;g-=2)v(Math.PI*(g+1),"tan")}if(f||p){for(g=4;g>=-4;g-=2)v(Math.PI*(g+1.5),"rad");for(g=4;g>=-4;g-=2)v(Math.PI*(g+.5),"rad")}}if(s||d||f){var y=Math.sqrt(t.width*t.width+t.height*t.height);if((a={scale:i*n*2/y,rCenter:1-i,rotate:0}).textPosAngle=(e.startangle+e.stopangle)/2,a.scale>=1)return 
a;m.push(a)}(d||p)&&((a=T(t,n,o,l,c)).textPosAngle=(e.startangle+e.stopangle)/2,m.push(a)),(d||h)&&((a=k(t,n,o,l,c)).textPosAngle=(e.startangle+e.stopangle)/2,m.push(a));for(var x=0,b=0,_=0;_=1)break}return m[x]}function T(t,e,r,n,i){e=Math.max(0,e-2*m);var a=t.width/t.height,o=S(a,n,e,r);return{scale:2*o/t.height,rCenter:A(a,o/e),rotate:M(i)}}function k(t,e,r,n,i){e=Math.max(0,e-2*m);var a=t.height/t.width,o=S(a,n,e,r);return{scale:2*o/t.width,rCenter:A(a,o/e),rotate:M(i+Math.PI/2)}}function A(t,e){return Math.cos(e)-t*e}function M(t){return(180/Math.PI*t+720)%180-90}function S(t,e,r,n){var i=t+1/(2*Math.tan(e));return r*Math.min(1/(Math.sqrt(i*i+.5)+i),n/(Math.sqrt(t*t+n/2)+t))}function E(t,e){return t.v!==e.vTotal||e.trace.hole?Math.min(1/(1+1/Math.sin(t.halfangle)),t.ring/2):1}function L(t,e){var r=e.pxmid[0],n=e.pxmid[1],i=t.width/2,a=t.height/2;return r<0&&(i*=-1),n<0&&(a*=-1),{scale:1,rCenter:1,rotate:0,x:i+Math.abs(a)*(i>0?1:-1)/2,y:a/(1+r*r/(n*n)),outside:!0}}function C(t,e){var r,n,i,a=t.trace,o={x:t.cx,y:t.cy},s={tx:0,ty:0};s.ty+=a.title.font.size,i=I(a),-1!==a.title.position.indexOf("top")?(o.y-=(1+i)*t.r,s.ty-=t.titleBox.height):-1!==a.title.position.indexOf("bottom")&&(o.y+=(1+i)*t.r);var l,c,u=(l=t.r,c=t.trace.aspectratio,l/(void 0===c?1:c)),f=e.w*(a.domain.x[1]-a.domain.x[0])/2;return-1!==a.title.position.indexOf("left")?(f+=u,o.x-=(1+i)*u,s.tx+=t.titleBox.width/2):-1!==a.title.position.indexOf("center")?f*=2:-1!==a.title.position.indexOf("right")&&(f+=u,o.x+=(1+i)*u,s.tx-=t.titleBox.width/2),r=f/t.titleBox.width,n=P(t,e)/t.titleBox.height,{x:o.x,y:o.y,scale:Math.min(r,n),tx:s.tx,ty:s.ty}}function P(t,e){var r=t.trace,n=e.h*(r.domain.y[1]-r.domain.y[0]);return Math.min(t.titleBox.height,n/2)}function I(t){var e,r=t.pull;if(!r)return 0;if(Array.isArray(r))for(r=0,e=0;er&&(r=t.pull[e]);return r}function O(t,e){for(var 
r=[],n=0;n1?(c=r.r,u=c/i.aspectratio):(u=r.r,c=u*i.aspectratio),c*=(1+i.baseratio)/2,l=c*u}o=Math.min(o,l/r.vTotal)}for(n=0;n")}if(a){var x=l.castOption(i,e.i,"texttemplate");if(x){var b=function(t){return{label:t.label,value:t.v,valueLabel:g.formatPieValue(t.v,n.separators),percent:t.v/r.vTotal,percentLabel:g.formatPiePercent(t.v/r.vTotal,n.separators),color:t.color,text:t.text,customdata:l.castOption(i,t.i,"customdata")}}(e),_=g.getFirstFilled(i.text,e.pts);(y(_)||""===_)&&(b.text=_),e.text=l.texttemplateString(x,b,t._fullLayout._d3locale,b,i._meta||{})}else e.text=""}}function R(t,e){var r=t.rotate*Math.PI/180,n=Math.cos(r),i=Math.sin(r),a=(e.left+e.right)/2,o=(e.top+e.bottom)/2;t.textX=a*n-o*i,t.textY=a*i+o*n,t.noCenter=!0}e.exports={plot:function(t,e){var r=t._fullLayout,a=r._size;d("pie",r),_(e,t),O(e,a);var h=l.makeTraceGroups(r._pielayer,e,"trace").each((function(e){var h=n.select(this),d=e[0],m=d.trace;!function(t){var e,r,n,i=t[0],a=i.r,o=i.trace,s=g.getRotationAngle(o.rotation),l=2*Math.PI/i.vTotal,c="px0",u="px1";if("counterclockwise"===o.direction){for(e=0;ei.vTotal/2?1:0,r.halfangle=Math.PI*Math.min(r.v/i.vTotal,.5),r.ring=1-o.hole,r.rInscribed=E(r,i))}(e),h.attr("stroke-linejoin","round"),h.each((function(){var v=n.select(this).selectAll("g.slice").data(e);v.enter().append("g").classed("slice",!0),v.exit().remove();var y=[[[],[]],[[],[]]],_=!1;v.each((function(i,a){if(i.hidden)n.select(this).selectAll("path,g").remove();else{i.pointNumber=i.i,i.curveNumber=m.index,y[i.pxmid[1]<0?0:1][i.pxmid[0]<0?0:1].push(i);var o=d.cx,c=d.cy,u=n.select(this),h=u.selectAll("path.surface").data([i]);if(h.enter().append("path").classed("surface",!0).style({"pointer-events":"all"}),u.call(x,t,e),m.pull){var v=+g.castOption(m.pull,i.pts)||0;v>0&&(o+=v*i.pxmid[0],c+=v*i.pxmid[1])}i.cxFinal=o,i.cyFinal=c;var T=m.hole;if(i.v===d.vTotal){var 
k="M"+(o+i.px0[0])+","+(c+i.px0[1])+C(i.px0,i.pxmid,!0,1)+C(i.pxmid,i.px0,!0,1)+"Z";T?h.attr("d","M"+(o+T*i.px0[0])+","+(c+T*i.px0[1])+C(i.px0,i.pxmid,!1,T)+C(i.pxmid,i.px0,!1,T)+"Z"+k):h.attr("d",k)}else{var A=C(i.px0,i.px1,!0,1);if(T){var M=1-T;h.attr("d","M"+(o+T*i.px1[0])+","+(c+T*i.px1[1])+C(i.px1,i.px0,!1,T)+"l"+M*i.px0[0]+","+M*i.px0[1]+A+"Z")}else h.attr("d","M"+o+","+c+"l"+i.px0[0]+","+i.px0[1]+A+"Z")}D(t,i,d);var S=g.castOption(m.textposition,i.pts),E=u.selectAll("g.slicetext").data(i.text&&"none"!==S?[0]:[]);E.enter().append("g").classed("slicetext",!0),E.exit().remove(),E.each((function(){var u=l.ensureSingle(n.select(this),"text","",(function(t){t.attr("data-notex",1)})),h=l.ensureUniformFontSize(t,"outside"===S?function(t,e,r){var n=g.castOption(t.outsidetextfont.color,e.pts)||g.castOption(t.textfont.color,e.pts)||r.color,i=g.castOption(t.outsidetextfont.family,e.pts)||g.castOption(t.textfont.family,e.pts)||r.family,a=g.castOption(t.outsidetextfont.size,e.pts)||g.castOption(t.textfont.size,e.pts)||r.size;return{color:n,family:i,size:a}}(m,i,r.font):b(m,i,r.font));u.text(i.text).attr({class:"slicetext",transform:"","text-anchor":"middle"}).call(s.font,h).call(f.convertToTspans,t);var v,y=s.bBox(u.node());if("outside"===S)v=L(y,i);else if(v=w(y,i,d),"auto"===S&&v.scale<1){var x=l.ensureUniformFontSize(t,m.outsidetextfont);u.call(s.font,x),v=L(y=s.bBox(u.node()),i)}var T=v.textPosAngle,k=void 0===T?i.pxmid:z(d.r,T);if(v.targetX=o+k[0]*v.rCenter+(v.x||0),v.targetY=c+k[1]*v.rCenter+(v.y||0),R(v,y),v.outside){var A=v.targetY;i.yLabelMin=A-y.height/2,i.yLabelMid=A,i.yLabelMax=A+y.height/2,i.labelExtraX=0,i.labelExtraY=0,_=!0}v.fontSize=h.size,p(m.type,v,r),e[a].transform=v,u.attr("transform",l.getTextTransform(v))}))}function C(t,e,r,n){var a=n*(e[0]-t[0]),o=n*(e[1]-t[1]);return"a"+n*d.r+","+n*d.r+" 0 "+i.largeArc+(r?" 
1 ":" 0 ")+a+","+o}}));var T=n.select(this).selectAll("g.titletext").data(m.title.text?[0]:[]);if(T.enter().append("g").classed("titletext",!0),T.exit().remove(),T.each((function(){var e,r=l.ensureSingle(n.select(this),"text","",(function(t){t.attr("data-notex",1)})),i=m.title.text;m._meta&&(i=l.templateString(i,m._meta)),r.text(i).attr({class:"titletext",transform:"","text-anchor":"middle"}).call(s.font,m.title.font).call(f.convertToTspans,t),e="middle center"===m.title.position?function(t){var e=Math.sqrt(t.titleBox.width*t.titleBox.width+t.titleBox.height*t.titleBox.height);return{x:t.cx,y:t.cy,scale:t.trace.hole*t.r*2/e,tx:0,ty:-t.titleBox.height/2+t.trace.title.font.size}}(d):C(d,a),r.attr("transform",u(e.x,e.y)+c(Math.min(1,e.scale))+u(e.tx,e.ty))})),_&&function(t,e){var r,n,i,a,o,s,l,c,u,f,h,p,d;function m(t,e){return t.pxmid[1]-e.pxmid[1]}function v(t,e){return e.pxmid[1]-t.pxmid[1]}function y(t,r){r||(r={});var i,c,u,h,p=r.labelExtraY+(n?r.yLabelMax:r.yLabelMin),d=n?t.yLabelMin:t.yLabelMax,m=n?t.yLabelMax:t.yLabelMin,v=t.cyFinal+o(t.px0[1],t.px1[1]),y=p-d;if(y*l>0&&(t.labelExtraY=y),Array.isArray(e.pull))for(c=0;c=(g.castOption(e.pull,u.pts)||0)||((t.pxmid[1]-u.pxmid[1])*l>0?(y=u.cyFinal+o(u.px0[1],u.px1[1])-d-t.labelExtraY)*l>0&&(t.labelExtraY+=y):(m+t.labelExtraY-v)*l>0&&(i=3*s*Math.abs(c-f.indexOf(t)),(h=u.cxFinal+a(u.px0[0],u.px1[0])+i-(t.cxFinal+t.pxmid[0])-t.labelExtraX)*s>0&&(t.labelExtraX+=h)))}for(n=0;n<2;n++)for(i=n?m:v,o=n?Math.max:Math.min,l=n?1:-1,r=0;r<2;r++){for(a=r?Math.max:Math.min,s=r?1:-1,(c=t[n][r]).sort(i),u=t[1-n][r],f=u.concat(c),p=[],h=0;hMath.abs(f)?s+="l"+f*t.pxmid[0]/t.pxmid[1]+","+f+"H"+(a+t.labelExtraX+c):s+="l"+t.labelExtraX+","+u+"v"+(f-u)+"h"+c}else s+="V"+(t.yLabelMid+t.labelExtraY)+"h"+c;l.ensureSingle(r,"path","textline").call(o.stroke,e.outsidetextfont.color).attr({"stroke-width":Math.min(2,e.outsidetextfont.size/8),d:s,fill:"none"})}else r.select("path.textline").remove()}))}(v,m),_&&m.automargin){var 
k=s.bBox(h.node()),A=m.domain,M=a.w*(A.x[1]-A.x[0]),S=a.h*(A.y[1]-A.y[0]),E=(.5*M-d.r)/a.w,P=(.5*S-d.r)/a.h;i.autoMargin(t,"pie."+m.uid+".automargin",{xl:A.x[0]-E,xr:A.x[1]+E,yb:A.y[0]-P,yt:A.y[1]+P,l:Math.max(d.cx-d.r-k.left,0),r:Math.max(k.right-(d.cx+d.r),0),b:Math.max(k.bottom-(d.cy+d.r),0),t:Math.max(d.cy-d.r-k.top,0),pad:5})}}))}));setTimeout((function(){h.selectAll("tspan").each((function(){var t=n.select(this);t.attr("dy")&&t.attr("dy",t.attr("dy"))}))}),0)},formatSliceLabel:D,transformInsideText:w,determineInsideTextFont:b,positionTitleOutside:C,prerenderTitles:_,layoutAreas:O,attachFxHandlers:x,computeTransform:R}},{"../../components/color":361,"../../components/drawing":383,"../../components/fx":401,"../../lib":498,"../../lib/svg_text_utils":524,"../../plots/plots":614,"../bar/constants":645,"../bar/uniform_text":659,"./event_data":900,"./helpers":901,"@plotly/d3":58}],906:[function(t,e,r){"use strict";var n=t("@plotly/d3"),i=t("./style_one"),a=t("../bar/uniform_text").resizeText;e.exports=function(t){var e=t._fullLayout._pielayer.selectAll(".trace");a(t,e,"pie"),e.each((function(t){var e=t[0].trace,r=n.select(this);r.style({opacity:e.opacity}),r.selectAll("path.surface").each((function(t){n.select(this).call(i,t,e)}))}))}},{"../bar/uniform_text":659,"./style_one":907,"@plotly/d3":58}],907:[function(t,e,r){"use strict";var n=t("../../components/color"),i=t("./helpers").castOption;e.exports=function(t,e,r){var a=r.marker.line,o=i(a.color,e.pts)||n.defaultLine,s=i(a.width,e.pts)||0;t.style("stroke-width",s).call(n.fill,e.color).call(n.stroke,o)}},{"../../components/color":361,"./helpers":901}],908:[function(t,e,r){"use strict";var 
n=t("../scatter/attributes");e.exports={x:n.x,y:n.y,xy:{valType:"data_array",editType:"calc"},indices:{valType:"data_array",editType:"calc"},xbounds:{valType:"data_array",editType:"calc"},ybounds:{valType:"data_array",editType:"calc"},text:n.text,marker:{color:{valType:"color",arrayOk:!1,editType:"calc"},opacity:{valType:"number",min:0,max:1,dflt:1,arrayOk:!1,editType:"calc"},blend:{valType:"boolean",dflt:null,editType:"calc"},sizemin:{valType:"number",min:.1,max:2,dflt:.5,editType:"calc"},sizemax:{valType:"number",min:.1,dflt:20,editType:"calc"},border:{color:{valType:"color",arrayOk:!1,editType:"calc"},arearatio:{valType:"number",min:0,max:1,dflt:0,editType:"calc"},editType:"calc"},editType:"calc"},transforms:void 0}},{"../scatter/attributes":922}],909:[function(t,e,r){"use strict";var n=t("../../../stackgl_modules").gl_pointcloud2d,i=t("../../lib/str2rgbarray"),a=t("../../plots/cartesian/autorange").findExtremes,o=t("../scatter/get_trace_color");function s(t,e){this.scene=t,this.uid=e,this.type="pointcloud",this.pickXData=[],this.pickYData=[],this.xData=[],this.yData=[],this.textLabels=[],this.color="rgb(0, 0, 0)",this.name="",this.hoverinfo="all",this.idToIndex=new Int32Array(0),this.bounds=[0,0,0,0],this.pointcloudOptions={positions:new Float32Array(0),idToIndex:this.idToIndex,sizemin:.5,sizemax:12,color:[0,0,0,1],areaRatio:1,borderColor:[0,0,0,1]},this.pointcloud=n(t.glplot,this.pointcloudOptions),this.pointcloud._trace=this}var l=s.prototype;l.handlePick=function(t){var 
e=this.idToIndex[t.pointId];return{trace:this,dataCoord:t.dataCoord,traceCoord:this.pickXYData?[this.pickXYData[2*e],this.pickXYData[2*e+1]]:[this.pickXData[e],this.pickYData[e]],textLabel:Array.isArray(this.textLabels)?this.textLabels[e]:this.textLabels,color:this.color,name:this.name,pointIndex:e,hoverinfo:this.hoverinfo}},l.update=function(t){this.index=t.index,this.textLabels=t.text,this.name=t.name,this.hoverinfo=t.hoverinfo,this.bounds=[1/0,1/0,-1/0,-1/0],this.updateFast(t),this.color=o(t,{})},l.updateFast=function(t){var e,r,n,o,s,l,c=this.xData=this.pickXData=t.x,u=this.yData=this.pickYData=t.y,f=this.pickXYData=t.xy,h=t.xbounds&&t.ybounds,p=t.indices,d=this.bounds;if(f){if(n=f,e=f.length>>>1,h)d[0]=t.xbounds[0],d[2]=t.xbounds[1],d[1]=t.ybounds[0],d[3]=t.ybounds[1];else for(l=0;ld[2]&&(d[2]=o),sd[3]&&(d[3]=s);if(p)r=p;else for(r=new Int32Array(e),l=0;ld[2]&&(d[2]=o),sd[3]&&(d[3]=s);this.idToIndex=r,this.pointcloudOptions.idToIndex=r,this.pointcloudOptions.positions=n;var m=i(t.marker.color),g=i(t.marker.border.color),v=t.opacity*t.marker.opacity;m[3]*=v,this.pointcloudOptions.color=m;var y=t.marker.blend;if(null===y){y=c.length<100||u.length<100}this.pointcloudOptions.blend=y,g[3]*=v,this.pointcloudOptions.borderColor=g;var x=t.marker.sizemin,b=Math.max(t.marker.sizemax,t.marker.sizemin);this.pointcloudOptions.sizeMin=x,this.pointcloudOptions.sizeMax=b,this.pointcloudOptions.areaRatio=t.marker.border.arearatio,this.pointcloud.update(this.pointcloudOptions);var _=this.scene.xaxis,w=this.scene.yaxis,T=b/2||.5;t._extremes[_._id]=a(_,[d[0],d[2]],{ppad:T}),t._extremes[w._id]=a(w,[d[1],d[3]],{ppad:T})},l.dispose=function(){this.pointcloud.dispose()},e.exports=function(t,e){var r=new s(t,e.uid);return r.update(e),r}},{"../../../stackgl_modules":1119,"../../lib/str2rgbarray":523,"../../plots/cartesian/autorange":548,"../scatter/get_trace_color":932}],910:[function(t,e,r){"use strict";var n=t("../../lib"),i=t("./attributes");e.exports=function(t,e,r){function 
a(r,a){return n.coerce(t,e,i,r,a)}a("x"),a("y"),a("xbounds"),a("ybounds"),t.xy&&t.xy instanceof Float32Array&&(e.xy=t.xy),t.indices&&t.indices instanceof Int32Array&&(e.indices=t.indices),a("text"),a("marker.color",r),a("marker.opacity"),a("marker.blend"),a("marker.sizemin"),a("marker.sizemax"),a("marker.border.color",r),a("marker.border.arearatio"),e._length=null}},{"../../lib":498,"./attributes":908}],911:[function(t,e,r){"use strict";["*pointcloud* trace is deprecated!","Please consider switching to the *scattergl* trace type."].join(" ");e.exports={attributes:t("./attributes"),supplyDefaults:t("./defaults"),calc:t("../scatter3d/calc"),plot:t("./convert"),moduleType:"trace",name:"pointcloud",basePlotModule:t("../../plots/gl2d"),categories:["gl","gl2d","showLegend"],meta:{}}},{"../../plots/gl2d":591,"../scatter3d/calc":951,"./attributes":908,"./convert":909,"./defaults":910}],912:[function(t,e,r){"use strict";var n=t("../../plots/font_attributes"),i=t("../../plots/attributes"),a=t("../../components/color/attributes"),o=t("../../components/fx/attributes"),s=t("../../plots/domain").attributes,l=t("../../plots/template_attributes").hovertemplateAttrs,c=t("../../components/colorscale/attributes"),u=t("../../plot_api/plot_template").templatedArray,f=t("../../plots/cartesian/axis_format_attributes").descriptionOnlyNumbers,h=t("../../lib/extend").extendFlat,p=t("../../plot_api/edit_types").overrideAll;(e.exports=p({hoverinfo:h({},i.hoverinfo,{flags:[],arrayOk:!1}),hoverlabel:o.hoverlabel,domain:s({name:"sankey",trace:!0}),orientation:{valType:"enumerated",values:["v","h"],dflt:"h"},valueformat:{valType:"string",dflt:".3s",description:f("value")},valuesuffix:{valType:"string",dflt:""},arrangement:{valType:"enumerated",values:["snap","perpendicular","freeform","fixed"],dflt:"snap"},textfont:n({}),customdata:void 
0,node:{label:{valType:"data_array",dflt:[]},groups:{valType:"info_array",impliedEdits:{x:[],y:[]},dimensions:2,freeLength:!0,dflt:[],items:{valType:"number",editType:"calc"}},x:{valType:"data_array",dflt:[]},y:{valType:"data_array",dflt:[]},color:{valType:"color",arrayOk:!0},customdata:{valType:"data_array",editType:"calc"},line:{color:{valType:"color",dflt:a.defaultLine,arrayOk:!0},width:{valType:"number",min:0,dflt:.5,arrayOk:!0}},pad:{valType:"number",arrayOk:!1,min:0,dflt:20},thickness:{valType:"number",arrayOk:!1,min:1,dflt:20},hoverinfo:{valType:"enumerated",values:["all","none","skip"],dflt:"all"},hoverlabel:o.hoverlabel,hovertemplate:l({},{keys:["value","label"]})},link:{label:{valType:"data_array",dflt:[]},color:{valType:"color",arrayOk:!0},customdata:{valType:"data_array",editType:"calc"},line:{color:{valType:"color",dflt:a.defaultLine,arrayOk:!0},width:{valType:"number",min:0,dflt:0,arrayOk:!0}},source:{valType:"data_array",dflt:[]},target:{valType:"data_array",dflt:[]},value:{valType:"data_array",dflt:[]},hoverinfo:{valType:"enumerated",values:["all","none","skip"],dflt:"all"},hoverlabel:o.hoverlabel,hovertemplate:l({},{keys:["value","label"]}),colorscales:u("concentrationscales",{editType:"calc",label:{valType:"string",editType:"calc",dflt:""},cmax:{valType:"number",editType:"calc",dflt:1},cmin:{valType:"number",editType:"calc",dflt:0},colorscale:h(c().colorscale,{dflt:[[0,"white"],[1,"black"]]})})}},"calc","nested")).transforms=void 0},{"../../components/color/attributes":360,"../../components/colorscale/attributes":368,"../../components/fx/attributes":392,"../../lib/extend":488,"../../plot_api/edit_types":531,"../../plot_api/plot_template":538,"../../plots/attributes":545,"../../plots/cartesian/axis_format_attributes":552,"../../plots/domain":579,"../../plots/font_attributes":580,"../../plots/template_attributes":628}],913:[function(t,e,r){"use strict";var 
n=t("../../plot_api/edit_types").overrideAll,i=t("../../plots/get_data").getModuleCalcData,a=t("./plot"),o=t("../../components/fx/layout_attributes"),s=t("../../lib/setcursor"),l=t("../../components/dragelement"),c=t("../../plots/cartesian/select").prepSelect,u=t("../../lib"),f=t("../../registry");function h(t,e){var r=t._fullData[e],n=t._fullLayout,i=n.dragmode,a="pan"===n.dragmode?"move":"crosshair",o=r._bgRect;if("pan"!==i&&"zoom"!==i){s(o,a);var h={_id:"x",c2p:u.identity,_offset:r._sankey.translateX,_length:r._sankey.width},p={_id:"y",c2p:u.identity,_offset:r._sankey.translateY,_length:r._sankey.height},d={gd:t,element:o.node(),plotinfo:{id:e,xaxis:h,yaxis:p,fillRangeItems:u.noop},subplot:e,xaxes:[h],yaxes:[p],doneFnCompleted:function(r){var n,i=t._fullData[e],a=i.node.groups.slice(),o=[];function s(t){for(var e=i._sankey.graph.nodes,r=0;ry&&(y=a.source[e]),a.target[e]>y&&(y=a.target[e]);var x,b=y+1;t.node._count=b;var _=t.node.groups,w={};for(e=0;e<_.length;e++){var T=_[e];for(x=0;x0&&s(E,b)&&s(L,b)&&(!w.hasOwnProperty(E)||!w.hasOwnProperty(L)||w[E]!==w[L])){w.hasOwnProperty(L)&&(L=w[L]),w.hasOwnProperty(E)&&(E=w[E]),L=+L,h[E=+E]=h[L]=!0;var C="";a.label&&a.label[e]&&(C=a.label[e]);var P=null;C&&p.hasOwnProperty(C)&&(P=p[C]),c.push({pointNumber:e,label:C,color:u?a.color[e]:a.color,customdata:f?a.customdata[e]:a.customdata,concentrationscale:P,source:E,target:L,value:+S}),M.source.push(E),M.target.push(L)}}var I=b+_.length,O=o(r.color),z=o(r.customdata),D=[];for(e=0;eb-1,childrenNodes:[],pointNumber:e,label:R,color:O?r.color[e]:r.color,customdata:z?r.customdata[e]:r.customdata})}var F=!1;return function(t,e,r){for(var a=i.init2dArray(t,0),o=0;o1}))}(I,M.source,M.target)&&(F=!0),{circular:F,links:c,nodes:D,groups:_,groupLookup:w}}e.exports=function(t,e){var r=c(e);return 
a({circular:r.circular,_nodes:r.nodes,_links:r.links,_groups:r.groups,_groupLookup:r.groupLookup})}},{"../../components/colorscale":373,"../../lib":498,"../../lib/gup":495,"strongly-connected-components":301}],915:[function(t,e,r){"use strict";e.exports={nodeTextOffsetHorizontal:4,nodeTextOffsetVertical:3,nodePadAcross:10,sankeyIterations:50,forceIterations:5,forceTicksPerFrame:10,duration:500,ease:"linear",cn:{sankey:"sankey",sankeyLinks:"sankey-links",sankeyLink:"sankey-link",sankeyNodeSet:"sankey-node-set",sankeyNode:"sankey-node",nodeRect:"node-rect",nodeLabel:"node-label"}}},{}],916:[function(t,e,r){"use strict";var n=t("../../lib"),i=t("./attributes"),a=t("../../components/color"),o=t("tinycolor2"),s=t("../../plots/domain").defaults,l=t("../../components/fx/hoverlabel_defaults"),c=t("../../plot_api/plot_template"),u=t("../../plots/array_container_defaults");function f(t,e){function r(r,a){return n.coerce(t,e,i.link.colorscales,r,a)}r("label"),r("cmin"),r("cmax"),r("colorscale")}e.exports=function(t,e,r,h){function p(r,a){return n.coerce(t,e,i,r,a)}var d=n.extendDeep(h.hoverlabel,t.hoverlabel),m=t.node,g=c.newContainer(e,"node");function v(t,e){return n.coerce(m,g,i.node,t,e)}v("label"),v("groups"),v("x"),v("y"),v("pad"),v("thickness"),v("line.color"),v("line.width"),v("hoverinfo",t.hoverinfo),l(m,g,v,d),v("hovertemplate");var y=h.colorway;v("color",g.label.map((function(t,e){return a.addOpacity(function(t){return y[t%y.length]}(e),.8)}))),v("customdata");var x=t.link||{},b=c.newContainer(e,"link");function _(t,e){return n.coerce(x,b,i.link,t,e)}_("label"),_("source"),_("target"),_("value"),_("line.color"),_("line.width"),_("hoverinfo",t.hoverinfo),l(x,b,_,d),_("hovertemplate");var w,T=o(h.paper_bgcolor).getLuminance()<.333?"rgba(255, 255, 255, 0.6)":"rgba(0, 0, 0, 
0.2)";_("color",n.repeat(T,b.value.length)),_("customdata"),u(x,b,{name:"colorscales",handleItemDefaults:f}),s(e,h,p),p("orientation"),p("valueformat"),p("valuesuffix"),g.x.length&&g.y.length&&(w="freeform"),p("arrangement",w),n.coerceFont(p,"textfont",n.extendFlat({},h.font)),e._length=null}},{"../../components/color":361,"../../components/fx/hoverlabel_defaults":399,"../../lib":498,"../../plot_api/plot_template":538,"../../plots/array_container_defaults":544,"../../plots/domain":579,"./attributes":912,tinycolor2:307}],917:[function(t,e,r){"use strict";e.exports={attributes:t("./attributes"),supplyDefaults:t("./defaults"),calc:t("./calc"),plot:t("./plot"),moduleType:"trace",name:"sankey",basePlotModule:t("./base_plot"),selectPoints:t("./select.js"),categories:["noOpacity"],meta:{}}},{"./attributes":912,"./base_plot":913,"./calc":914,"./defaults":916,"./plot":918,"./select.js":920}],918:[function(t,e,r){"use strict";var n=t("@plotly/d3"),i=t("../../lib"),a=i.numberFormat,o=t("./render"),s=t("../../components/fx"),l=t("../../components/color"),c=t("./constants").cn,u=i._;function f(t){return""!==t}function h(t,e){return t.filter((function(t){return t.key===e.traceId}))}function p(t,e){n.select(t).select("path").style("fill-opacity",e),n.select(t).select("rect").style("fill-opacity",e)}function d(t){n.select(t).select("text.name").style("fill","black")}function m(t){return function(e){return-1!==t.node.sourceLinks.indexOf(e.link)||-1!==t.node.targetLinks.indexOf(e.link)}}function g(t){return function(e){return-1!==e.node.sourceLinks.indexOf(t.link)||-1!==e.node.targetLinks.indexOf(t.link)}}function v(t,e,r){e&&r&&h(r,e).selectAll("."+c.sankeyLink).filter(m(e)).call(x.bind(0,e,r,!1))}function y(t,e,r){e&&r&&h(r,e).selectAll("."+c.sankeyLink).filter(m(e)).call(b.bind(0,e,r,!1))}function x(t,e,r,n){var i=n.datum().link.label;n.style("fill-opacity",(function(t){if(!t.link.concentrationscale)return.4})),i&&h(e,t).selectAll("."+c.sankeyLink).filter((function(t){return 
t.link.label===i})).style("fill-opacity",(function(t){if(!t.link.concentrationscale)return.4})),r&&h(e,t).selectAll("."+c.sankeyNode).filter(g(t)).call(v)}function b(t,e,r,n){var i=n.datum().link.label;n.style("fill-opacity",(function(t){return t.tinyColorAlpha})),i&&h(e,t).selectAll("."+c.sankeyLink).filter((function(t){return t.link.label===i})).style("fill-opacity",(function(t){return t.tinyColorAlpha})),r&&h(e,t).selectAll(c.sankeyNode).filter(g(t)).call(y)}function _(t,e){var r=t.hoverlabel||{},n=i.nestedProperty(r,e).get();return!Array.isArray(n)&&n}e.exports=function(t,e){for(var r=t._fullLayout,i=r._paper,h=r._size,m=0;m"),color:_(o,"bgcolor")||l.addOpacity(m.color,1),borderColor:_(o,"bordercolor"),fontFamily:_(o,"font.family"),fontSize:_(o,"font.size"),fontColor:_(o,"font.color"),nameLength:_(o,"namelength"),textAlign:_(o,"align"),idealAlign:n.event.x"),color:_(o,"bgcolor")||i.tinyColorHue,borderColor:_(o,"bordercolor"),fontFamily:_(o,"font.family"),fontSize:_(o,"font.size"),fontColor:_(o,"font.color"),nameLength:_(o,"namelength"),textAlign:_(o,"align"),idealAlign:"left",hovertemplate:o.hovertemplate,hovertemplateLabels:y,eventData:[i.node]},{container:r._hoverlayer.node(),outerContainer:r._paper.node(),gd:t});p(w,.85),d(w)}}},unhover:function(e,i,a){!1!==t._fullLayout.hovermode&&(n.select(e).call(y,i,a),"skip"!==i.node.trace.node.hoverinfo&&(i.node.fullData=i.node.trace,t.emit("plotly_unhover",{event:n.event,points:[i.node]})),s.loneUnhover(r._hoverlayer.node()))},select:function(e,r,i){var a=r.node;a.originalEvent=n.event,t._hoverdata=[a],n.select(e).call(y,r,i),s.click(t,{target:!0})}}})}},{"../../components/color":361,"../../components/fx":401,"../../lib":498,"./constants":915,"./render":919,"@plotly/d3":58}],919:[function(t,e,r){"use strict";var 
n=t("d3-force"),i=t("d3-interpolate").interpolateNumber,a=t("@plotly/d3"),o=t("@plotly/d3-sankey"),s=t("@plotly/d3-sankey-circular"),l=t("./constants"),c=t("tinycolor2"),u=t("../../components/color"),f=t("../../components/drawing"),h=t("../../lib"),p=h.strTranslate,d=h.strRotate,m=t("../../lib/gup"),g=m.keyFun,v=m.repeat,y=m.unwrap,x=t("../../lib/svg_text_utils"),b=t("../../registry"),_=t("../../constants/alignment"),w=_.CAP_SHIFT,T=_.LINE_SPACING;function k(t,e,r){var n,i=y(e),a=i.trace,u=a.domain,f="h"===a.orientation,p=a.node.pad,d=a.node.thickness,m=t.width*(u.x[1]-u.x[0]),g=t.height*(u.y[1]-u.y[0]),v=i._nodes,x=i._links,b=i.circular;(n=b?s.sankeyCircular().circularLinkGap(0):o.sankey()).iterations(l.sankeyIterations).size(f?[m,g]:[g,m]).nodeWidth(d).nodePadding(p).nodeId((function(t){return t.pointNumber})).nodes(v).links(x);var _,w,T,k=n();for(var A in n.nodePadding()=i||(r=i-e.y0)>1e-6&&(e.y0+=r,e.y1+=r),i=e.y1+p}))}(function(t){var e,r,n=t.map((function(t,e){return{x0:t.x0,index:e}})).sort((function(t,e){return t.x0-e.x0})),i=[],a=-1,o=-1/0;for(_=0;_o+d&&(a+=1,e=s.x0),o=s.x0,i[a]||(i[a]=[]),i[a].push(s),r=e-s.x0,s.x0+=r,s.x1+=r}return i}(v=k.nodes));n.update(k)}return{circular:b,key:r,trace:a,guid:h.randstr(),horizontal:f,width:m,height:g,nodePad:a.node.pad,nodeLineColor:a.node.line.color,nodeLineWidth:a.node.line.width,linkLineColor:a.link.line.color,linkLineWidth:a.link.line.width,valueFormat:a.valueformat,valueSuffix:a.valuesuffix,textFont:a.textfont,translateX:u.x[0]*t.width+t.margin.l,translateY:t.height-u.y[1]*t.height+t.margin.t,dragParallel:f?g:m,dragPerpendicular:f?m:g,arrangement:a.arrangement,sankey:n,graph:k,forceLayouts:{},interactionState:{dragInProgress:!1,hovered:!1}}}function A(t,e,r){var n=c(e.color),i=e.source.label+"|"+e.target.label+"__"+r;return 
e.trace=t.trace,e.curveNumber=t.trace.index,{circular:t.circular,key:i,traceId:t.key,pointNumber:e.pointNumber,link:e,tinyColorHue:u.tinyRGB(n),tinyColorAlpha:n.getAlpha(),linkPath:M,linkLineColor:t.linkLineColor,linkLineWidth:t.linkLineWidth,valueFormat:t.valueFormat,valueSuffix:t.valueSuffix,sankey:t.sankey,parent:t,interactionState:t.interactionState,flow:e.flow}}function M(){return function(t){if(t.link.circular)return e=t.link,r=e.width/2,n=e.circularPathData,"top"===e.circularLinkType?"M "+n.targetX+" "+(n.targetY+r)+" L"+n.rightInnerExtent+" "+(n.targetY+r)+"A"+(n.rightLargeArcRadius+r)+" "+(n.rightSmallArcRadius+r)+" 0 0 1 "+(n.rightFullExtent-r)+" "+(n.targetY-n.rightSmallArcRadius)+"L"+(n.rightFullExtent-r)+" "+n.verticalRightInnerExtent+"A"+(n.rightLargeArcRadius+r)+" "+(n.rightLargeArcRadius+r)+" 0 0 1 "+n.rightInnerExtent+" "+(n.verticalFullExtent-r)+"L"+n.leftInnerExtent+" "+(n.verticalFullExtent-r)+"A"+(n.leftLargeArcRadius+r)+" "+(n.leftLargeArcRadius+r)+" 0 0 1 "+(n.leftFullExtent+r)+" "+n.verticalLeftInnerExtent+"L"+(n.leftFullExtent+r)+" "+(n.sourceY-n.leftSmallArcRadius)+"A"+(n.leftLargeArcRadius+r)+" "+(n.leftSmallArcRadius+r)+" 0 0 1 "+n.leftInnerExtent+" "+(n.sourceY+r)+"L"+n.sourceX+" "+(n.sourceY+r)+"L"+n.sourceX+" "+(n.sourceY-r)+"L"+n.leftInnerExtent+" "+(n.sourceY-r)+"A"+(n.leftLargeArcRadius-r)+" "+(n.leftSmallArcRadius-r)+" 0 0 0 "+(n.leftFullExtent-r)+" "+(n.sourceY-n.leftSmallArcRadius)+"L"+(n.leftFullExtent-r)+" "+n.verticalLeftInnerExtent+"A"+(n.leftLargeArcRadius-r)+" "+(n.leftLargeArcRadius-r)+" 0 0 0 "+n.leftInnerExtent+" "+(n.verticalFullExtent+r)+"L"+n.rightInnerExtent+" "+(n.verticalFullExtent+r)+"A"+(n.rightLargeArcRadius-r)+" "+(n.rightLargeArcRadius-r)+" 0 0 0 "+(n.rightFullExtent+r)+" "+n.verticalRightInnerExtent+"L"+(n.rightFullExtent+r)+" "+(n.targetY-n.rightSmallArcRadius)+"A"+(n.rightLargeArcRadius-r)+" "+(n.rightSmallArcRadius-r)+" 0 0 0 "+n.rightInnerExtent+" "+(n.targetY-r)+"L"+n.targetX+" "+(n.targetY-r)+"Z":"M 
"+n.targetX+" "+(n.targetY-r)+" L"+n.rightInnerExtent+" "+(n.targetY-r)+"A"+(n.rightLargeArcRadius+r)+" "+(n.rightSmallArcRadius+r)+" 0 0 0 "+(n.rightFullExtent-r)+" "+(n.targetY+n.rightSmallArcRadius)+"L"+(n.rightFullExtent-r)+" "+n.verticalRightInnerExtent+"A"+(n.rightLargeArcRadius+r)+" "+(n.rightLargeArcRadius+r)+" 0 0 0 "+n.rightInnerExtent+" "+(n.verticalFullExtent+r)+"L"+n.leftInnerExtent+" "+(n.verticalFullExtent+r)+"A"+(n.leftLargeArcRadius+r)+" "+(n.leftLargeArcRadius+r)+" 0 0 0 "+(n.leftFullExtent+r)+" "+n.verticalLeftInnerExtent+"L"+(n.leftFullExtent+r)+" "+(n.sourceY+n.leftSmallArcRadius)+"A"+(n.leftLargeArcRadius+r)+" "+(n.leftSmallArcRadius+r)+" 0 0 0 "+n.leftInnerExtent+" "+(n.sourceY-r)+"L"+n.sourceX+" "+(n.sourceY-r)+"L"+n.sourceX+" "+(n.sourceY+r)+"L"+n.leftInnerExtent+" "+(n.sourceY+r)+"A"+(n.leftLargeArcRadius-r)+" "+(n.leftSmallArcRadius-r)+" 0 0 1 "+(n.leftFullExtent-r)+" "+(n.sourceY+n.leftSmallArcRadius)+"L"+(n.leftFullExtent-r)+" "+n.verticalLeftInnerExtent+"A"+(n.leftLargeArcRadius-r)+" "+(n.leftLargeArcRadius-r)+" 0 0 1 "+n.leftInnerExtent+" "+(n.verticalFullExtent-r)+"L"+n.rightInnerExtent+" "+(n.verticalFullExtent-r)+"A"+(n.rightLargeArcRadius-r)+" "+(n.rightLargeArcRadius-r)+" 0 0 1 "+(n.rightFullExtent+r)+" "+n.verticalRightInnerExtent+"L"+(n.rightFullExtent+r)+" "+(n.targetY+n.rightSmallArcRadius)+"A"+(n.rightLargeArcRadius-r)+" "+(n.rightSmallArcRadius-r)+" 0 0 1 "+n.rightInnerExtent+" "+(n.targetY+r)+"L"+n.targetX+" "+(n.targetY+r)+"Z";var e,r,n,a=t.link.source.x1,o=t.link.target.x0,s=i(a,o),l=s(.5),c=s(.5),u=t.link.y0-t.link.width/2,f=t.link.y0+t.link.width/2,h=t.link.y1-t.link.width/2,p=t.link.y1+t.link.width/2;return"M"+a+","+u+"C"+l+","+u+" "+c+","+h+" "+o+","+h+"L"+o+","+p+"C"+c+","+p+" "+l+","+f+" "+a+","+f+"Z"}}function S(t,e){var r=c(e.color),n=l.nodePadAcross,i=t.nodePad/2;e.dx=e.x1-e.x0,e.dy=e.y1-e.y0;var a=e.dx,o=Math.max(.5,e.dy),s="node_"+e.pointNumber;return 
e.group&&(s=h.randstr()),e.trace=t.trace,e.curveNumber=t.trace.index,{index:e.pointNumber,key:s,partOfGroup:e.partOfGroup||!1,group:e.group,traceId:t.key,trace:t.trace,node:e,nodePad:t.nodePad,nodeLineColor:t.nodeLineColor,nodeLineWidth:t.nodeLineWidth,textFont:t.textFont,size:t.horizontal?t.height:t.width,visibleWidth:Math.ceil(a),visibleHeight:o,zoneX:-n,zoneY:-i,zoneWidth:a+2*n,zoneHeight:o+2*i,labelY:t.horizontal?e.dy/2+1:e.dx/2+1,left:1===e.originalLayer,sizeAcross:t.width,forceLayouts:t.forceLayouts,horizontal:t.horizontal,darkBackground:r.getBrightness()<=128,tinyColorHue:u.tinyRGB(r),tinyColorAlpha:r.getAlpha(),valueFormat:t.valueFormat,valueSuffix:t.valueSuffix,sankey:t.sankey,graph:t.graph,arrangement:t.arrangement,uniqueNodeLabelPathId:[t.guid,t.key,s].join("_"),interactionState:t.interactionState,figure:t}}function E(t){t.attr("transform",(function(t){return p(t.node.x0.toFixed(3),t.node.y0.toFixed(3))}))}function L(t){t.call(E)}function C(t,e){t.call(L),e.attr("d",M())}function P(t){t.attr("width",(function(t){return t.node.x1-t.node.x0})).attr("height",(function(t){return t.visibleHeight}))}function I(t){return t.link.width>1||t.linkLineWidth>0}function O(t){return p(t.translateX,t.translateY)+(t.horizontal?"matrix(1 0 0 1 0 0)":"matrix(0 1 1 0 0 0)")}function z(t,e,r){t.on(".basic",null).on("mouseover.basic",(function(t){t.interactionState.dragInProgress||t.partOfGroup||(r.hover(this,t,e),t.interactionState.hovered=[this,t])})).on("mousemove.basic",(function(t){t.interactionState.dragInProgress||t.partOfGroup||(r.follow(this,t),t.interactionState.hovered=[this,t])})).on("mouseout.basic",(function(t){t.interactionState.dragInProgress||t.partOfGroup||(r.unhover(this,t,e),t.interactionState.hovered=!1)})).on("click.basic",(function(t){t.interactionState.hovered&&(r.unhover(this,t,e),t.interactionState.hovered=!1),t.interactionState.dragInProgress||t.partOfGroup||r.select(this,t,e)}))}function D(t,e,r,i){var 
o=a.behavior.drag().origin((function(t){return{x:t.node.x0+t.visibleWidth/2,y:t.node.y0+t.visibleHeight/2}})).on("dragstart",(function(a){if("fixed"!==a.arrangement&&(h.ensureSingle(i._fullLayout._infolayer,"g","dragcover",(function(t){i._fullLayout._dragCover=t})),h.raiseToTop(this),a.interactionState.dragInProgress=a.node,F(a.node),a.interactionState.hovered&&(r.nodeEvents.unhover.apply(0,a.interactionState.hovered),a.interactionState.hovered=!1),"snap"===a.arrangement)){var o=a.traceId+"|"+a.key;a.forceLayouts[o]?a.forceLayouts[o].alpha(1):function(t,e,r,i){!function(t){for(var e=0;e0&&n.forceLayouts[e].alpha(0)}}(0,e,a,r)).stop()}(0,o,a),function(t,e,r,n,i){window.requestAnimationFrame((function a(){var o;for(o=0;o0)window.requestAnimationFrame(a);else{var s=r.node.originalX;r.node.x0=s-r.visibleWidth/2,r.node.x1=s+r.visibleWidth/2,R(r,i)}}))}(t,e,a,o,i)}})).on("drag",(function(r){if("fixed"!==r.arrangement){var n=a.event.x,i=a.event.y;"snap"===r.arrangement?(r.node.x0=n-r.visibleWidth/2,r.node.x1=n+r.visibleWidth/2,r.node.y0=i-r.visibleHeight/2,r.node.y1=i+r.visibleHeight/2):("freeform"===r.arrangement&&(r.node.x0=n-r.visibleWidth/2,r.node.x1=n+r.visibleWidth/2),i=Math.max(0,Math.min(r.size-r.visibleHeight/2,i)),r.node.y0=i-r.visibleHeight/2,r.node.y1=i+r.visibleHeight/2),F(r.node),"snap"!==r.arrangement&&(r.sankey.update(r.graph),C(t.filter(B(r)),e))}})).on("dragend",(function(t){if("fixed"!==t.arrangement){t.interactionState.dragInProgress=!1;for(var e=0;el&&C[v].gap;)v--;for(x=C[v].s,m=C.length-1;m>v;m--)C[m].s=x;for(;lM[u]&&u=0;i--){var a=t[i];if("scatter"===a.type&&a.xaxis===r.xaxis&&a.yaxis===r.yaxis){a.opacity=void 0;break}}}}}},{}],929:[function(t,e,r){"use strict";var 
n=t("../../lib"),i=t("../../registry"),a=t("./attributes"),o=t("./constants"),s=t("./subtypes"),l=t("./xy_defaults"),c=t("./period_defaults"),u=t("./stack_defaults"),f=t("./marker_defaults"),h=t("./line_defaults"),p=t("./line_shape_defaults"),d=t("./text_defaults"),m=t("./fillcolor_defaults"),g=t("../../lib").coercePattern;e.exports=function(t,e,r,v){function y(r,i){return n.coerce(t,e,a,r,i)}var x=l(t,e,v,y);if(x||(e.visible=!1),e.visible){c(t,e,v,y),y("xhoverformat"),y("yhoverformat");var b=u(t,e,v,y),_=!b&&x=Math.min(e,r)&&d<=Math.max(e,r)?0:1/0}var n=Math.max(3,t.mrc||0),i=1-1/n,a=Math.abs(h.c2p(t.x)-d);return a=Math.min(e,r)&&m<=Math.max(e,r)?0:1/0}var n=Math.max(3,t.mrc||0),i=1-1/n,a=Math.abs(p.c2p(t.y)-m);return aW!=(N=z[I][1])>=W&&(R=z[I-1][0],F=z[I][0],N-B&&(D=R+(F-R)*(W-B)/(N-B),H=Math.min(H,D),q=Math.max(q,D)));H=Math.max(H,0),q=Math.min(q,h._length);var X=s.defaultLine;return s.opacity(f.fillcolor)?X=f.fillcolor:s.opacity((f.line||{}).color)&&(X=f.line.color),n.extendFlat(t,{distance:t.maxHoverDistance,x0:H,x1:q,y0:W,y1:W,color:X,hovertemplate:!1}),delete t.index,f.text&&!Array.isArray(f.text)?t.text=String(f.text):t.text=f.name,[t]}}}},{"../../components/color":361,"../../components/fx":401,"../../lib":498,"../../registry":633,"./get_trace_color":932}],934:[function(t,e,r){"use strict";var 
n=t("./subtypes");e.exports={hasLines:n.hasLines,hasMarkers:n.hasMarkers,hasText:n.hasText,isBubble:n.isBubble,attributes:t("./attributes"),supplyDefaults:t("./defaults"),crossTraceDefaults:t("./cross_trace_defaults"),calc:t("./calc").calc,crossTraceCalc:t("./cross_trace_calc"),arraysToCalcdata:t("./arrays_to_calcdata"),plot:t("./plot"),colorbar:t("./marker_colorbar"),formatLabels:t("./format_labels"),style:t("./style").style,styleOnSelect:t("./style").styleOnSelect,hoverPoints:t("./hover"),selectPoints:t("./select"),animatable:!0,moduleType:"trace",name:"scatter",basePlotModule:t("../../plots/cartesian"),categories:["cartesian","svg","symbols","errorBarsOK","showLegend","scatter-like","zoomScale"],meta:{}}},{"../../plots/cartesian":563,"./arrays_to_calcdata":921,"./attributes":922,"./calc":923,"./cross_trace_calc":927,"./cross_trace_defaults":928,"./defaults":929,"./format_labels":931,"./hover":933,"./marker_colorbar":940,"./plot":943,"./select":944,"./style":946,"./subtypes":947}],935:[function(t,e,r){"use strict";var n=t("../../lib").isArrayOrTypedArray,i=t("../../components/colorscale/helpers").hasColorscale,a=t("../../components/colorscale/defaults");e.exports=function(t,e,r,o,s,l){var c=(t.marker||{}).color;(s("line.color",r),i(t,"line"))?a(t,e,o,s,{prefix:"line.",cLetter:"c"}):s("line.color",!n(c)&&c||r);s("line.width"),(l||{}).noDash||s("line.dash")}},{"../../components/colorscale/defaults":371,"../../components/colorscale/helpers":372,"../../lib":498}],936:[function(t,e,r){"use strict";var n=t("../../constants/numerical"),i=n.BADNUM,a=n.LOG_CLIP,o=a+.5,s=a-.5,l=t("../../lib"),c=l.segmentsIntersect,u=l.constrain,f=t("./constants");e.exports=function(t,e){var r,n,a,h,p,d,m,g,v,y,x,b,_,w,T,k,A,M,S=e.xaxis,E=e.yaxis,L="log"===S.type,C="log"===E.type,P=S._length,I=E._length,O=e.connectGaps,z=e.baseTolerance,D=e.shape,R="linear"===D,F=e.fill&&"none"!==e.fill,B=[],N=f.minTolerance,j=t.length,U=new Array(j),V=0;function H(r){var n=t[r];if(!n)return!1;var 
a=e.linearized?S.l2p(n.x):S.c2p(n.x),l=e.linearized?E.l2p(n.y):E.c2p(n.y);if(a===i){if(L&&(a=S.c2p(n.x,!0)),a===i)return!1;C&&l===i&&(a*=Math.abs(S._m*I*(S._m>0?o:s)/(E._m*P*(E._m>0?o:s)))),a*=1e3}if(l===i){if(C&&(l=E.c2p(n.y,!0)),l===i)return!1;l*=1e3}return[a,l]}function q(t,e,r,n){var i=r-t,a=n-e,o=.5-t,s=.5-e,l=i*i+a*a,c=i*o+a*s;if(c>0&&crt||t[1]it)return[u(t[0],et,rt),u(t[1],nt,it)]}function st(t,e){return t[0]===e[0]&&(t[0]===et||t[0]===rt)||(t[1]===e[1]&&(t[1]===nt||t[1]===it)||void 0)}function lt(t,e,r){return function(n,i){var a=ot(n),o=ot(i),s=[];if(a&&o&&st(a,o))return s;a&&s.push(a),o&&s.push(o);var c=2*l.constrain((n[t]+i[t])/2,e,r)-((a||n)[t]+(o||i)[t]);c&&((a&&o?c>0==a[t]>o[t]?a:o:a||o)[t]+=c);return s}}function ct(t){var e=t[0],r=t[1],n=e===U[V-1][0],i=r===U[V-1][1];if(!n||!i)if(V>1){var a=e===U[V-2][0],o=r===U[V-2][1];n&&(e===et||e===rt)&&a?o?V--:U[V-1]=t:i&&(r===nt||r===it)&&o?a?V--:U[V-1]=t:U[V++]=t}else U[V++]=t}function ut(t){U[V-1][0]!==t[0]&&U[V-1][1]!==t[1]&&ct([Z,J]),ct(t),K=null,Z=J=0}function ft(t){if(A=t[0]/P,M=t[1]/I,W=t[0]rt?rt:0,X=t[1]it?it:0,W||X){if(V)if(K){var e=$(K,t);e.length>1&&(ut(e[0]),U[V++]=e[1])}else Q=$(U[V-1],t)[0],U[V++]=Q;else U[V++]=[W||t[0],X||t[1]];var r=U[V-1];W&&X&&(r[0]!==W||r[1]!==X)?(K&&(Z!==W&&J!==X?ct(Z&&J?(n=K,a=(i=t)[0]-n[0],o=(i[1]-n[1])/a,(n[1]*i[0]-i[1]*n[0])/a>0?[o>0?et:rt,it]:[o>0?rt:et,nt]):[Z||W,J||X]):Z&&J&&ct([Z,J])),ct([W,X])):Z-W&&J-X&&ct([W||Z,X||J]),K=t,Z=W,J=X}else K&&ut($(K,t)[0]),U[V++]=t;var n,i,a,o}for("linear"===D||"spline"===D?$=function(t,e){for(var r=[],n=0,i=0;i<4;i++){var a=at[i],o=c(t[0],t[1],e[0],e[1],a[0],a[1],a[2],a[3]);o&&(!n||Math.abs(o.x-r[0][0])>1||Math.abs(o.y-r[0][1])>1)&&(o=[o.x,o.y],n&&Y(o,t)G(d,ht))break;a=d,(_=v[0]*g[0]+v[1]*g[1])>x?(x=_,h=d,m=!1):_=t.length||!d)break;ft(d),n=d}}else ft(h)}K&&ct([Z||K[0],J||K[1]]),B.push(U.slice(0,V))}return B}},{"../../constants/numerical":474,"../../lib":498,"./constants":926}],937:[function(t,e,r){"use 
strict";e.exports=function(t,e,r){"spline"===r("line.shape")&&r("line.smoothing")}},{}],938:[function(t,e,r){"use strict";var n={tonextx:1,tonexty:1,tonext:1};e.exports=function(t,e,r){var i,a,o,s,l,c={},u=!1,f=-1,h=0,p=-1;for(a=0;a=0?l=p:(l=p=h,h++),l0?Math.max(r,a):0}}},{"fast-isnumeric":185}],940:[function(t,e,r){"use strict";e.exports={container:"marker",min:"cmin",max:"cmax"}},{}],941:[function(t,e,r){"use strict";var n=t("../../components/color"),i=t("../../components/colorscale/helpers").hasColorscale,a=t("../../components/colorscale/defaults"),o=t("./subtypes");e.exports=function(t,e,r,s,l,c){var u=o.isBubble(t),f=(t.line||{}).color;(c=c||{},f&&(r=f),l("marker.symbol"),l("marker.opacity",u?.7:1),l("marker.size"),l("marker.color",r),i(t,"marker")&&a(t,e,s,l,{prefix:"marker.",cLetter:"c"}),c.noSelect||(l("selected.marker.color"),l("unselected.marker.color"),l("selected.marker.size"),l("unselected.marker.size")),c.noLine||(l("marker.line.color",f&&!Array.isArray(f)&&e.marker.color!==f?f:u?n.background:n.defaultLine),i(t,"marker.line")&&a(t,e,s,l,{prefix:"marker.line.",cLetter:"c"}),l("marker.line.width",u?1:0)),u&&(l("marker.sizeref"),l("marker.sizemin"),l("marker.sizemode")),c.gradient)&&("none"!==l("marker.gradient.type")&&l("marker.gradient.color"))}},{"../../components/color":361,"../../components/colorscale/defaults":371,"../../components/colorscale/helpers":372,"./subtypes":947}],942:[function(t,e,r){"use strict";var n=t("../../lib").dateTick0,i=t("../../constants/numerical").ONEWEEK;function a(t,e){return n(e,t%i==0?1:0)}e.exports=function(t,e,r,n,i){if(i||(i={x:!0,y:!0}),i.x){var o=n("xperiod");o&&(n("xperiod0",a(o,e.xcalendar)),n("xperiodalignment"))}if(i.y){var s=n("yperiod");s&&(n("yperiod0",a(s,e.ycalendar)),n("yperiodalignment"))}}},{"../../constants/numerical":474,"../../lib":498}],943:[function(t,e,r){"use strict";var 
n=t("@plotly/d3"),i=t("../../registry"),a=t("../../lib"),o=a.ensureSingle,s=a.identity,l=t("../../components/drawing"),c=t("./subtypes"),u=t("./line_points"),f=t("./link_traces"),h=t("../../lib/polygon").tester;function p(t,e,r,f,p,d,m){var g;!function(t,e,r,i,o){var s=r.xaxis,l=r.yaxis,u=n.extent(a.simpleMap(s.range,s.r2c)),f=n.extent(a.simpleMap(l.range,l.r2c)),h=i[0].trace;if(!c.hasMarkers(h))return;var p=h.marker.maxdisplayed;if(0===p)return;var d=i.filter((function(t){return t.x>=u[0]&&t.x<=u[1]&&t.y>=f[0]&&t.y<=f[1]})),m=Math.ceil(d.length/p),g=0;o.forEach((function(t,r){var n=t[0].trace;c.hasMarkers(n)&&n.marker.maxdisplayed>0&&r0;function y(t){return v?t.transition():t}var x=r.xaxis,b=r.yaxis,_=f[0].trace,w=_.line,T=n.select(d),k=o(T,"g","errorbars"),A=o(T,"g","lines"),M=o(T,"g","points"),S=o(T,"g","text");if(i.getComponentMethod("errorbars","plot")(t,k,r,m),!0===_.visible){var E,L;y(T).style("opacity",_.opacity);var C=_.fill.charAt(_.fill.length-1);"x"!==C&&"y"!==C&&(C=""),f[0][r.isRangePlot?"nodeRangePlot3":"node3"]=T;var P,I,O="",z=[],D=_._prevtrace;D&&(O=D._prevRevpath||"",L=D._nextFill,z=D._polygons);var R,F,B,N,j,U,V,H="",q="",G=[],Y=a.noop;if(E=_._ownFill,c.hasLines(_)||"none"!==_.fill){for(L&&L.datum(f),-1!==["hv","vh","hvh","vhv"].indexOf(w.shape)?(R=l.steps(w.shape),F=l.steps(w.shape.split("").reverse().join(""))):R=F="spline"===w.shape?function(t){var e=t[t.length-1];return t.length>1&&t[0][0]===e[0]&&t[0][1]===e[1]?l.smoothclosed(t.slice(1),w.smoothing):l.smoothopen(t,w.smoothing)}:function(t){return"M"+t.join("L")},B=function(t){return F(t.reverse())},G=u(f,{xaxis:x,yaxis:b,connectGaps:_.connectgaps,baseTolerance:Math.max(w.width||1,3)/4,shape:w.shape,simplify:w.simplify,fill:_.fill}),V=_._polygons=new Array(G.length),g=0;g1){var r=n.select(this);if(r.datum(f),t)y(r.style("opacity",0).attr("d",P).call(l.lineGroupStyle)).style("opacity",1);else{var i=y(r);i.attr("d",P),l.singleLineStyle(f,i)}}}}}var 
W=A.selectAll(".js-line").data(G);y(W.exit()).style("opacity",0).remove(),W.each(Y(!1)),W.enter().append("path").classed("js-line",!0).style("vector-effect","non-scaling-stroke").call(l.lineGroupStyle).each(Y(!0)),l.setClipUrl(W,r.layerClipId,t),G.length?(E?(E.datum(f),N&&U&&(C?("y"===C?N[1]=U[1]=b.c2p(0,!0):"x"===C&&(N[0]=U[0]=x.c2p(0,!0)),y(E).attr("d","M"+U+"L"+N+"L"+H.substr(1)).call(l.singleFillStyle,t)):y(E).attr("d",H+"Z").call(l.singleFillStyle,t))):L&&("tonext"===_.fill.substr(0,6)&&H&&O?("tonext"===_.fill?y(L).attr("d",H+"Z"+O+"Z").call(l.singleFillStyle,t):y(L).attr("d",H+"L"+O.substr(1)+"Z").call(l.singleFillStyle,t),_._polygons=_._polygons.concat(z)):(Z(L),_._polygons=null)),_._prevRevpath=q,_._prevPolygons=V):(E?Z(E):L&&Z(L),_._polygons=_._prevRevpath=_._prevPolygons=null),M.datum(f),S.datum(f),function(e,i,a){var o,u=a[0].trace,f=c.hasMarkers(u),h=c.hasText(u),p=tt(u),d=et,m=et;if(f||h){var g=s,_=u.stackgroup,w=_&&"infer zero"===t._fullLayout._scatterStackOpts[x._id+b._id][_].stackgaps;u.marker.maxdisplayed||u._needsCull?g=w?K:J:_&&!w&&(g=Q),f&&(d=g),h&&(m=g)}var T,k=(o=e.selectAll("path.point").data(d,p)).enter().append("path").classed("point",!0);v&&k.call(l.pointStyle,u,t).call(l.translatePoints,x,b).style("opacity",0).transition().style("opacity",1),o.order(),f&&(T=l.makePointStyleFns(u)),o.each((function(e){var i=n.select(this),a=y(i);l.translatePoint(e,a,x,b)?(l.singlePointStyle(e,a,u,T,t),r.layerClipId&&l.hideOutsideRangePoint(e,a,x,b,u.xcalendar,u.ycalendar),u.customdata&&i.classed("plotly-customdata",null!==e.data&&void 0!==e.data)):a.remove()})),v?o.exit().transition().style("opacity",0).remove():o.exit().remove(),(o=i.selectAll("g").data(m,p)).enter().append("g").classed("textpoint",!0).append("text"),o.order(),o.each((function(t){var 
e=n.select(this),i=y(e.select("text"));l.translatePoint(t,i,x,b)?r.layerClipId&&l.hideOutsideRangePoint(t,e,x,b,u.xcalendar,u.ycalendar):e.remove()})),o.selectAll("text").call(l.textPointStyle,u,t).each((function(t){var e=x.c2p(t.x),r=b.c2p(t.y);n.select(this).selectAll("tspan.line").each((function(){y(n.select(this)).attr({x:e,y:r})}))})),o.exit().remove()}(M,S,f);var X=!1===_.cliponaxis?null:r.layerClipId;l.setClipUrl(M,X,t),l.setClipUrl(S,X,t)}function Z(t){y(t).attr("d","M0,0Z")}function J(t){return t.filter((function(t){return!t.gap&&t.vis}))}function K(t){return t.filter((function(t){return t.vis}))}function Q(t){return t.filter((function(t){return!t.gap}))}function $(t){return t.id}function tt(t){if(t.ids)return $}function et(){return!1}}e.exports=function(t,e,r,i,a,c){var u,h,d=!a,m=!!a&&a.duration>0,g=f(t,e,r);((u=i.selectAll("g.trace").data(g,(function(t){return t[0].trace.uid}))).enter().append("g").attr("class",(function(t){return"trace scatter trace"+t[0].trace.uid})).style("stroke-miterlimit",2),u.order(),function(t,e,r){e.each((function(e){var i=o(n.select(this),"g","fills");l.setClipUrl(i,r.layerClipId,t);var a=e[0].trace,c=[];a._ownfill&&c.push("_ownFill"),a._nexttrace&&c.push("_nextFill");var u=i.selectAll("g").data(c,s);u.enter().append("g"),u.exit().each((function(t){a[t]=null})).remove(),u.order().each((function(t){a[t]=o(n.select(this),"path","js-fill")}))}))}(t,u,e),m)?(c&&(h=c()),n.transition().duration(a.duration).ease(a.easing).each("end",(function(){h&&h()})).each("interrupt",(function(){h&&h()})).each((function(){i.selectAll("g.trace").each((function(r,n){p(t,n,e,r,g,this,a)}))}))):u.each((function(r,n){p(t,n,e,r,g,this,a)}));d&&u.exit().remove(),i.selectAll("path:not([d])").remove()}},{"../../components/drawing":383,"../../lib":498,"../../lib/polygon":510,"../../registry":633,"./line_points":936,"./link_traces":938,"./subtypes":947,"@plotly/d3":58}],944:[function(t,e,r){"use strict";var n=t("./subtypes");e.exports=function(t,e){var 
r,i,a,o,s=t.cd,l=t.xaxis,c=t.yaxis,u=[],f=s[0].trace;if(!n.hasMarkers(f)&&!n.hasText(f))return[];if(!1===e)for(r=0;r0){var h=i.c2l(u);i._lowerLogErrorBound||(i._lowerLogErrorBound=h),i._lowerErrorBound=Math.min(i._lowerLogErrorBound,h)}}else o[s]=[-l[0]*r,l[1]*r]}return o}e.exports=function(t,e,r){var n=[i(t.x,t.error_x,e[0],r.xaxis),i(t.y,t.error_y,e[1],r.yaxis),i(t.z,t.error_z,e[2],r.zaxis)],a=function(t){for(var e=0;e-1?-1:t.indexOf("right")>-1?1:0}function b(t){return null==t?0:t.indexOf("top")>-1?-1:t.indexOf("bottom")>-1?1:0}function _(t,e){return e(4*t)}function w(t){return p[t]}function T(t,e,r,n,i){var a=null;if(l.isArrayOrTypedArray(t)){a=[];for(var o=0;o=0){var m=function(t,e,r){var n,i=(r+1)%3,a=(r+2)%3,o=[],l=[];for(n=0;n=0&&f("surfacecolor",h||p);for(var d=["x","y","z"],m=0;m<3;++m){var g="projection."+d[m];f(g+".show")&&(f(g+".opacity"),f(g+".scale"))}var v=n.getComponentMethod("errorbars","supplyDefaults");v(t,e,h||p||r,{axis:"z"}),v(t,e,h||p||r,{axis:"y",inherit:"z"}),v(t,e,h||p||r,{axis:"x",inherit:"z"})}else e.visible=!1}},{"../../lib":498,"../../registry":633,"../scatter/line_defaults":935,"../scatter/marker_defaults":941,"../scatter/subtypes":947,"../scatter/text_defaults":948,"./attributes":950}],955:[function(t,e,r){"use strict";e.exports={plot:t("./convert"),attributes:t("./attributes"),markerSymbols:t("../../constants/gl3d_markers"),supplyDefaults:t("./defaults"),colorbar:[{container:"marker",min:"cmin",max:"cmax"},{container:"line",min:"cmin",max:"cmax"}],calc:t("./calc"),moduleType:"trace",name:"scatter3d",basePlotModule:t("../../plots/gl3d"),categories:["gl3d","symbols","showLegend","scatter-like"],meta:{}}},{"../../constants/gl3d_markers":472,"../../plots/gl3d":593,"./attributes":950,"./calc":951,"./convert":953,"./defaults":954}],956:[function(t,e,r){"use strict";var 
n=t("../scatter/attributes"),i=t("../../plots/attributes"),a=t("../../plots/template_attributes").hovertemplateAttrs,o=t("../../plots/template_attributes").texttemplateAttrs,s=t("../../components/colorscale/attributes"),l=t("../../lib/extend").extendFlat,c=n.marker,u=n.line,f=c.line;e.exports={carpet:{valType:"string",editType:"calc"},a:{valType:"data_array",editType:"calc"},b:{valType:"data_array",editType:"calc"},mode:l({},n.mode,{dflt:"markers"}),text:l({},n.text,{}),texttemplate:o({editType:"plot"},{keys:["a","b","text"]}),hovertext:l({},n.hovertext,{}),line:{color:u.color,width:u.width,dash:u.dash,shape:l({},u.shape,{values:["linear","spline"]}),smoothing:u.smoothing,editType:"calc"},connectgaps:n.connectgaps,fill:l({},n.fill,{values:["none","toself","tonext"],dflt:"none"}),fillcolor:n.fillcolor,marker:l({symbol:c.symbol,opacity:c.opacity,maxdisplayed:c.maxdisplayed,size:c.size,sizeref:c.sizeref,sizemin:c.sizemin,sizemode:c.sizemode,line:l({width:f.width,editType:"calc"},s("marker.line")),gradient:c.gradient,editType:"calc"},s("marker")),textfont:n.textfont,textposition:n.textposition,selected:n.selected,unselected:n.unselected,hoverinfo:l({},i.hoverinfo,{flags:["a","b","text","name"]}),hoveron:n.hoveron,hovertemplate:a()}},{"../../components/colorscale/attributes":368,"../../lib/extend":488,"../../plots/attributes":545,"../../plots/template_attributes":628,"../scatter/attributes":922}],957:[function(t,e,r){"use strict";var n=t("fast-isnumeric"),i=t("../scatter/colorscale_calc"),a=t("../scatter/arrays_to_calcdata"),o=t("../scatter/calc_selection"),s=t("../scatter/calc").calcMarkerSize,l=t("../carpet/lookup_carpetid");e.exports=function(t,e){var r=e._carpetTrace=l(t,e);if(r&&r.visible&&"legendonly"!==r.visible){var c;e.xaxis=r.xaxis,e.yaxis=r.yaxis;var u,f,h=e._length,p=new Array(h),d=!1;for(c=0;c")}return o}function y(t,e){var r;r=t.labelprefix&&t.labelprefix.length>0?t.labelprefix.replace(/ = $/,""):t._hovertitle,g.push(r+": 
"+e.toFixed(3)+t.labelsuffix)}}},{"../../lib":498,"../scatter/hover":933}],962:[function(t,e,r){"use strict";e.exports={attributes:t("./attributes"),supplyDefaults:t("./defaults"),colorbar:t("../scatter/marker_colorbar"),formatLabels:t("./format_labels"),calc:t("./calc"),plot:t("./plot"),style:t("../scatter/style").style,styleOnSelect:t("../scatter/style").styleOnSelect,hoverPoints:t("./hover"),selectPoints:t("../scatter/select"),eventData:t("./event_data"),moduleType:"trace",name:"scattercarpet",basePlotModule:t("../../plots/cartesian"),categories:["svg","carpet","symbols","showLegend","carpetDependent","zoomScale"],meta:{}}},{"../../plots/cartesian":563,"../scatter/marker_colorbar":940,"../scatter/select":944,"../scatter/style":946,"./attributes":956,"./calc":957,"./defaults":958,"./event_data":959,"./format_labels":960,"./hover":961,"./plot":963}],963:[function(t,e,r){"use strict";var n=t("../scatter/plot"),i=t("../../plots/cartesian/axes"),a=t("../../components/drawing");e.exports=function(t,e,r,o){var s,l,c,u=r[0][0].carpet,f={xaxis:i.getFromId(t,u.xaxis||"x"),yaxis:i.getFromId(t,u.yaxis||"y"),plot:e.plot};for(n(t,f,r,o),s=0;s")}(c,m,t,l[0].t.labels),t.hovertemplate=c.hovertemplate,[t]}}},{"../../components/fx":401,"../../constants/numerical":474,"../../lib":498,"../scatter/get_trace_color":932,"./attributes":964}],970:[function(t,e,r){"use 
strict";e.exports={attributes:t("./attributes"),supplyDefaults:t("./defaults"),colorbar:t("../scatter/marker_colorbar"),formatLabels:t("./format_labels"),calc:t("./calc"),calcGeoJSON:t("./plot").calcGeoJSON,plot:t("./plot").plot,style:t("./style"),styleOnSelect:t("../scatter/style").styleOnSelect,hoverPoints:t("./hover"),eventData:t("./event_data"),selectPoints:t("./select"),moduleType:"trace",name:"scattergeo",basePlotModule:t("../../plots/geo"),categories:["geo","symbols","showLegend","scatter-like"],meta:{}}},{"../../plots/geo":584,"../scatter/marker_colorbar":940,"../scatter/style":946,"./attributes":964,"./calc":965,"./defaults":966,"./event_data":967,"./format_labels":968,"./hover":969,"./plot":971,"./select":972,"./style":973}],971:[function(t,e,r){"use strict";var n=t("@plotly/d3"),i=t("../../lib"),a=t("../../lib/topojson_utils").getTopojsonFeatures,o=t("../../lib/geojson_utils"),s=t("../../lib/geo_location_utils"),l=t("../../plots/cartesian/autorange").findExtremes,c=t("../../constants/numerical").BADNUM,u=t("../scatter/calc").calcMarkerSize,f=t("../scatter/subtypes"),h=t("./style");e.exports={calcGeoJSON:function(t,e){var r,n,i=t[0].trace,o=e[i.geo],f=o._subplot,h=i._length;if(Array.isArray(i.locations)){var p=i.locationmode,d="geojson-id"===p?s.extractTraceFeature(t):a(i,f.topojson);for(r=0;r=g,w=2*b,T={},k=l.makeCalcdata(e,"x"),A=y.makeCalcdata(e,"y"),M=s(e,l,"x",k),S=s(e,y,"y",A),E=M.vals,L=S.vals;e._x=E,e._y=L,e.xperiodalignment&&(e._origX=k,e._xStarts=M.starts,e._xEnds=M.ends),e.yperiodalignment&&(e._origY=A,e._yStarts=S.starts,e._yEnds=S.ends);var C=new Array(w),P=new Array(b);for(r=0;r1&&i.extendFlat(s.line,p.linePositions(t,r,n));if(s.errorX||s.errorY){var 
l=p.errorBarPositions(t,r,n,a,o);s.errorX&&i.extendFlat(s.errorX,l.x),s.errorY&&i.extendFlat(s.errorY,l.y)}s.text&&(i.extendFlat(s.text,{positions:n},p.textPosition(t,r,s.text,s.marker)),i.extendFlat(s.textSel,{positions:n},p.textPosition(t,r,s.text,s.markerSel)),i.extendFlat(s.textUnsel,{positions:n},p.textPosition(t,r,s.text,s.markerUnsel)));return s}(t,0,e,C,E,L),z=d(t,x);return f(o,e),_?O.marker&&(I=O.marker.sizeAvg||Math.max(O.marker.size,3)):I=c(e,b),u(t,e,l,y,E,L,I),O.errorX&&v(e,l,O.errorX),O.errorY&&v(e,y,O.errorY),O.fill&&!z.fill2d&&(z.fill2d=!0),O.marker&&!z.scatter2d&&(z.scatter2d=!0),O.line&&!z.line2d&&(z.line2d=!0),!O.errorX&&!O.errorY||z.error2d||(z.error2d=!0),O.text&&!z.glText&&(z.glText=!0),O.marker&&(O.marker.snap=b),z.lineOptions.push(O.line),z.errorXOptions.push(O.errorX),z.errorYOptions.push(O.errorY),z.fillOptions.push(O.fill),z.markerOptions.push(O.marker),z.markerSelectedOptions.push(O.markerSel),z.markerUnselectedOptions.push(O.markerUnsel),z.textOptions.push(O.text),z.textSelectedOptions.push(O.textSel),z.textUnselectedOptions.push(O.textUnsel),z.selectBatch.push([]),z.unselectBatch.push([]),T._scene=z,T.index=z.count,T.x=E,T.y=L,T.positions=C,z.count++,[{x:!1,y:!1,t:T,trace:e}]}},{"../../constants/numerical":474,"../../lib":498,"../../plots/cartesian/align_period":546,"../../plots/cartesian/autorange":548,"../../plots/cartesian/axis_ids":553,"../scatter/calc":923,"../scatter/colorscale_calc":925,"./constants":977,"./convert":978,"./scene_update":986,"@plotly/point-cluster":59}],977:[function(t,e,r){"use strict";e.exports={TOO_MANY_POINTS:1e5,SYMBOL_SDF_SIZE:200,SYMBOL_SIZE:20,SYMBOL_STROKE:1,DOT_RE:/-dot/,OPEN_RE:/-open/,DASHES:{solid:[1],dot:[1,1],dash:[4,1],longdash:[8,1],dashdot:[4,1,1,1],longdashdot:[8,1,1,1]}}},{}],978:[function(t,e,r){"use strict";var 
n=t("fast-isnumeric"),i=t("svg-path-sdf"),a=t("color-normalize"),o=t("../../registry"),s=t("../../lib"),l=t("../../components/drawing"),c=t("../../plots/cartesian/axis_ids"),u=t("../../lib/gl_format_color").formatColor,f=t("../scatter/subtypes"),h=t("../scatter/make_bubble_size_func"),p=t("./helpers"),d=t("./constants"),m=t("../../constants/interactions").DESELECTDIM,g={start:1,left:1,end:-1,right:-1,middle:0,center:0,bottom:1,top:-1},v=t("../../components/fx/helpers").appendArrayPointValue;function y(t,e){var r,i=t._fullLayout,a=e._length,o=e.textfont,l=e.textposition,c=Array.isArray(l)?l:[l],u=o.color,f=o.size,h=o.family,p={},d=t._context.plotGlPixelRatio,m=e.texttemplate;if(m){p.text=[];var g=i._d3locale,y=Array.isArray(m),x=y?Math.min(m.length,a):a,b=y?function(t){return m[t]}:function(){return m};for(r=0;rd.TOO_MANY_POINTS||f.hasMarkers(e)?"rect":"round";if(c&&e.connectgaps){var h=n[0],p=n[1];for(i=0;i1?l[i]:l[0]:l,d=Array.isArray(c)?c.length>1?c[i]:c[0]:c,m=g[p],v=g[d],y=u?u/.8+1:0,x=-v*y-.5*v;o.offset[i]=[m*y/h,x/h]}}return o}}},{"../../components/drawing":383,"../../components/fx/helpers":397,"../../constants/interactions":473,"../../lib":498,"../../lib/gl_format_color":494,"../../plots/cartesian/axis_ids":553,"../../registry":633,"../scatter/make_bubble_size_func":939,"../scatter/subtypes":947,"./constants":977,"./helpers":982,"color-normalize":84,"fast-isnumeric":185,"svg-path-sdf":305}],979:[function(t,e,r){"use strict";var n=t("../../lib"),i=t("../../registry"),a=t("./helpers"),o=t("./attributes"),s=t("../scatter/constants"),l=t("../scatter/subtypes"),c=t("../scatter/xy_defaults"),u=t("../scatter/period_defaults"),f=t("../scatter/marker_defaults"),h=t("../scatter/line_defaults"),p=t("../scatter/fillcolor_defaults"),d=t("../scatter/text_defaults");e.exports=function(t,e,r,m){function g(r,i){return n.coerce(t,e,o,r,i)}var v=!!t.marker&&a.isOpenSymbol(t.marker.symbol),y=l.isBubble(t),x=c(t,e,m,g);if(x){u(t,e,m,g),g("xhoverformat"),g("yhoverformat");var 
b=x100},r.isDotSymbol=function(t){return"string"==typeof t?n.DOT_RE.test(t):t>200}},{"./constants":977}],983:[function(t,e,r){"use strict";var n=t("../../registry"),i=t("../../lib"),a=t("../scatter/get_trace_color");function o(t,e,r,o){var s=t.xa,l=t.ya,c=t.distance,u=t.dxy,f=t.index,h={pointNumber:f,x:e[f],y:r[f]};h.tx=Array.isArray(o.text)?o.text[f]:o.text,h.htx=Array.isArray(o.hovertext)?o.hovertext[f]:o.hovertext,h.data=Array.isArray(o.customdata)?o.customdata[f]:o.customdata,h.tp=Array.isArray(o.textposition)?o.textposition[f]:o.textposition;var p=o.textfont;p&&(h.ts=i.isArrayOrTypedArray(p.size)?p.size[f]:p.size,h.tc=Array.isArray(p.color)?p.color[f]:p.color,h.tf=Array.isArray(p.family)?p.family[f]:p.family);var d=o.marker;d&&(h.ms=i.isArrayOrTypedArray(d.size)?d.size[f]:d.size,h.mo=i.isArrayOrTypedArray(d.opacity)?d.opacity[f]:d.opacity,h.mx=i.isArrayOrTypedArray(d.symbol)?d.symbol[f]:d.symbol,h.mc=i.isArrayOrTypedArray(d.color)?d.color[f]:d.color);var m=d&&d.line;m&&(h.mlc=Array.isArray(m.color)?m.color[f]:m.color,h.mlw=i.isArrayOrTypedArray(m.width)?m.width[f]:m.width);var g=d&&d.gradient;g&&"none"!==g.type&&(h.mgt=Array.isArray(g.type)?g.type[f]:g.type,h.mgc=Array.isArray(g.color)?g.color[f]:g.color);var v=s.c2p(h.x,!0),y=l.c2p(h.y,!0),x=h.mrc||1,b=o.hoverlabel;b&&(h.hbg=Array.isArray(b.bgcolor)?b.bgcolor[f]:b.bgcolor,h.hbc=Array.isArray(b.bordercolor)?b.bordercolor[f]:b.bordercolor,h.hts=i.isArrayOrTypedArray(b.font.size)?b.font.size[f]:b.font.size,h.htc=Array.isArray(b.font.color)?b.font.color[f]:b.font.color,h.htf=Array.isArray(b.font.family)?b.font.family[f]:b.font.family,h.hnl=i.isArrayOrTypedArray(b.namelength)?b.namelength[f]:b.namelength);var _=o.hoverinfo;_&&(h.hi=Array.isArray(_)?_[f]:_);var w=o.hovertemplate;w&&(h.ht=Array.isArray(w)?w[f]:w);var T={};T[t.index]=h;var 
k=o._origX,A=o._origY,M=i.extendFlat({},t,{color:a(o,h),x0:v-x,x1:v+x,xLabelVal:k?k[f]:h.x,y0:y-x,y1:y+x,yLabelVal:A?A[f]:h.y,cd:T,distance:c,spikeDistance:u,hovertemplate:h.ht});return h.htx?M.text=h.htx:h.tx?M.text=h.tx:o.text&&(M.text=o.text),i.fillText(h,o,M),n.getComponentMethod("errorbars","hoverInfo")(h,o,M),M}e.exports={hoverPoints:function(t,e,r,n){var i,a,s,l,c,u,f,h,p,d,m=t.cd,g=m[0].t,v=m[0].trace,y=t.xa,x=t.ya,b=g.x,_=g.y,w=y.c2p(e),T=x.c2p(r),k=t.distance;if(g.tree){var A=y.p2c(w-k),M=y.p2c(w+k),S=x.p2c(T-k),E=x.p2c(T+k);i="x"===n?g.tree.range(Math.min(A,M),Math.min(x._rl[0],x._rl[1]),Math.max(A,M),Math.max(x._rl[0],x._rl[1])):g.tree.range(Math.min(A,M),Math.min(S,E),Math.max(A,M),Math.max(S,E))}else i=g.ids;var L=k;if("x"===n){var C=!!v.xperiodalignment,P=!!v.yperiodalignment;for(u=0;u=Math.min(I,O)&&w<=Math.max(I,O)?0:1/0}if(f=Math.min(z,D)&&T<=Math.max(z,D)?0:1/0}d=Math.sqrt(f*f+h*h),s=i[u]}}}else for(u=i.length-1;u>-1;u--)l=b[a=i[u]],c=_[a],f=y.c2p(l)-w,h=x.c2p(c)-T,(p=Math.sqrt(f*f+h*h))y.glText.length){var T=_-y.glText.length;for(m=0;mr&&(isNaN(e[n])||isNaN(e[n+1]));)n-=2;t.positions=e.slice(r,n+2)}return t})),y.line2d.update(y.lineOptions)),y.error2d){var A=(y.errorXOptions||[]).concat(y.errorYOptions||[]);y.error2d.update(A)}y.scatter2d&&y.scatter2d.update(y.markerOptions),y.fillOrder=s.repeat(null,_),y.fill2d&&(y.fillOptions=y.fillOptions.map((function(t,e){var n=r[e];if(t&&n&&n[0]&&n[0].trace){var i,a,o=n[0],s=o.trace,l=o.t,c=y.lineOptions[e],u=[];s._ownfill&&u.push(e),s._nexttrace&&u.push(e+1),u.length&&(y.fillOrder[e]=u);var f,h,p=[],d=c&&c.positions||l.positions;if("tozeroy"===s.fill){for(f=0;ff&&isNaN(d[h+1]);)h-=2;0!==d[f+1]&&(p=[d[f],0]),p=p.concat(d.slice(f,h+2)),0!==d[h+1]&&(p=p.concat([d[h],0]))}else if("tozerox"===s.fill){for(f=0;ff&&isNaN(d[h]);)h-=2;0!==d[f]&&(p=[0,d[f+1]]),p=p.concat(d.slice(f,h+2)),0!==d[h]&&(p=p.concat([0,d[h+1]]))}else 
if("toself"===s.fill||"tonext"===s.fill){for(p=[],i=0,t.splitNull=!0,a=0;a-1;for(m=0;m<_;m++){var L=r[m][0],C=L.trace,P=L.t,I=P.index,O=C._length,z=P.x,D=P.y;if(C.selectedpoints||S||E){if(S||(S=!0),C.selectedpoints){var R=y.selectBatch[I]=s.selIndices2selPoints(C),F={};for(g=0;g")}function u(t){return t+"\xb0"}}e.exports={hoverPoints:function(t,e,r){var o=t.cd,c=o[0].trace,u=t.xa,f=t.ya,h=t.subplot,p=360*(e>=0?Math.floor((e+180)/360):Math.ceil((e-180)/360)),d=e-p;if(n.getClosest(o,(function(t){var e=t.lonlat;if(e[0]===s)return 1/0;var n=i.modHalf(e[0],360),a=e[1],o=h.project([n,a]),l=o.x-u.c2p([d,a]),c=o.y-f.c2p([n,r]),p=Math.max(3,t.mrc||0);return Math.max(Math.sqrt(l*l+c*c)-p,1-3/p)}),t),!1!==t.index){var m=o[t.index],g=m.lonlat,v=[i.modHalf(g[0],360)+p,g[1]],y=u.c2p(v),x=f.c2p(v),b=m.mrc||1;t.x0=y-b,t.x1=y+b,t.y0=x-b,t.y1=x+b;var _={};_[c.subplot]={_subplot:h};var w=c._module.formatLabels(m,c,_);return t.lonLabel=w.lonLabel,t.latLabel=w.latLabel,t.color=a(c,m),t.extraText=l(c,m,o[0].t.labels),t.hovertemplate=c.hovertemplate,[t]}},getExtraText:l}},{"../../components/fx":401,"../../constants/numerical":474,"../../lib":498,"../scatter/get_trace_color":932}],994:[function(t,e,r){"use strict";e.exports={attributes:t("./attributes"),supplyDefaults:t("./defaults"),colorbar:t("../scatter/marker_colorbar"),formatLabels:t("./format_labels"),calc:t("../scattergeo/calc"),plot:t("./plot"),hoverPoints:t("./hover").hoverPoints,eventData:t("./event_data"),selectPoints:t("./select"),styleOnSelect:function(t,e){e&&e[0].trace._glTrace.update(e)},moduleType:"trace",name:"scattermapbox",basePlotModule:t("../../plots/mapbox"),categories:["mapbox","gl","symbols","showLegend","scatter-like"],meta:{}}},{"../../plots/mapbox":608,"../scatter/marker_colorbar":940,"../scattergeo/calc":965,"./attributes":988,"./defaults":990,"./event_data":991,"./format_labels":992,"./hover":993,"./plot":995,"./select":996}],995:[function(t,e,r){"use strict";var 
n=t("./convert"),i=t("../../plots/mapbox/constants").traceLayerPrefix,a=["fill","line","circle","symbol"];function o(t,e){this.type="scattermapbox",this.subplot=t,this.uid=e,this.sourceIds={fill:"source-"+e+"-fill",line:"source-"+e+"-line",circle:"source-"+e+"-circle",symbol:"source-"+e+"-symbol"},this.layerIds={fill:i+e+"-fill",line:i+e+"-line",circle:i+e+"-circle",symbol:i+e+"-symbol"},this.below=null}var s=o.prototype;s.addSource=function(t,e){this.subplot.map.addSource(this.sourceIds[t],{type:"geojson",data:e.geojson})},s.setSourceData=function(t,e){this.subplot.map.getSource(this.sourceIds[t]).setData(e.geojson)},s.addLayer=function(t,e,r){this.subplot.addLayer({type:t,id:this.layerIds[t],source:this.sourceIds[t],layout:e.layout,paint:e.paint},r)},s.update=function(t){var e,r,i,o=this.subplot,s=o.map,l=n(o.gd,t),c=o.belowLookup["trace-"+this.uid];if(c!==this.below){for(e=a.length-1;e>=0;e--)r=a[e],s.removeLayer(this.layerIds[r]);for(e=0;e=0;e--){var r=a[e];t.removeLayer(this.layerIds[r]),t.removeSource(this.sourceIds[r])}},e.exports=function(t,e){for(var r=e[0].trace,i=new o(t,r.uid),s=n(t.gd,e),l=i.below=t.belowLookup["trace-"+r.uid],c=0;c")}}e.exports={hoverPoints:function(t,e,r,a){var o=n(t,e,r,a);if(o&&!1!==o[0].index){var s=o[0];if(void 0===s.index)return o;var l=t.subplot,c=s.cd[s.index],u=s.trace;if(l.isPtInside(c))return s.xLabelVal=void 0,s.yLabelVal=void 0,i(c,u,l,s),s.hovertemplate=u.hovertemplate,o}},makeHoverPointText:i}},{"../scatter/hover":933}],1002:[function(t,e,r){"use 
strict";e.exports={moduleType:"trace",name:"scatterpolar",basePlotModule:t("../../plots/polar"),categories:["polar","symbols","showLegend","scatter-like"],attributes:t("./attributes"),supplyDefaults:t("./defaults").supplyDefaults,colorbar:t("../scatter/marker_colorbar"),formatLabels:t("./format_labels"),calc:t("./calc"),plot:t("./plot"),style:t("../scatter/style").style,styleOnSelect:t("../scatter/style").styleOnSelect,hoverPoints:t("./hover").hoverPoints,selectPoints:t("../scatter/select"),meta:{}}},{"../../plots/polar":617,"../scatter/marker_colorbar":940,"../scatter/select":944,"../scatter/style":946,"./attributes":997,"./calc":998,"./defaults":999,"./format_labels":1e3,"./hover":1001,"./plot":1003}],1003:[function(t,e,r){"use strict";var n=t("../scatter/plot"),i=t("../../constants/numerical").BADNUM;e.exports=function(t,e,r){for(var a=e.layers.frontplot.select("g.scatterlayer"),o={xaxis:e.xaxis,yaxis:e.yaxis,plot:e.framework,layerClipId:e._hasClipOnAxisFalse?e.clipIds.forTraces:null},s=e.radialAxis,l=e.angularAxis,c=0;c=c&&(y.marker.cluster=d.tree),y.marker&&(y.markerSel.positions=y.markerUnsel.positions=y.marker.positions=_),y.line&&_.length>1&&l.extendFlat(y.line,s.linePositions(t,p,_)),y.text&&(l.extendFlat(y.text,{positions:_},s.textPosition(t,p,y.text,y.marker)),l.extendFlat(y.textSel,{positions:_},s.textPosition(t,p,y.text,y.markerSel)),l.extendFlat(y.textUnsel,{positions:_},s.textPosition(t,p,y.text,y.markerUnsel))),y.fill&&!h.fill2d&&(h.fill2d=!0),y.marker&&!h.scatter2d&&(h.scatter2d=!0),y.line&&!h.line2d&&(h.line2d=!0),y.text&&!h.glText&&(h.glText=!0),h.lineOptions.push(y.line),h.fillOptions.push(y.fill),h.markerOptions.push(y.marker),h.markerSelectedOptions.push(y.markerSel),h.markerUnselectedOptions.push(y.markerUnsel),h.textOptions.push(y.text),h.textSelectedOptions.push(y.textSel),h.textUnselectedOptions.push(y.textUnsel),h.selectBatch.push([]),h.unselectBatch.push([]),d.x=w,d.y=T,d.rawx=w,d.rawy=T,d.r=g,d.theta=v,d.positions=_,d._scene=h,d.index=h.
count,h.count++}})),a(t,e,r)}},e.exports.reglPrecompiled={}},{"../../lib":498,"../scattergl/constants":977,"../scattergl/convert":978,"../scattergl/plot":985,"../scattergl/scene_update":986,"@plotly/point-cluster":59,"fast-isnumeric":185}],1012:[function(t,e,r){"use strict";var n=t("../../plots/template_attributes").hovertemplateAttrs,i=t("../../plots/template_attributes").texttemplateAttrs,a=t("../../lib/extend").extendFlat,o=t("../scatter/attributes"),s=t("../../plots/attributes"),l=o.line;e.exports={mode:o.mode,real:{valType:"data_array",editType:"calc+clearAxisTypes"},imag:{valType:"data_array",editType:"calc+clearAxisTypes"},text:o.text,texttemplate:i({editType:"plot"},{keys:["real","imag","text"]}),hovertext:o.hovertext,line:{color:l.color,width:l.width,dash:l.dash,shape:a({},l.shape,{values:["linear","spline"]}),smoothing:l.smoothing,editType:"calc"},connectgaps:o.connectgaps,marker:o.marker,cliponaxis:a({},o.cliponaxis,{dflt:!1}),textposition:o.textposition,textfont:o.textfont,fill:a({},o.fill,{values:["none","toself","tonext"],dflt:"none"}),fillcolor:o.fillcolor,hoverinfo:a({},s.hoverinfo,{flags:["real","imag","text","name"]}),hoveron:o.hoveron,hovertemplate:n(),selected:o.selected,unselected:o.unselected}},{"../../lib/extend":488,"../../plots/attributes":545,"../../plots/template_attributes":628,"../scatter/attributes":922}],1013:[function(t,e,r){"use strict";var n=t("fast-isnumeric"),i=t("../../constants/numerical").BADNUM,a=t("../scatter/colorscale_calc"),o=t("../scatter/arrays_to_calcdata"),s=t("../scatter/calc_selection"),l=t("../scatter/calc").calcMarkerSize;e.exports=function(t,e){for(var r=t._fullLayout,c=e.subplot,u=r[c].realaxis,f=r[c].imaginaryaxis,h=u.makeCalcdata(e,"real"),p=f.makeCalcdata(e,"imag"),d=e._length,m=new Array(d),g=0;g")}}e.exports={hoverPoints:function(t,e,r,a){var o=n(t,e,r,a);if(o&&!1!==o[0].index){var s=o[0];if(void 0===s.index)return o;var l=t.subplot,c=s.cd[s.index],u=s.trace;if(l.isPtInside(c))return s.xLabelVal=void 
0,s.yLabelVal=void 0,i(c,u,l,s),s.hovertemplate=u.hovertemplate,o}},makeHoverPointText:i}},{"../scatter/hover":933}],1017:[function(t,e,r){"use strict";e.exports={moduleType:"trace",name:"scattersmith",basePlotModule:t("../../plots/smith"),categories:["smith","symbols","showLegend","scatter-like"],attributes:t("./attributes"),supplyDefaults:t("./defaults"),colorbar:t("../scatter/marker_colorbar"),formatLabels:t("./format_labels"),calc:t("./calc"),plot:t("./plot"),style:t("../scatter/style").style,styleOnSelect:t("../scatter/style").styleOnSelect,hoverPoints:t("./hover").hoverPoints,selectPoints:t("../scatter/select"),meta:{}}},{"../../plots/smith":624,"../scatter/marker_colorbar":940,"../scatter/select":944,"../scatter/style":946,"./attributes":1012,"./calc":1013,"./defaults":1014,"./format_labels":1015,"./hover":1016,"./plot":1018}],1018:[function(t,e,r){"use strict";var n=t("../scatter/plot"),i=t("../../constants/numerical").BADNUM,a=t("../../plots/smith/helpers").smith;e.exports=function(t,e,r){for(var o=e.layers.frontplot.select("g.scatterlayer"),s={xaxis:e.xaxis,yaxis:e.yaxis,plot:e.framework,layerClipId:e._hasClipOnAxisFalse?e.clipIds.forTraces:null},l=0;l"),o.hovertemplate=h.hovertemplate,a}function x(t,e){v.push(t._hovertitle+": "+e)}}},{"../scatter/hover":933}],1025:[function(t,e,r){"use 
strict";e.exports={attributes:t("./attributes"),supplyDefaults:t("./defaults"),colorbar:t("../scatter/marker_colorbar"),formatLabels:t("./format_labels"),calc:t("./calc"),plot:t("./plot"),style:t("../scatter/style").style,styleOnSelect:t("../scatter/style").styleOnSelect,hoverPoints:t("./hover"),selectPoints:t("../scatter/select"),eventData:t("./event_data"),moduleType:"trace",name:"scatterternary",basePlotModule:t("../../plots/ternary"),categories:["ternary","symbols","showLegend","scatter-like"],meta:{}}},{"../../plots/ternary":629,"../scatter/marker_colorbar":940,"../scatter/select":944,"../scatter/style":946,"./attributes":1019,"./calc":1020,"./defaults":1021,"./event_data":1022,"./format_labels":1023,"./hover":1024,"./plot":1026}],1026:[function(t,e,r){"use strict";var n=t("../scatter/plot");e.exports=function(t,e,r){var i=e.plotContainer;i.select(".scatterlayer").selectAll("*").remove();var a={xaxis:e.xaxis,yaxis:e.yaxis,plot:i,layerClipId:e._hasClipOnAxisFalse?e.clipIdRelative:null},o=e.layers.frontplot.select("g.scatterlayer");n(t,a,r,o)}},{"../scatter/plot":943}],1027:[function(t,e,r){"use strict";var n=t("../scatter/attributes"),i=t("../../components/colorscale/attributes"),a=t("../../plots/cartesian/axis_format_attributes").axisHoverFormat,o=t("../../plots/template_attributes").hovertemplateAttrs,s=t("../scattergl/attributes"),l=t("../../plots/cartesian/constants").idRegex,c=t("../../plot_api/plot_template").templatedArray,u=t("../../lib/extend").extendFlat,f=n.marker,h=f.line,p=u(i("marker.line",{editTypeOverride:"calc"}),{width:u({},h.width,{editType:"calc"}),editType:"calc"}),d=u(i("marker"),{symbol:f.symbol,size:u({},f.size,{editType:"markerSize"}),sizeref:f.sizeref,sizemin:f.sizemin,sizemode:f.sizemode,opacity:f.opacity,colorbar:f.colorbar,line:p,editType:"calc"});function 
m(t){return{valType:"info_array",freeLength:!0,editType:"calc",items:{valType:"subplotid",regex:l[t],editType:"plot"}}}d.color.editType=d.cmin.editType=d.cmax.editType="style",e.exports={dimensions:c("dimension",{visible:{valType:"boolean",dflt:!0,editType:"calc"},label:{valType:"string",editType:"calc"},values:{valType:"data_array",editType:"calc+clearAxisTypes"},axis:{type:{valType:"enumerated",values:["linear","log","date","category"],editType:"calc+clearAxisTypes"},matches:{valType:"boolean",dflt:!1,editType:"calc"},editType:"calc+clearAxisTypes"},editType:"calc+clearAxisTypes"}),text:u({},s.text,{}),hovertext:u({},s.hovertext,{}),hovertemplate:o(),xhoverformat:a("x"),yhoverformat:a("y"),marker:d,xaxes:m("x"),yaxes:m("y"),diagonal:{visible:{valType:"boolean",dflt:!0,editType:"calc"},editType:"calc"},showupperhalf:{valType:"boolean",dflt:!0,editType:"calc"},showlowerhalf:{valType:"boolean",dflt:!0,editType:"calc"},selected:{marker:s.selected.marker,editType:"calc"},unselected:{marker:s.unselected.marker,editType:"calc"},opacity:s.opacity}},{"../../components/colorscale/attributes":368,"../../lib/extend":488,"../../plot_api/plot_template":538,"../../plots/cartesian/axis_format_attributes":552,"../../plots/cartesian/constants":556,"../../plots/template_attributes":628,"../scatter/attributes":922,"../scattergl/attributes":974}],1028:[function(t,e,r){"use strict";var 
n=t("../../registry"),i=t("../../components/grid");e.exports={moduleType:"trace",name:"splom",categories:["gl","regl","cartesian","symbols","showLegend","scatter-like"],attributes:t("./attributes"),supplyDefaults:t("./defaults"),colorbar:t("../scatter/marker_colorbar"),calc:t("./calc"),plot:t("./plot"),hoverPoints:t("./hover").hoverPoints,selectPoints:t("./select"),editStyle:t("./edit_style"),meta:{}},n.register(i)},{"../../components/grid":405,"../../registry":633,"../scatter/marker_colorbar":940,"./attributes":1027,"./calc":1030,"./defaults":1031,"./edit_style":1032,"./hover":1034,"./plot":1036,"./select":1038}],1029:[function(t,e,r){"use strict";var n=t("regl-line2d"),i=t("../../registry"),a=t("../../lib/prepare_regl"),o=t("../../plots/get_data").getModuleCalcData,s=t("../../plots/cartesian"),l=t("../../plots/cartesian/axis_ids").getFromId,c=t("../../plots/cartesian/axes").shouldShowZeroLine,u={};function f(t,e,r){for(var n=r.matrixOptions.data.length,i=e._visibleDims,a=r.viewOpts.ranges=new Array(n),o=0;oh?b.sizeAvg||Math.max(b.size,3):a(e,x),p=0;pa&&l||i-1,P=!0;if(o(x)||!!p.selectedpoints||C){var I=p._length;if(p.selectedpoints){m.selectBatch=p.selectedpoints;var O=p.selectedpoints,z={};for(l=0;l1&&(u=m[y-1],h=g[y-1],d=v[y-1]),e=0;eu?"-":"+")+"x")).replace("y",(f>h?"-":"+")+"y")).replace("z",(p>d?"-":"+")+"z");var L=function(){y=0,M=[],S=[],E=[]};(!y||y2?t.slice(1,e-1):2===e?[(t[0]+t[1])/2]:t}function p(t){var e=t.length;return 1===e?[.5,.5]:[t[1]-t[0],t[e-1]-t[e-2]]}function d(t,e){var r=t.fullSceneLayout,i=t.dataScale,u=e._len,f={};function d(t,e){var n=r[e],o=i[c[e]];return a.simpleMap(t,(function(t){return n.d2l(t)*o}))}if(f.vectors=l(d(e._u,"xaxis"),d(e._v,"yaxis"),d(e._w,"zaxis"),u),!u)return{positions:[],cells:[]};var m=d(e._Xs,"xaxis"),g=d(e._Ys,"yaxis"),v=d(e._Zs,"zaxis");if(f.meshgrid=[m,g,v],f.gridFill=e._gridFill,e._slen)f.startingPositions=l(d(e._startsX,"xaxis"),d(e._startsY,"yaxis"),d(e._startsZ,"zaxis"));else{for(var y=g[0],x=h(m),b=h(v),_=new 
Array(x.length*b.length),w=0,T=0;T=0};v?(r=Math.min(g.length,x.length),l=function(t){return A(g[t])&&M(t)},f=function(t){return String(g[t])}):(r=Math.min(y.length,x.length),l=function(t){return A(y[t])&&M(t)},f=function(t){return String(y[t])}),_&&(r=Math.min(r,b.length));for(var S=0;S1){for(var P=a.randstr(),I=0;I"),name:A||z("name")?y.name:void 0,color:k("hoverlabel.bgcolor")||x.color,borderColor:k("hoverlabel.bordercolor"),fontFamily:k("hoverlabel.font.family"),fontSize:k("hoverlabel.font.size"),fontColor:k("hoverlabel.font.color"),nameLength:k("hoverlabel.namelength"),textAlign:k("hoverlabel.align"),hovertemplate:A,hovertemplateLabels:P,eventData:l};g&&(F.x0=E-i.rInscribed*i.rpx1,F.x1=E+i.rInscribed*i.rpx1,F.idealAlign=i.pxmid[0]<0?"left":"right"),v&&(F.x=E,F.idealAlign=E<0?"left":"right");var B=[];o.loneHover(F,{container:a._hoverlayer.node(),outerContainer:a._paper.node(),gd:r,inOut_bbox:B}),l[0].bbox=B[0],d._hasHoverLabel=!0}if(v){var N=t.select("path.surface");h.styleOne(N,i,y,{hovered:!0})}d._hasHoverEvent=!0,r.emit("plotly_hover",{points:l||[f(i,y,h.eventDataKeys)],event:n.event})}})),t.on("mouseout",(function(e){var i=r._fullLayout,a=r._fullData[d.index],s=n.select(this).datum();if(d._hasHoverEvent&&(e.originalEvent=n.event,r.emit("plotly_unhover",{points:[f(s,a,h.eventDataKeys)],event:n.event}),d._hasHoverEvent=!1),d._hasHoverLabel&&(o.loneUnhover(i._hoverlayer.node()),d._hasHoverLabel=!1),v){var l=t.select("path.surface");h.styleOne(l,s,a,{hovered:!1})}})),t.on("click",(function(t){var e=r._fullLayout,a=r._fullData[d.index],s=g&&(c.isHierarchyRoot(t)||c.isLeaf(t)),u=c.getPtId(t),p=c.isEntry(t)?c.findEntryWithChild(m,u):c.findEntryWithLevel(m,u),v=c.getPtId(p),y={points:[f(t,a,h.eventDataKeys)],event:n.event};s||(y.nextLevel=v);var 
x=l.triggerHandler(r,"plotly_"+d.type+"click",y);if(!1!==x&&e.hovermode&&(r._hoverdata=[f(t,a,h.eventDataKeys)],o.click(r,n.event)),!s&&!1!==x&&!r._dragging&&!r._transitioning){i.call("_storeDirectGUIEdit",a,e._tracePreGUI[a.uid],{level:a.level});var b={data:[{level:v}],traces:[d.index]},_={frame:{redraw:!1,duration:h.transitionTime},transition:{duration:h.transitionTime,easing:h.transitionEasing},mode:"immediate",fromcurrent:!0};o.loneUnhover(e._hoverlayer.node()),i.call("animate",r,b,_)}}))}},{"../../components/fx":401,"../../components/fx/helpers":397,"../../lib":498,"../../lib/events":487,"../../registry":633,"../pie/helpers":901,"./helpers":1050,"@plotly/d3":58}],1050:[function(t,e,r){"use strict";var n=t("../../lib"),i=t("../../components/color"),a=t("../../lib/setcursor"),o=t("../pie/helpers");function s(t){return t.data.data.pid}r.findEntryWithLevel=function(t,e){var n;return e&&t.eachAfter((function(t){if(r.getPtId(t)===e)return n=t.copy()})),n||t},r.findEntryWithChild=function(t,e){var n;return t.eachAfter((function(t){for(var i=t.children||[],a=0;a0)},r.getMaxDepth=function(t){return t.maxdepth>=0?t.maxdepth:1/0},r.isHeader=function(t,e){return!(r.isLeaf(t)||t.depth===e._maxDepth-1)},r.getParent=function(t,e){return r.findEntryWithLevel(t,s(e))},r.listPath=function(t,e){var n=t.parent;if(!n)return[];var i=e?[n.data[e]]:[n];return r.listPath(n,e).concat(i)},r.getPath=function(t){return r.listPath(t,"label").join("/")+"/"},r.formatValue=o.formatPieValue,r.formatPercent=function(t,e){var r=n.formatPercent(t,0);return"0%"===r&&(r=o.formatPiePercent(t,e)),r}},{"../../components/color":361,"../../lib":498,"../../lib/setcursor":519,"../pie/helpers":901}],1051:[function(t,e,r){"use 
strict";e.exports={moduleType:"trace",name:"sunburst",basePlotModule:t("./base_plot"),categories:[],animatable:!0,attributes:t("./attributes"),layoutAttributes:t("./layout_attributes"),supplyDefaults:t("./defaults"),supplyLayoutDefaults:t("./layout_defaults"),calc:t("./calc").calc,crossTraceCalc:t("./calc").crossTraceCalc,plot:t("./plot").plot,style:t("./style").style,colorbar:t("../scatter/marker_colorbar"),meta:{}}},{"../scatter/marker_colorbar":940,"./attributes":1044,"./base_plot":1045,"./calc":1046,"./defaults":1048,"./layout_attributes":1052,"./layout_defaults":1053,"./plot":1054,"./style":1055}],1052:[function(t,e,r){"use strict";e.exports={sunburstcolorway:{valType:"colorlist",editType:"calc"},extendsunburstcolors:{valType:"boolean",dflt:!0,editType:"calc"}}},{}],1053:[function(t,e,r){"use strict";var n=t("../../lib"),i=t("./layout_attributes");e.exports=function(t,e){function r(r,a){return n.coerce(t,e,i,r,a)}r("sunburstcolorway",e.colorway),r("extendsunburstcolors")}},{"../../lib":498,"./layout_attributes":1052}],1054:[function(t,e,r){"use strict";var n=t("@plotly/d3"),i=t("d3-hierarchy"),a=t("d3-interpolate").interpolate,o=t("../../components/drawing"),s=t("../../lib"),l=t("../../lib/svg_text_utils"),c=t("../bar/uniform_text"),u=c.recordMinTextSize,f=c.clearMinTextSize,h=t("../pie/plot"),p=t("../pie/helpers").getRotationAngle,d=h.computeTransform,m=h.transformInsideText,g=t("./style").styleOne,v=t("../bar/style").resizeText,y=t("./fx"),x=t("./constants"),b=t("./helpers");function _(t,e,c,f){var h=t._fullLayout,v=!h.uniformtext.mode&&b.hasTransition(f),_=n.select(c).selectAll("g.slice"),T=e[0],k=T.trace,A=T.hierarchy,M=b.findEntryWithLevel(A,k.level),S=b.getMaxDepth(k),E=h._size,L=k.domain,C=E.w*(L.x[1]-L.x[0]),P=E.h*(L.y[1]-L.y[0]),I=.5*Math.min(C,P),O=T.cx=E.l+E.w*(L.x[1]+L.x[0])/2,z=T.cy=E.t+E.h*(1-L.y[0])-P/2;if(!M)return _.remove();var 
D=null,R={};v&&_.each((function(t){R[b.getPtId(t)]={rpx0:t.rpx0,rpx1:t.rpx1,x0:t.x0,x1:t.x1,transform:t.transform},!D&&b.isEntry(t)&&(D=t)}));var F=function(t){return i.partition().size([2*Math.PI,t.height+1])(t)}(M).descendants(),B=M.height+1,N=0,j=S;T.hasMultipleRoots&&b.isHierarchyRoot(M)&&(F=F.slice(1),B-=1,N=1,j+=1),F=F.filter((function(t){return t.y1<=j}));var U=p(k.rotation);U&&F.forEach((function(t){t.x0+=U,t.x1+=U}));var V=Math.min(B,S),H=function(t){return(t-N)/V*I},q=function(t,e){return[t*Math.cos(e),-t*Math.sin(e)]},G=function(t){return s.pathAnnulus(t.rpx0,t.rpx1,t.x0,t.x1,O,z)},Y=function(t){return O+w(t)[0]*(t.transform.rCenter||0)+(t.transform.x||0)},W=function(t){return z+w(t)[1]*(t.transform.rCenter||0)+(t.transform.y||0)};(_=_.data(F,b.getPtId)).enter().append("g").classed("slice",!0),v?_.exit().transition().each((function(){var t=n.select(this);t.select("path.surface").transition().attrTween("d",(function(t){var e=function(t){var e,r=b.getPtId(t),n=R[r],i=R[b.getPtId(M)];if(i){var o=(t.x1>i.x1?2*Math.PI:0)+U;e=t.rpx1X?2*Math.PI:0)+U;e={x0:i,x1:i}}else e={rpx0:I,rpx1:I},s.extendFlat(e,K(t));else e={rpx0:0,rpx1:0};else e={x0:U,x1:U};return a(e,n)}(t);return function(t){return G(e(t))}})):f.attr("d",G),c.call(y,M,t,e,{eventDataKeys:x.eventDataKeys,transitionTime:x.CLICK_TRANSITION_TIME,transitionEasing:x.CLICK_TRANSITION_EASING}).call(b.setSliceCursor,t,{hideOnRoot:!0,hideOnLeaves:!0,isTransitioning:t._transitioning}),f.call(g,i,k);var p=s.ensureSingle(c,"g","slicetext"),_=s.ensureSingle(p,"text","",(function(t){t.attr("data-notex",1)})),w=s.ensureUniformFontSize(t,b.determineTextFont(k,i,h.font));_.text(r.formatSliceLabel(i,M,k,e,h)).classed("slicetext",!0).attr("text-anchor","middle").call(o.font,w).call(l.convertToTspans,t);var A=o.bBox(_.node());i.transform=m(A,i,T),i.transform.targetX=Y(i),i.transform.targetY=W(i);var S=function(t,e){var r=t.transform;return 
d(r,e),r.fontSize=w.size,u(k.type,r,h),s.getTextTransform(r)};v?_.transition().attrTween("transform",(function(t){var e=function(t){var e,r=R[b.getPtId(t)],n=t.transform;if(r)e=r;else if(e={rpx1:t.rpx1,transform:{textPosAngle:n.textPosAngle,scale:0,rotate:n.rotate,rCenter:n.rCenter,x:n.x,y:n.y}},D)if(t.parent)if(X){var i=t.x1>X?2*Math.PI:0;e.x0=e.x1=i}else s.extendFlat(e,K(t));else e.x0=e.x1=U;else e.x0=e.x1=U;var o=a(e.transform.textPosAngle,t.transform.textPosAngle),l=a(e.rpx1,t.rpx1),c=a(e.x0,t.x0),f=a(e.x1,t.x1),p=a(e.transform.scale,n.scale),d=a(e.transform.rotate,n.rotate),m=0===n.rCenter?3:0===e.transform.rCenter?1/3:1,g=a(e.transform.rCenter,n.rCenter);return function(t){var e=l(t),r=c(t),i=f(t),a=function(t){return g(Math.pow(t,m))}(t),s={pxmid:q(e,(r+i)/2),rpx1:e,transform:{textPosAngle:o(t),rCenter:a,x:n.x,y:n.y}};return u(k.type,n,h),{transform:{targetX:Y(s),targetY:W(s),scale:p(t),rotate:d(t),rCenter:a}}}}(t);return function(t){return S(e(t),A)}})):_.attr("transform",S(i,A))}))}function w(t){return e=t.rpx1,r=t.transform.textPosAngle,[e*Math.sin(r),-e*Math.cos(r)];var e,r}r.plot=function(t,e,r,i){var a,o,s=t._fullLayout,l=s._sunburstlayer,c=!r,u=!s.uniformtext.mode&&b.hasTransition(r);(f("sunburst",s),(a=l.selectAll("g.trace.sunburst").data(e,(function(t){return t[0].trace.uid}))).enter().append("g").classed("trace",!0).classed("sunburst",!0).attr("stroke-linejoin","round"),a.order(),u)?(i&&(o=i()),n.transition().duration(r.duration).ease(r.easing).each("end",(function(){o&&o()})).each("interrupt",(function(){o&&o()})).each((function(){l.selectAll("g.trace").each((function(e){_(t,e,this,r)}))}))):(a.each((function(e){_(t,e,this,r)})),s.uniformtext.mode&&v(t,s._sunburstlayer.selectAll(".trace"),"sunburst"));c&&a.exit().remove()},r.formatSliceLabel=function(t,e,r,n,i){var a=r.texttemplate,o=r.textinfo;if(!(a||o&&"none"!==o))return"";var l=i.separators,c=n[0],u=t.data.data,f=c.hierarchy,h=b.isHierarchyRoot(t),p=b.getParent(f,t),d=b.getValue(t);if(!a){var 
m,g=o.split("+"),v=function(t){return-1!==g.indexOf(t)},y=[];if(v("label")&&u.label&&y.push(u.label),u.hasOwnProperty("v")&&v("value")&&y.push(b.formatValue(u.v,l)),!h){v("current path")&&y.push(b.getPath(t.data));var x=0;v("percent parent")&&x++,v("percent entry")&&x++,v("percent root")&&x++;var _=x>1;if(x){var w,T=function(t){m=b.formatPercent(w,l),_&&(m+=" of "+t),y.push(m)};v("percent parent")&&!h&&(w=d/b.getValue(p),T("parent")),v("percent entry")&&(w=d/b.getValue(e),T("entry")),v("percent root")&&(w=d/b.getValue(f),T("root"))}}return v("text")&&(m=s.castOption(r,u.i,"text"),s.isValidTextValue(m)&&y.push(m)),y.join("
")}var k=s.castOption(r,u.i,"texttemplate");if(!k)return"";var A={};u.label&&(A.label=u.label),u.hasOwnProperty("v")&&(A.value=u.v,A.valueLabel=b.formatValue(u.v,l)),A.currentPath=b.getPath(t.data),h||(A.percentParent=d/b.getValue(p),A.percentParentLabel=b.formatPercent(A.percentParent,l),A.parent=b.getPtLabel(p)),A.percentEntry=d/b.getValue(e),A.percentEntryLabel=b.formatPercent(A.percentEntry,l),A.entry=b.getPtLabel(e),A.percentRoot=d/b.getValue(f),A.percentRootLabel=b.formatPercent(A.percentRoot,l),A.root=b.getPtLabel(f),u.hasOwnProperty("color")&&(A.color=u.color);var M=s.castOption(r,u.i,"text");return(s.isValidTextValue(M)||""===M)&&(A.text=M),A.customdata=s.castOption(r,u.i,"customdata"),s.texttemplateString(k,A,i._d3locale,A,r._meta||{})}},{"../../components/drawing":383,"../../lib":498,"../../lib/svg_text_utils":524,"../bar/style":657,"../bar/uniform_text":659,"../pie/helpers":901,"../pie/plot":905,"./constants":1047,"./fx":1049,"./helpers":1050,"./style":1055,"@plotly/d3":58,"d3-hierarchy":110,"d3-interpolate":111}],1055:[function(t,e,r){"use strict";var n=t("@plotly/d3"),i=t("../../components/color"),a=t("../../lib"),o=t("../bar/uniform_text").resizeText;function s(t,e,r){var n=e.data.data,o=!e.children,s=n.i,l=a.castOption(r,s,"marker.line.color")||i.defaultLine,c=a.castOption(r,s,"marker.line.width")||0;t.style("stroke-width",c).call(i.fill,n.color).call(i.stroke,l).style("opacity",o?r.leaf.opacity:null)}e.exports={style:function(t){var e=t._fullLayout._sunburstlayer.selectAll(".trace");o(t,e,"sunburst"),e.each((function(t){var e=n.select(this),r=t[0].trace;e.style("opacity",r.opacity),e.selectAll("path.surface").each((function(t){n.select(this).call(s,t,r)}))}))},styleOne:s}},{"../../components/color":361,"../../lib":498,"../bar/uniform_text":659,"@plotly/d3":58}],1056:[function(t,e,r){"use strict";var 
n=t("../../components/color"),i=t("../../components/colorscale/attributes"),a=t("../../plots/cartesian/axis_format_attributes").axisHoverFormat,o=t("../../plots/template_attributes").hovertemplateAttrs,s=t("../../plots/attributes"),l=t("../../lib/extend").extendFlat,c=t("../../plot_api/edit_types").overrideAll;function u(t){return{show:{valType:"boolean",dflt:!1},start:{valType:"number",dflt:null,editType:"plot"},end:{valType:"number",dflt:null,editType:"plot"},size:{valType:"number",dflt:null,min:0,editType:"plot"},project:{x:{valType:"boolean",dflt:!1},y:{valType:"boolean",dflt:!1},z:{valType:"boolean",dflt:!1}},color:{valType:"color",dflt:n.defaultLine},usecolormap:{valType:"boolean",dflt:!1},width:{valType:"number",min:1,max:16,dflt:2},highlight:{valType:"boolean",dflt:!0},highlightcolor:{valType:"color",dflt:n.defaultLine},highlightwidth:{valType:"number",min:1,max:16,dflt:2}}}var f=e.exports=c(l({z:{valType:"data_array"},x:{valType:"data_array"},y:{valType:"data_array"},text:{valType:"string",dflt:"",arrayOk:!0},hovertext:{valType:"string",dflt:"",arrayOk:!0},hovertemplate:o(),xhoverformat:a("x"),yhoverformat:a("y"),zhoverformat:a("z"),connectgaps:{valType:"boolean",dflt:!1,editType:"calc"},surfacecolor:{valType:"data_array"}},i("",{colorAttr:"z or 
surfacecolor",showScaleDflt:!0,autoColorDflt:!1,editTypeOverride:"calc"}),{contours:{x:u(),y:u(),z:u()},hidesurface:{valType:"boolean",dflt:!1},lightposition:{x:{valType:"number",min:-1e5,max:1e5,dflt:10},y:{valType:"number",min:-1e5,max:1e5,dflt:1e4},z:{valType:"number",min:-1e5,max:1e5,dflt:0}},lighting:{ambient:{valType:"number",min:0,max:1,dflt:.8},diffuse:{valType:"number",min:0,max:1,dflt:.8},specular:{valType:"number",min:0,max:2,dflt:.05},roughness:{valType:"number",min:0,max:1,dflt:.5},fresnel:{valType:"number",min:0,max:5,dflt:.2}},opacity:{valType:"number",min:0,max:1,dflt:1},opacityscale:{valType:"any",editType:"calc"},_deprecated:{zauto:l({},i.zauto,{}),zmin:l({},i.zmin,{}),zmax:l({},i.zmax,{})},hoverinfo:l({},s.hoverinfo),showlegend:l({},s.showlegend,{dflt:!1})}),"calc","nested");f.x.editType=f.y.editType=f.z.editType="calc+clearAxisTypes",f.transforms=void 0},{"../../components/color":361,"../../components/colorscale/attributes":368,"../../lib/extend":488,"../../plot_api/edit_types":531,"../../plots/attributes":545,"../../plots/cartesian/axis_format_attributes":552,"../../plots/template_attributes":628}],1057:[function(t,e,r){"use strict";var n=t("../../components/colorscale/calc");e.exports=function(t,e){e.surfacecolor?n(t,e,{vals:e.surfacecolor,containerStr:"",cLetter:"c"}):n(t,e,{vals:e.z,containerStr:"",cLetter:"c"})}},{"../../components/colorscale/calc":369}],1058:[function(t,e,r){"use strict";var n=t("../../../stackgl_modules").gl_surface3d,i=t("../../../stackgl_modules").ndarray,a=t("../../../stackgl_modules").ndarray_linear_interpolate.d2,o=t("../heatmap/interp2d"),s=t("../heatmap/find_empties"),l=t("../../lib").isArrayOrTypedArray,c=t("../../lib/gl_format_color").parseColorScale,u=t("../../lib/str2rgbarray"),f=t("../../components/colorscale").extractOpts;function 
h(t,e,r){this.scene=t,this.uid=r,this.surface=e,this.data=null,this.showContour=[!1,!1,!1],this.contourStart=[null,null,null],this.contourEnd=[null,null,null],this.contourSize=[0,0,0],this.minValues=[1/0,1/0,1/0],this.maxValues=[-1/0,-1/0,-1/0],this.dataScaleX=1,this.dataScaleY=1,this.refineData=!0,this.objectOffset=[0,0,0]}var p=h.prototype;p.getXat=function(t,e,r,n){var i=l(this.data.x)?l(this.data.x[0])?this.data.x[e][t]:this.data.x[t]:t;return void 0===r?i:n.d2l(i,0,r)},p.getYat=function(t,e,r,n){var i=l(this.data.y)?l(this.data.y[0])?this.data.y[e][t]:this.data.y[e]:e;return void 0===r?i:n.d2l(i,0,r)},p.getZat=function(t,e,r,n){var i=this.data.z[e][t];return null===i&&this.data.connectgaps&&this.data._interpolatedZ&&(i=this.data._interpolatedZ[e][t]),void 0===r?i:n.d2l(i,0,r)},p.handlePick=function(t){if(t.object===this.surface){var e=(t.data.index[0]-1)/this.dataScaleX-1,r=(t.data.index[1]-1)/this.dataScaleY-1,n=Math.max(Math.min(Math.round(e),this.data.z[0].length-1),0),i=Math.max(Math.min(Math.round(r),this.data._ylength-1),0);t.index=[n,i],t.traceCoordinate=[this.getXat(n,i),this.getYat(n,i),this.getZat(n,i)],t.dataCoordinate=[this.getXat(n,i,this.data.xcalendar,this.scene.fullSceneLayout.xaxis),this.getYat(n,i,this.data.ycalendar,this.scene.fullSceneLayout.yaxis),this.getZat(n,i,this.data.zcalendar,this.scene.fullSceneLayout.zaxis)];for(var a=0;a<3;a++){var o=t.dataCoordinate[a];null!=o&&(t.dataCoordinate[a]*=this.scene.dataScale[a])}var s=this.data.hovertext||this.data.text;return Array.isArray(s)&&s[i]&&void 0!==s[i][n]?t.textLabel=s[i][n]:t.textLabel=s||"",t.data.dataCoordinate=t.dataCoordinate.slice(),this.surface.highlight(t.data),this.scene.glplot.spikes.position=t.dataCoordinate,!0}};var 
d=[2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,73,79,83,89,97,101,103,107,109,113,127,131,137,139,149,151,157,163,167,173,179,181,191,193,197,199,211,223,227,229,233,239,241,251,257,263,269,271,277,281,283,293,307,311,313,317,331,337,347,349,353,359,367,373,379,383,389,397,401,409,419,421,431,433,439,443,449,457,461,463,467,479,487,491,499,503,509,521,523,541,547,557,563,569,571,577,587,593,599,601,607,613,617,619,631,641,643,647,653,659,661,673,677,683,691,701,709,719,727,733,739,743,751,757,761,769,773,787,797,809,811,821,823,827,829,839,853,857,859,863,877,881,883,887,907,911,919,929,937,941,947,953,967,971,977,983,991,997,1009,1013,1019,1021,1031,1033,1039,1049,1051,1061,1063,1069,1087,1091,1093,1097,1103,1109,1117,1123,1129,1151,1153,1163,1171,1181,1187,1193,1201,1213,1217,1223,1229,1231,1237,1249,1259,1277,1279,1283,1289,1291,1297,1301,1303,1307,1319,1321,1327,1361,1367,1373,1381,1399,1409,1423,1427,1429,1433,1439,1447,1451,1453,1459,1471,1481,1483,1487,1489,1493,1499,1511,1523,1531,1543,1549,1553,1559,1567,1571,1579,1583,1597,1601,1607,1609,1613,1619,1621,1627,1637,1657,1663,1667,1669,1693,1697,1699,1709,1721,1723,1733,1741,1747,1753,1759,1777,1783,1787,1789,1801,1811,1823,1831,1847,1861,1867,1871,1873,1877,1879,1889,1901,1907,1913,1931,1933,1949,1951,1973,1979,1987,1993,1997,1999,2003,2011,2017,2027,2029,2039,2053,2063,2069,2081,2083,2087,2089,2099,2111,2113,2129,2131,2137,2141,2143,2153,2161,2179,2203,2207,2213,2221,2237,2239,2243,2251,2267,2269,2273,2281,2287,2293,2297,2309,2311,2333,2339,2341,2347,2351,2357,2371,2377,2381,2383,2389,2393,2399,2411,2417,2423,2437,2441,2447,2459,2467,2473,2477,2503,2521,2531,2539,2543,2549,2551,2557,2579,2591,2593,2609,2617,2621,2633,2647,2657,2659,2663,2671,2677,2683,2687,2689,2693,2699,2707,2711,2713,2719,2729,2731,2741,2749,2753,2767,2777,2789,2791,2797,2801,2803,2819,2833,2837,2843,2851,2857,2861,2879,2887,2897,2903,2909,2917,2927,2939,2953,2957,2963,2969,2971,2999];function m(t,e){if(t0){r=d[n];break}return 
r}function y(t,e){if(!(t<1||e<1)){for(var r=g(t),n=g(e),i=1,a=0;a_;)r--,r/=v(r),++r1?n:1},p.refineCoords=function(t){for(var e=this.dataScaleX,r=this.dataScaleY,n=t[0].shape[0],a=t[0].shape[1],o=0|Math.floor(t[0].shape[0]*e+1),s=0|Math.floor(t[0].shape[1]*r+1),l=1+n+1,c=1+a+1,u=i(new Float32Array(l*c),[l,c]),f=[1/e,0,0,0,1/r,0,0,0,1],h=0;h0&&null!==this.contourStart[t]&&null!==this.contourEnd[t]&&this.contourEnd[t]>this.contourStart[t]))for(i[t]=!0,e=this.contourStart[t];ea&&(this.minValues[e]=a),this.maxValues[e]",maxDimensionCount:60,overdrag:45,releaseTransitionDuration:120,releaseTransitionEase:"cubic-out",scrollbarCaptureWidth:18,scrollbarHideDelay:1e3,scrollbarHideDuration:1e3,scrollbarOffset:5,scrollbarWidth:8,transitionDuration:100,transitionEase:"cubic-out",uplift:5,wrapSpacer:" ",wrapSplitCharacter:" ",cn:{table:"table",tableControlView:"table-control-view",scrollBackground:"scroll-background",yColumn:"y-column",columnBlock:"column-block",scrollAreaClip:"scroll-area-clip",scrollAreaClipRect:"scroll-area-clip-rect",columnBoundary:"column-boundary",columnBoundaryClippath:"column-boundary-clippath",columnBoundaryRect:"column-boundary-rect",columnCells:"column-cells",columnCell:"column-cell",cellRect:"cell-rect",cellText:"cell-text",cellTextHolder:"cell-text-holder",scrollbarKit:"scrollbar-kit",scrollbar:"scrollbar",scrollbarSlider:"scrollbar-slider",scrollbarGlyph:"scrollbar-glyph",scrollbarCaptureZone:"scrollbar-capture-zone"}}},{}],1065:[function(t,e,r){"use strict";var n=t("./constants"),i=t("../../lib/extend").extendFlat,a=t("fast-isnumeric");function o(t){if(Array.isArray(t)){for(var e=0,r=0;r=e||c===t.length-1)&&(n[i]=o,o.key=l++,o.firstRowIndex=s,o.lastRowIndex=c,o={firstRowIndex:null,lastRowIndex:null,rows:[]},i+=a,s=c+1,a=0);return n}e.exports=function(t,e){var r=l(e.cells.values),p=function(t){return t.slice(e.header.values.length,t.length)},d=l(e.header.values);d.length&&!d[0].length&&(d[0]=[""],d=l(d));var m=d.concat(p(r).map((function(){return 
c((d[0]||[""]).length)}))),g=e.domain,v=Math.floor(t._fullLayout._size.w*(g.x[1]-g.x[0])),y=Math.floor(t._fullLayout._size.h*(g.y[1]-g.y[0])),x=e.header.values.length?m[0].map((function(){return e.header.height})):[n.emptyHeaderHeight],b=r.length?r[0].map((function(){return e.cells.height})):[],_=x.reduce(s,0),w=h(b,y-_+n.uplift),T=f(h(x,_),[]),k=f(w,T),A={},M=e._fullInput.columnorder.concat(p(r.map((function(t,e){return e})))),S=m.map((function(t,r){var n=Array.isArray(e.columnwidth)?e.columnwidth[Math.min(r,e.columnwidth.length-1)]:e.columnwidth;return a(n)?Number(n):1})),E=S.reduce(s,0);S=S.map((function(t){return t/E*v}));var L=Math.max(o(e.header.line.width),o(e.cells.line.width)),C={key:e.uid+t._context.staticPlot,translateX:g.x[0]*t._fullLayout._size.w,translateY:t._fullLayout._size.h*(1-g.y[1]),size:t._fullLayout._size,width:v,maxLineWidth:L,height:y,columnOrder:M,groupHeight:y,rowBlocks:k,headerRowBlocks:T,scrollY:0,cells:i({},e.cells,{values:r}),headerCells:i({},e.header,{values:m}),gdColumns:m.map((function(t){return t[0]})),gdColumnsOriginalOrder:m.map((function(t){return t[0]})),prevPages:[0,0],scrollbarState:{scrollbarScrollInProgress:!1},columns:m.map((function(t,e){var r=A[t];return A[t]=(r||0)+1,{key:t+"__"+A[t],label:t,specIndex:e,xIndex:M[e],xScale:u,x:void 0,calcdata:void 0,columnWidth:S[e]}}))};return C.columns.forEach((function(t){t.calcdata=C,t.x=u(t)})),C}},{"../../lib/extend":488,"./constants":1064,"fast-isnumeric":185}],1066:[function(t,e,r){"use strict";var n=t("../../lib/extend").extendFlat;r.splitToPanels=function(t){var 
e=[0,0],r=n({},t,{key:"header",type:"header",page:0,prevPages:e,currentRepaint:[null,null],dragHandle:!0,values:t.calcdata.headerCells.values[t.specIndex],rowBlocks:t.calcdata.headerRowBlocks,calcdata:n({},t.calcdata,{cells:t.calcdata.headerCells})});return[n({},t,{key:"cells1",type:"cells",page:0,prevPages:e,currentRepaint:[null,null],dragHandle:!1,values:t.calcdata.cells.values[t.specIndex],rowBlocks:t.calcdata.rowBlocks}),n({},t,{key:"cells2",type:"cells",page:1,prevPages:e,currentRepaint:[null,null],dragHandle:!1,values:t.calcdata.cells.values[t.specIndex],rowBlocks:t.calcdata.rowBlocks}),r]},r.splitToCells=function(t){var e=function(t){var e=t.rowBlocks[t.page],r=e?e.rows[0].rowIndex:0,n=e?r+e.rows.length:0;return[r,n]}(t);return(t.values||[]).slice(e[0],e[1]).map((function(r,n){return{keyWithinBlock:n+("string"==typeof r&&r.match(/[<$&> ]/)?"_keybuster_"+Math.random():""),key:e[0]+n,column:t,calcdata:t.calcdata,page:t.page,rowBlocks:t.rowBlocks,value:r}}))}},{"../../lib/extend":488}],1067:[function(t,e,r){"use strict";var n=t("../../lib"),i=t("./attributes"),a=t("../../plots/domain").defaults;e.exports=function(t,e,r,o){function s(r,a){return n.coerce(t,e,i,r,a)}a(e,o,s),s("columnwidth"),s("header.values"),s("header.format"),s("header.align"),s("header.prefix"),s("header.suffix"),s("header.height"),s("header.line.width"),s("header.line.color"),s("header.fill.color"),n.coerceFont(s,"header.font",n.extendFlat({},o.font)),function(t,e){for(var r=t.columnorder||[],n=t.header.values.length,i=r.slice(0,n),a=i.slice().sort((function(t,e){return t-e})),o=i.map((function(t){return a.indexOf(t)})),s=o.length;s/i),l=!o||s;t.mayHaveMarkup=o&&i.match(/[<&>]/);var c,u="string"==typeof(c=i)&&c.match(n.latexCheck);t.latex=u;var 
f,h,p=u?"":T(t.calcdata.cells.prefix,e,r)||"",d=u?"":T(t.calcdata.cells.suffix,e,r)||"",m=u?null:T(t.calcdata.cells.format,e,r)||null,g=p+(m?a(m)(t.value):t.value)+d;if(t.wrappingNeeded=!t.wrapped&&!l&&!u&&(f=w(g)),t.cellHeightMayIncrease=s||u||t.mayHaveMarkup||(void 0===f?w(g):f),t.needsConvertToTspans=t.mayHaveMarkup||t.wrappingNeeded||t.latex,t.wrappingNeeded){var v=(" "===n.wrapSplitCharacter?g.replace(/i&&n.push(a),i+=l}return n}(i,l,s);1===c.length&&(c[0]===i.length-1?c.unshift(c[0]-1):c.push(c[0]+1)),c[0]%2&&c.reverse(),e.each((function(t,e){t.page=c[e],t.scrollY=l})),e.attr("transform",(function(t){var e=D(t.rowBlocks,t.page)-t.scrollY;return u(0,e)})),t&&(C(t,r,e,c,n.prevPages,n,0),C(t,r,e,c,n.prevPages,n,1),x(r,t))}}function L(t,e,r,a){return function(o){var s=o.calcdata?o.calcdata:o,l=e.filter((function(t){return s.key===t.key})),c=r||s.scrollbarState.dragMultiplier,u=s.scrollY;s.scrollY=void 0===a?s.scrollY+c*i.event.dy:a;var f=l.selectAll("."+n.cn.yColumn).selectAll("."+n.cn.columnBlock).filter(A);return E(t,f,l),s.scrollY===u}}function C(t,e,r,n,i,a,o){n[o]!==i[o]&&(clearTimeout(a.currentRepaint[o]),a.currentRepaint[o]=setTimeout((function(){var a=r.filter((function(t,e){return e===o&&n[e]!==i[e]}));b(t,e,a,r),i[o]=n[o]})))}function P(t,e,r,a){return function(){var o=i.select(e.parentNode);o.each((function(t){var e=t.fragments;o.selectAll("tspan.line").each((function(t,r){e[r].width=this.getComputedTextLength()}));var r,i,a=e[e.length-1].width,s=e.slice(0,-1),l=[],c=0,u=t.column.columnWidth-2*n.cellPad;for(t.value="";s.length;)c+(i=(r=s.shift()).width+a)>u&&(t.value+=l.join(n.wrapSpacer)+n.lineBreaker,l=[],c=0),l.push(r.text),c+=i;c&&(t.value+=l.join(n.wrapSpacer)),t.wrapped=!0})),o.selectAll("tspan.line").remove(),_(o.select("."+n.cn.cellText),r,t,a),i.select(e.parentNode.parentNode).call(z)}}function I(t,e,r,a,o){return function(){if(!o.settledY){var 
s=i.select(e.parentNode),l=B(o),c=o.key-l.firstRowIndex,f=l.rows[c].rowHeight,h=o.cellHeightMayIncrease?e.parentNode.getBoundingClientRect().height+2*n.cellPad:f,p=Math.max(h,f);p-l.rows[c].rowHeight&&(l.rows[c].rowHeight=p,t.selectAll("."+n.cn.columnCell).call(z),E(null,t.filter(A),0),x(r,a,!0)),s.attr("transform",(function(){var t=this.parentNode.getBoundingClientRect(),e=i.select(this.parentNode).select("."+n.cn.cellRect).node().getBoundingClientRect(),r=this.transform.baseVal.consolidate(),a=e.top-t.top+(r?r.matrix.f:n.cellPad);return u(O(o,i.select(this.parentNode).select("."+n.cn.cellTextHolder).node().getBoundingClientRect().width),a)})),o.settledY=!0}}}function O(t,e){switch(t.align){case"left":return n.cellPad;case"right":return t.column.columnWidth-(e||0)-n.cellPad;case"center":return(t.column.columnWidth-(e||0))/2;default:return n.cellPad}}function z(t){t.attr("transform",(function(t){var e=t.rowBlocks[0].auxiliaryBlocks.reduce((function(t,e){return t+R(e,1/0)}),0),r=R(B(t),t.key);return u(0,r+e)})).selectAll("."+n.cn.cellRect).attr("height",(function(t){return(e=B(t),r=t.key,e.rows[r-e.firstRowIndex]).rowHeight;var e,r}))}function D(t,e){for(var r=0,n=e-1;n>=0;n--)r+=F(t[n]);return r}function R(t,e){for(var r=0,n=0;n","<","|","/","\\"],dflt:">",editType:"plot"},thickness:{valType:"number",min:12,editType:"plot"},textfont:u({},s.textfont,{}),editType:"calc"},text:s.text,textinfo:l.textinfo,texttemplate:i({editType:"plot"},{keys:c.eventDataKeys.concat(["label","value"])}),hovertext:s.hovertext,hoverinfo:l.hoverinfo,hovertemplate:n({},{keys:c.eventDataKeys}),textfont:s.textfont,insidetextfont:s.insidetextfont,outsidetextfont:u({},s.outsidetextfont,{}),textposition:{valType:"enumerated",values:["top left","top center","top right","middle left","middle center","middle right","bottom left","bottom center","bottom right"],dflt:"top 
left",editType:"plot"},sort:s.sort,root:l.root,domain:o({name:"treemap",trace:!0,editType:"calc"})}},{"../../components/colorscale/attributes":368,"../../lib/extend":488,"../../plots/domain":579,"../../plots/template_attributes":628,"../pie/attributes":896,"../sunburst/attributes":1044,"./constants":1073}],1071:[function(t,e,r){"use strict";var n=t("../../plots/plots");r.name="treemap",r.plot=function(t,e,i,a){n.plotBasePlot(r.name,t,e,i,a)},r.clean=function(t,e,i,a){n.cleanBasePlot(r.name,t,e,i,a)}},{"../../plots/plots":614}],1072:[function(t,e,r){"use strict";var n=t("../sunburst/calc");r.calc=function(t,e){return n.calc(t,e)},r.crossTraceCalc=function(t){return n._runCrossTraceCalc("treemap",t)}},{"../sunburst/calc":1046}],1073:[function(t,e,r){"use strict";e.exports={CLICK_TRANSITION_TIME:750,CLICK_TRANSITION_EASING:"poly",eventDataKeys:["currentPath","root","entry","percentRoot","percentEntry","percentParent"],gapWithPathbar:1}},{}],1074:[function(t,e,r){"use strict";var n=t("../../lib"),i=t("./attributes"),a=t("../../components/color"),o=t("../../plots/domain").defaults,s=t("../bar/defaults").handleText,l=t("../bar/constants").TEXTPAD,c=t("../../components/colorscale"),u=c.hasColorscale,f=c.handleDefaults;e.exports=function(t,e,r,c){function h(r,a){return n.coerce(t,e,i,r,a)}var p=h("labels"),d=h("parents");if(p&&p.length&&d&&d.length){var m=h("values");m&&m.length?h("branchvalues"):h("count"),h("level"),h("maxdepth"),"squarify"===h("tiling.packing")&&h("tiling.squarifyratio"),h("tiling.flip"),h("tiling.pad");var g=h("text");h("texttemplate"),e.texttemplate||h("textinfo",Array.isArray(g)?"text+label":"label"),h("hovertext"),h("hovertemplate");var v=h("pathbar.visible");s(t,e,c,h,"auto",{hasPathbar:v,moduleHasSelected:!1,moduleHasUnselected:!1,moduleHasConstrain:!1,moduleHasCliponaxis:!1,moduleHasTextangle:!1,moduleHasInsideanchor:!1}),h("textposition");var 
y=-1!==e.textposition.indexOf("bottom");h("marker.line.width")&&h("marker.line.color",c.paper_bgcolor);var x=h("marker.colors");(e._hasColorscale=u(t,"marker","colors")||(t.marker||{}).coloraxis)?f(t,e,c,h,{prefix:"marker.",cLetter:"c"}):h("marker.depthfade",!(x||[]).length);var b=2*e.textfont.size;h("marker.pad.t",y?b/4:b),h("marker.pad.l",b/4),h("marker.pad.r",b/4),h("marker.pad.b",y?b:b/4),e._hovered={marker:{line:{width:2,color:a.contrast(c.paper_bgcolor)}}},v&&(h("pathbar.thickness",e.pathbar.textfont.size+2*l),h("pathbar.side"),h("pathbar.edgeshape")),h("sort"),h("root.color"),o(e,c,h),e._length=null}else e.visible=!1}},{"../../components/color":361,"../../components/colorscale":373,"../../lib":498,"../../plots/domain":579,"../bar/constants":645,"../bar/defaults":647,"./attributes":1070}],1075:[function(t,e,r){"use strict";var n=t("@plotly/d3"),i=t("../sunburst/helpers"),a=t("../bar/uniform_text").clearMinTextSize,o=t("../bar/style").resizeText,s=t("./plot_one");e.exports=function(t,e,r,l,c){var u,f,h=c.type,p=c.drawDescendants,d=t._fullLayout,m=d["_"+h+"layer"],g=!r;(a(h,d),(u=m.selectAll("g.trace."+h).data(e,(function(t){return t[0].trace.uid}))).enter().append("g").classed("trace",!0).classed(h,!0),u.order(),!d.uniformtext.mode&&i.hasTransition(r))?(l&&(f=l()),n.transition().duration(r.duration).ease(r.easing).each("end",(function(){f&&f()})).each("interrupt",(function(){f&&f()})).each((function(){m.selectAll("g.trace").each((function(e){s(t,e,this,r,p)}))}))):(u.each((function(e){s(t,e,this,r,p)})),d.uniformtext.mode&&o(t,m.selectAll(".trace"),h));g&&u.exit().remove()}},{"../bar/style":657,"../bar/uniform_text":659,"../sunburst/helpers":1050,"./plot_one":1084,"@plotly/d3":58}],1076:[function(t,e,r){"use strict";var n=t("@plotly/d3"),i=t("../../lib"),a=t("../../components/drawing"),o=t("../../lib/svg_text_utils"),s=t("./partition"),l=t("./style").styleOne,c=t("./constants"),u=t("../sunburst/helpers"),f=t("../sunburst/fx");e.exports=function(t,e,r,h,p){var 
d=p.barDifY,m=p.width,g=p.height,v=p.viewX,y=p.viewY,x=p.pathSlice,b=p.toMoveInsideSlice,_=p.strTransform,w=p.hasTransition,T=p.handleSlicesExit,k=p.makeUpdateSliceInterpolator,A=p.makeUpdateTextInterpolator,M={},S=t._fullLayout,E=e[0],L=E.trace,C=E.hierarchy,P=m/L._entryDepth,I=u.listPath(r.data,"id"),O=s(C.copy(),[m,g],{packing:"dice",pad:{inner:0,top:0,left:0,right:0,bottom:0}}).descendants();(O=O.filter((function(t){var e=I.indexOf(t.data.id);return-1!==e&&(t.x0=P*e,t.x1=P*(e+1),t.y0=d,t.y1=d+g,t.onPathbar=!0,!0)}))).reverse(),(h=h.data(O,u.getPtId)).enter().append("g").classed("pathbar",!0),T(h,!0,M,[m,g],x),h.order();var z=h;w&&(z=z.transition().each("end",(function(){var e=n.select(this);u.setSliceCursor(e,t,{hideOnRoot:!1,hideOnLeaves:!1,isTransitioning:!1})}))),z.each((function(s){s._x0=v(s.x0),s._x1=v(s.x1),s._y0=y(s.y0),s._y1=y(s.y1),s._hoverX=v(s.x1-Math.min(m,g)/2),s._hoverY=y(s.y1-g/2);var h=n.select(this),p=i.ensureSingle(h,"path","surface",(function(t){t.style("pointer-events","all")}));w?p.transition().attrTween("d",(function(t){var e=k(t,!0,M,[m,g]);return function(t){return x(e(t))}})):p.attr("d",x),h.call(f,r,t,e,{styleOne:l,eventDataKeys:c.eventDataKeys,transitionTime:c.CLICK_TRANSITION_TIME,transitionEasing:c.CLICK_TRANSITION_EASING}).call(u.setSliceCursor,t,{hideOnRoot:!1,hideOnLeaves:!1,isTransitioning:t._transitioning}),p.call(l,s,L,{hovered:!1}),s._text=(u.getPtLabel(s)||"").split("
").join(" ")||"";var d=i.ensureSingle(h,"g","slicetext"),T=i.ensureSingle(d,"text","",(function(t){t.attr("data-notex",1)})),E=i.ensureUniformFontSize(t,u.determineTextFont(L,s,S.font,{onPathbar:!0}));T.text(s._text||" ").classed("slicetext",!0).attr("text-anchor","start").call(a.font,E).call(o.convertToTspans,t),s.textBB=a.bBox(T.node()),s.transform=b(s,{fontSize:E.size,onPathbar:!0}),s.transform.fontSize=E.size,w?T.transition().attrTween("transform",(function(t){var e=A(t,!0,M,[m,g]);return function(t){return _(e(t))}})):T.attr("transform",_(s))}))}},{"../../components/drawing":383,"../../lib":498,"../../lib/svg_text_utils":524,"../sunburst/fx":1049,"../sunburst/helpers":1050,"./constants":1073,"./partition":1082,"./style":1085,"@plotly/d3":58}],1077:[function(t,e,r){"use strict";var n=t("@plotly/d3"),i=t("../../lib"),a=t("../../components/drawing"),o=t("../../lib/svg_text_utils"),s=t("./partition"),l=t("./style").styleOne,c=t("./constants"),u=t("../sunburst/helpers"),f=t("../sunburst/fx"),h=t("../sunburst/plot").formatSliceLabel;e.exports=function(t,e,r,p,d){var m=d.width,g=d.height,v=d.viewX,y=d.viewY,x=d.pathSlice,b=d.toMoveInsideSlice,_=d.strTransform,w=d.hasTransition,T=d.handleSlicesExit,k=d.makeUpdateSliceInterpolator,A=d.makeUpdateTextInterpolator,M=d.prevEntry,S=t._fullLayout,E=e[0].trace,L=-1!==E.textposition.indexOf("left"),C=-1!==E.textposition.indexOf("right"),P=-1!==E.textposition.indexOf("bottom"),I=!P&&!E.marker.pad.t||P&&!E.marker.pad.b,O=s(r,[m,g],{packing:E.tiling.packing,squarifyratio:E.tiling.squarifyratio,flipX:E.tiling.flip.indexOf("x")>-1,flipY:E.tiling.flip.indexOf("y")>-1,pad:{inner:E.tiling.pad,top:E.marker.pad.t,left:E.marker.pad.l,right:E.marker.pad.r,bottom:E.marker.pad.b}}).descendants(),z=1/0,D=-1/0;O.forEach((function(t){var 
e=t.depth;e>=E._maxDepth?(t.x0=t.x1=(t.x0+t.x1)/2,t.y0=t.y1=(t.y0+t.y1)/2):(z=Math.min(z,e),D=Math.max(D,e))})),p=p.data(O,u.getPtId),E._maxVisibleLayers=isFinite(D)?D-z+1:0,p.enter().append("g").classed("slice",!0),T(p,!1,{},[m,g],x),p.order();var R=null;if(w&&M){var F=u.getPtId(M);p.each((function(t){null===R&&u.getPtId(t)===F&&(R={x0:t.x0,x1:t.x1,y0:t.y0,y1:t.y1})}))}var B=function(){return R||{x0:0,x1:m,y0:0,y1:g}},N=p;return w&&(N=N.transition().each("end",(function(){var e=n.select(this);u.setSliceCursor(e,t,{hideOnRoot:!0,hideOnLeaves:!1,isTransitioning:!1})}))),N.each((function(s){var p=u.isHeader(s,E);s._x0=v(s.x0),s._x1=v(s.x1),s._y0=y(s.y0),s._y1=y(s.y1),s._hoverX=v(s.x1-E.marker.pad.r),s._hoverY=y(P?s.y1-E.marker.pad.b/2:s.y0+E.marker.pad.t/2);var d=n.select(this),T=i.ensureSingle(d,"path","surface",(function(t){t.style("pointer-events","all")}));w?T.transition().attrTween("d",(function(t){var e=k(t,!1,B(),[m,g]);return function(t){return x(e(t))}})):T.attr("d",x),d.call(f,r,t,e,{styleOne:l,eventDataKeys:c.eventDataKeys,transitionTime:c.CLICK_TRANSITION_TIME,transitionEasing:c.CLICK_TRANSITION_EASING}).call(u.setSliceCursor,t,{isTransitioning:t._transitioning}),T.call(l,s,E,{hovered:!1}),s.x0===s.x1||s.y0===s.y1?s._text="":s._text=p?I?"":u.getPtLabel(s)||"":h(s,r,E,e,S)||"";var M=i.ensureSingle(d,"g","slicetext"),O=i.ensureSingle(M,"text","",(function(t){t.attr("data-notex",1)})),z=i.ensureUniformFontSize(t,u.determineTextFont(E,s,S.font));O.text(s._text||" ").classed("slicetext",!0).attr("text-anchor",C?"end":L||p?"start":"middle").call(a.font,z).call(o.convertToTspans,t),s.textBB=a.bBox(O.node()),s.transform=b(s,{fontSize:z.size,isHeader:p}),s.transform.fontSize=z.size,w?O.transition().attrTween("transform",(function(t){var e=A(t,!1,B(),[m,g]);return function(t){return 
_(e(t))}})):O.attr("transform",_(s))})),R}},{"../../components/drawing":383,"../../lib":498,"../../lib/svg_text_utils":524,"../sunburst/fx":1049,"../sunburst/helpers":1050,"../sunburst/plot":1054,"./constants":1073,"./partition":1082,"./style":1085,"@plotly/d3":58}],1078:[function(t,e,r){"use strict";e.exports=function t(e,r,n){var i;n.swapXY&&(i=e.x0,e.x0=e.y0,e.y0=i,i=e.x1,e.x1=e.y1,e.y1=i),n.flipX&&(i=e.x0,e.x0=r[0]-e.x1,e.x1=r[0]-i),n.flipY&&(i=e.y0,e.y0=r[1]-e.y1,e.y1=r[1]-i);var a=e.children;if(a)for(var o=0;o-1?C+O:-(I+O):0,D={x0:P,x1:P,y0:z,y1:z+I},R=function(t,e,r){var n=v.tiling.pad,i=function(t){return t-n<=e.x0},a=function(t){return t+n>=e.x1},o=function(t){return t-n<=e.y0},s=function(t){return t+n>=e.y1};return t.x0===e.x0&&t.x1===e.x1&&t.y0===e.y0&&t.y1===e.y1?{x0:t.x0,x1:t.x1,y0:t.y0,y1:t.y1}:{x0:i(t.x0-n)?0:a(t.x0-n)?r[0]:t.x0,x1:i(t.x1+n)?0:a(t.x1+n)?r[0]:t.x1,y0:o(t.y0-n)?0:s(t.y0-n)?r[1]:t.y0,y1:o(t.y1+n)?0:s(t.y1+n)?r[1]:t.y1}},F=null,B={},N={},j=null,U=function(t,e){return e?B[h(t)]:N[h(t)]},V=function(t,e,r,n){if(e)return B[h(x)]||D;var i=N[v.level]||r;return function(t){return t.data.depth-b.data.depth=(n-=(y?g:g.r)-s)){var x=(r+n)/2;r=x,n=x}var b;f?i<(b=a-(y?g:g.b))&&b"===tt?(l.x-=a,c.x-=a,u.x-=a,f.x-=a):"/"===tt?(u.x-=a,f.x-=a,o.x-=a/2,s.x-=a/2):"\\"===tt?(l.x-=a,c.x-=a,o.x-=a/2,s.x-=a/2):"<"===tt&&(o.x-=a,s.x-=a),$(l),$(f),$(o),$(c),$(u),$(s),"M"+K(l.x,l.y)+"L"+K(c.x,c.y)+"L"+K(s.x,s.y)+"L"+K(u.x,u.y)+"L"+K(f.x,f.y)+"L"+K(o.x,o.y)+"Z"},toMoveInsideSlice:et,makeUpdateSliceInterpolator:nt,makeUpdateTextInterpolator:it,handleSlicesExit:at,hasTransition:A,strTransform:ot}):w.remove()}},{"../../lib":498,"../bar/constants":645,"../bar/plot":654,"../bar/uniform_text":659,"../sunburst/helpers":1050,"./constants":1073,"./draw_ancestors":1076,"@plotly/d3":58,"d3-interpolate":111}],1085:[function(t,e,r){"use strict";var 
n=t("@plotly/d3"),i=t("../../components/color"),a=t("../../lib"),o=t("../sunburst/helpers"),s=t("../bar/uniform_text").resizeText;function l(t,e,r,n){var s,l,c=(n||{}).hovered,u=e.data.data,f=u.i,h=u.color,p=o.isHierarchyRoot(e),d=1;if(c)s=r._hovered.marker.line.color,l=r._hovered.marker.line.width;else if(p&&h===r.root.color)d=100,s="rgba(0,0,0,0)",l=0;else if(s=a.castOption(r,f,"marker.line.color")||i.defaultLine,l=a.castOption(r,f,"marker.line.width")||0,!r._hasColorscale&&!e.onPathbar){var m=r.marker.depthfade;if(m){var g,v=i.combine(i.addOpacity(r._backgroundColor,.75),h);if(!0===m){var y=o.getMaxDepth(r);g=isFinite(y)?o.isLeaf(e)?0:r._maxVisibleLayers-(e.data.depth-r._entryDepth):e.data.height+1}else g=e.data.depth-r._entryDepth,r._atRootLevel||g++;if(g>0)for(var x=0;x0){var x,b,_,w,T,k=t.xa,A=t.ya;"h"===p.orientation?(T=e,x="y",_=A,b="x",w=k):(T=r,x="x",_=k,b="y",w=A);var M=h[t.index];if(T>=M.span[0]&&T<=M.span[1]){var S=n.extendFlat({},t),E=w.c2p(T,!0),L=o.getKdeValue(M,p,T),C=o.getPositionOnKdePath(M,p,E),P=_._offset,I=_._length;S[x+"0"]=C[0],S[x+"1"]=C[1],S[b+"0"]=S[b+"1"]=E,S[b+"Label"]=b+": "+i.hoverLabelText(w,T,p[b+"hoverformat"])+", "+h[0].t.labels.kde+" "+L.toFixed(3),S.spikeDistance=y[0].spikeDistance;var O=x+"Spike";S[O]=y[0][O],y[0].spikeDistance=void 0,y[0][O]=void 0,S.hovertemplate=!1,v.push(S),(u={stroke:t.color})[x+"1"]=n.constrain(P+C[0],P,P+I),u[x+"2"]=n.constrain(P+C[1],P,P+I),u[b+"1"]=u[b+"2"]=w._offset+E}}m&&(v=v.concat(y))}-1!==d.indexOf("points")&&(c=a.hoverOnPoints(t,e,r));var z=f.selectAll(".violinline-"+p.uid).data(u?[0]:[]);return z.enter().append("line").classed("violinline-"+p.uid,!0).attr("stroke-width",1.5),z.exit().remove(),z.attr(u),"closest"===s?c?[c]:v:c?(v.push(c),v):v}},{"../../lib":498,"../../plots/cartesian/axes":549,"../box/hover":673,"./helpers":1090}],1092:[function(t,e,r){"use 
strict";e.exports={attributes:t("./attributes"),layoutAttributes:t("./layout_attributes"),supplyDefaults:t("./defaults"),crossTraceDefaults:t("../box/defaults").crossTraceDefaults,supplyLayoutDefaults:t("./layout_defaults"),calc:t("./calc"),crossTraceCalc:t("./cross_trace_calc"),plot:t("./plot"),style:t("./style"),styleOnSelect:t("../scatter/style").styleOnSelect,hoverPoints:t("./hover"),selectPoints:t("../box/select"),moduleType:"trace",name:"violin",basePlotModule:t("../../plots/cartesian"),categories:["cartesian","svg","symbols","oriented","box-violin","showLegend","violinLayout","zoomScale"],meta:{}}},{"../../plots/cartesian":563,"../box/defaults":671,"../box/select":678,"../scatter/style":946,"./attributes":1086,"./calc":1087,"./cross_trace_calc":1088,"./defaults":1089,"./hover":1091,"./layout_attributes":1093,"./layout_defaults":1094,"./plot":1095,"./style":1096}],1093:[function(t,e,r){"use strict";var n=t("../box/layout_attributes"),i=t("../../lib").extendFlat;e.exports={violinmode:i({},n.boxmode,{}),violingap:i({},n.boxgap,{}),violingroupgap:i({},n.boxgroupgap,{})}},{"../../lib":498,"../box/layout_attributes":675}],1094:[function(t,e,r){"use strict";var n=t("../../lib"),i=t("./layout_attributes"),a=t("../box/layout_defaults");e.exports=function(t,e,r){a._supply(t,e,r,(function(r,a){return n.coerce(t,e,i,r,a)}),"violin")}},{"../../lib":498,"../box/layout_defaults":676,"./layout_attributes":1093}],1095:[function(t,e,r){"use strict";var n=t("@plotly/d3"),i=t("../../lib"),a=t("../../components/drawing"),o=t("../box/plot"),s=t("../scatter/line_points"),l=t("./helpers");e.exports=function(t,e,r,c){var u=t._fullLayout,f=e.xaxis,h=e.yaxis;function p(t){var e=s(t,{xaxis:f,yaxis:h,connectGaps:!0,baseTolerance:.75,shape:"spline",simplify:!0,linearized:!0});return a.smoothopen(e[0],1)}i.makeTraceGroups(c,r,"trace violins").each((function(t){var r=n.select(this),a=t[0],s=a.t,c=a.trace;if(!0!==c.visible||s.empty)r.remove();else{var 
d=s.bPos,m=s.bdPos,g=e[s.valLetter+"axis"],v=e[s.posLetter+"axis"],y="both"===c.side,x=y||"positive"===c.side,b=y||"negative"===c.side,_=r.selectAll("path.violin").data(i.identity);_.enter().append("path").style("vector-effect","non-scaling-stroke").attr("class","violin"),_.exit().remove(),_.each((function(t){var e,r,i,a,o,l,f,h,_=n.select(this),w=t.density,T=w.length,k=v.c2l(t.pos+d,!0),A=v.l2p(k);if(c.width)e=s.maxKDE/m;else{var M=u._violinScaleGroupStats[c.scalegroup];e="count"===c.scalemode?M.maxKDE/m*(M.maxCount/t.pts.length):M.maxKDE/m}if(x){for(f=new Array(T),o=0;o")),u.color=function(t,e){var r=t[e.dir].marker,n=r.color,a=r.line.color,o=r.line.width;if(i(n))return n;if(i(a)&&o)return a}(h,g),[u]}function k(t){return n(m,t,h[d+"hoverformat"])}}},{"../../components/color":361,"../../constants/delta.js":468,"../../plots/cartesian/axes":549,"../bar/hover":650}],1108:[function(t,e,r){"use strict";e.exports={attributes:t("./attributes"),layoutAttributes:t("./layout_attributes"),supplyDefaults:t("./defaults").supplyDefaults,crossTraceDefaults:t("./defaults").crossTraceDefaults,supplyLayoutDefaults:t("./layout_defaults"),calc:t("./calc"),crossTraceCalc:t("./cross_trace_calc"),plot:t("./plot"),style:t("./style").style,hoverPoints:t("./hover"),eventData:t("./event_data"),selectPoints:t("../bar/select"),moduleType:"trace",name:"waterfall",basePlotModule:t("../../plots/cartesian"),categories:["bar-like","cartesian","svg","oriented","showLegend","zoomScale"],meta:{}}},{"../../plots/cartesian":563,"../bar/select":655,"./attributes":1101,"./calc":1102,"./cross_trace_calc":1104,"./defaults":1105,"./event_data":1106,"./hover":1107,"./layout_attributes":1109,"./layout_defaults":1110,"./plot":1111,"./style":1112}],1109:[function(t,e,r){"use 
strict";e.exports={waterfallmode:{valType:"enumerated",values:["group","overlay"],dflt:"group",editType:"calc"},waterfallgap:{valType:"number",min:0,max:1,editType:"calc"},waterfallgroupgap:{valType:"number",min:0,max:1,dflt:0,editType:"calc"}}},{}],1110:[function(t,e,r){"use strict";var n=t("../../lib"),i=t("./layout_attributes");e.exports=function(t,e,r){var a=!1;function o(r,a){return n.coerce(t,e,i,r,a)}for(var s=0;s0&&(g+=h?"M"+f[0]+","+d[1]+"V"+d[0]:"M"+f[1]+","+d[0]+"H"+f[0]),"between"!==p&&(r.isSum||s path").each((function(t){if(!t.isBlank){var e=s[t.dir].marker;n.select(this).call(a.fill,e.color).call(a.stroke,e.line.color).call(i.dashLine,e.line.dash,e.line.width).style("opacity",s.selectedpoints&&!t.selected?o:1)}})),c(r,s,t),r.selectAll(".lines").each((function(){var t=s.connector.line;i.lineGroupStyle(n.select(this).selectAll("path"),t.width,t.color,t.dash)}))}))}}},{"../../components/color":361,"../../components/drawing":383,"../../constants/interactions":473,"../bar/style":657,"../bar/uniform_text":659,"@plotly/d3":58}],1113:[function(t,e,r){"use strict";var n=t("../plots/cartesian/axes"),i=t("../lib"),a=t("../plot_api/plot_schema"),o=t("./helpers").pointsAccessorFunction,s=t("../constants/numerical").BADNUM;r.moduleType="transform",r.name="aggregate";var l=r.attributes={enabled:{valType:"boolean",dflt:!0,editType:"calc"},groups:{valType:"string",strict:!0,noBlank:!0,arrayOk:!0,dflt:"x",editType:"calc"},aggregations:{_isLinkedToArray:"aggregation",target:{valType:"string",editType:"calc"},func:{valType:"enumerated",values:["count","sum","avg","median","mode","rms","stddev","min","max","first","last","change","range"],dflt:"first",editType:"calc"},funcmode:{valType:"enumerated",values:["sample","population"],dflt:"sample",editType:"calc"},enabled:{valType:"boolean",dflt:!0,editType:"calc"},editType:"calc"},editType:"calc"},c=l.aggregations;function u(t,e,r,a){if(a.enabled){for(var o=a.target,l=i.nestedProperty(e,o),c=l.get(),u=function(t,e){var 
r=t.func,n=e.d2c,a=e.c2d;switch(r){case"count":return f;case"first":return h;case"last":return p;case"sum":return function(t,e){for(var r=0,i=0;ii&&(i=u,o=c)}}return i?a(o):s};case"rms":return function(t,e){for(var r=0,i=0,o=0;o":return function(t){return h(t)>s};case">=":return function(t){return h(t)>=s};case"[]":return function(t){var e=h(t);return e>=s[0]&&e<=s[1]};case"()":return function(t){var e=h(t);return e>s[0]&&e=s[0]&&es[0]&&e<=s[1]};case"][":return function(t){var e=h(t);return e<=s[0]||e>=s[1]};case")(":return function(t){var e=h(t);return es[1]};case"](":return function(t){var e=h(t);return e<=s[0]||e>s[1]};case")[":return function(t){var e=h(t);return e=s[1]};case"{}":return function(t){return-1!==s.indexOf(h(t))};case"}{":return function(t){return-1===s.indexOf(h(t))}}}(r,a.getDataToCoordFunc(t,e,s,i),h),x={},b={},_=0;d?(g=function(t){x[t.astr]=n.extendDeep([],t.get()),t.set(new Array(f))},v=function(t,e){var r=x[t.astr][e];t.get()[e]=r}):(g=function(t){x[t.astr]=n.extendDeep([],t.get()),t.set([])},v=function(t,e){var r=x[t.astr][e];t.get().push(r)}),k(g);for(var w=o(e.transforms,r),T=0;T1?"%{group} (%{trace})":"%{group}");var l=t.styles,c=o.styles=[];if(l)for(a=0;a0?o-4:o;for(r=0;r>16&255,l[u++]=e>>8&255,l[u++]=255&e;2===s&&(e=i[t.charCodeAt(r)]<<2|i[t.charCodeAt(r+1)]>>4,l[u++]=255&e);1===s&&(e=i[t.charCodeAt(r)]<<10|i[t.charCodeAt(r+1)]<<4|i[t.charCodeAt(r+2)]>>2,l[u++]=e>>8&255,l[u++]=255&e);return l},r.fromByteArray=function(t){for(var e,r=t.length,i=r%3,a=[],o=0,s=r-i;os?s:o+16383));1===i?(e=t[r-1],a.push(n[e>>2]+n[e<<4&63]+"==")):2===i&&(e=(t[r-2]<<8)+t[r-1],a.push(n[e>>10]+n[e>>4&63]+n[e<<2&63]+"="));return a.join("")};for(var n=[],i=[],a="undefined"!=typeof Uint8Array?Uint8Array:Array,o="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",s=0,l=o.length;s0)throw new Error("Invalid string. 
Length must be a multiple of 4");var r=t.indexOf("=");return-1===r&&(r=e),[r,r===e?0:4-r%4]}function u(t,e,r){for(var i,a,o=[],s=e;s>18&63]+n[a>>12&63]+n[a>>6&63]+n[63&a]);return o.join("")}i["-".charCodeAt(0)]=62,i["_".charCodeAt(0)]=63},{}],2:[function(t,e,r){},{}],3:[function(t,e,r){(function(e){(function(){ -/*! - * The buffer module from node.js, for the browser. - * - * @author Feross Aboukhadijeh - * @license MIT - */ -"use strict";var e=t("base64-js"),n=t("ieee754");r.Buffer=a,r.SlowBuffer=function(t){+t!=t&&(t=0);return a.alloc(+t)},r.INSPECT_MAX_BYTES=50;function i(t){if(t>2147483647)throw new RangeError('The value "'+t+'" is invalid for option "size"');var e=new Uint8Array(t);return e.__proto__=a.prototype,e}function a(t,e,r){if("number"==typeof t){if("string"==typeof e)throw new TypeError('The "string" argument must be of type string. Received type number');return l(t)}return o(t,e,r)}function o(t,e,r){if("string"==typeof t)return function(t,e){"string"==typeof e&&""!==e||(e="utf8");if(!a.isEncoding(e))throw new TypeError("Unknown encoding: "+e);var r=0|f(t,e),n=i(r),o=n.write(t,e);o!==r&&(n=n.slice(0,o));return n}(t,e);if(ArrayBuffer.isView(t))return c(t);if(null==t)throw TypeError("The first argument must be one of type string, Buffer, ArrayBuffer, Array, or Array-like Object. Received type "+typeof t);if(B(t,ArrayBuffer)||t&&B(t.buffer,ArrayBuffer))return function(t,e,r){if(e<0||t.byteLength=2147483647)throw new RangeError("Attempt to allocate Buffer larger than maximum size: 0x"+2147483647..toString(16)+" bytes");return 0|t}function f(t,e){if(a.isBuffer(t))return t.length;if(ArrayBuffer.isView(t)||B(t,ArrayBuffer))return t.byteLength;if("string"!=typeof t)throw new TypeError('The "string" argument must be one of type string, Buffer, or ArrayBuffer. 
Received type '+typeof t);var r=t.length,n=arguments.length>2&&!0===arguments[2];if(!n&&0===r)return 0;for(var i=!1;;)switch(e){case"ascii":case"latin1":case"binary":return r;case"utf8":case"utf-8":return D(t).length;case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return 2*r;case"hex":return r>>>1;case"base64":return R(t).length;default:if(i)return n?-1:D(t).length;e=(""+e).toLowerCase(),i=!0}}function h(t,e,r){var n=!1;if((void 0===e||e<0)&&(e=0),e>this.length)return"";if((void 0===r||r>this.length)&&(r=this.length),r<=0)return"";if((r>>>=0)<=(e>>>=0))return"";for(t||(t="utf8");;)switch(t){case"hex":return M(this,e,r);case"utf8":case"utf-8":return T(this,e,r);case"ascii":return k(this,e,r);case"latin1":case"binary":return A(this,e,r);case"base64":return w(this,e,r);case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return S(this,e,r);default:if(n)throw new TypeError("Unknown encoding: "+t);t=(t+"").toLowerCase(),n=!0}}function p(t,e,r){var n=t[e];t[e]=t[r],t[r]=n}function d(t,e,r,n,i){if(0===t.length)return-1;if("string"==typeof r?(n=r,r=0):r>2147483647?r=2147483647:r<-2147483648&&(r=-2147483648),N(r=+r)&&(r=i?0:t.length-1),r<0&&(r=t.length+r),r>=t.length){if(i)return-1;r=t.length-1}else if(r<0){if(!i)return-1;r=0}if("string"==typeof e&&(e=a.from(e,n)),a.isBuffer(e))return 0===e.length?-1:m(t,e,r,n,i);if("number"==typeof e)return e&=255,"function"==typeof Uint8Array.prototype.indexOf?i?Uint8Array.prototype.indexOf.call(t,e,r):Uint8Array.prototype.lastIndexOf.call(t,e,r):m(t,[e],r,n,i);throw new TypeError("val must be string, number or Buffer")}function m(t,e,r,n,i){var a,o=1,s=t.length,l=e.length;if(void 0!==n&&("ucs2"===(n=String(n).toLowerCase())||"ucs-2"===n||"utf16le"===n||"utf-16le"===n)){if(t.length<2||e.length<2)return-1;o=2,s/=2,l/=2,r/=2}function c(t,e){return 1===o?t[e]:t.readUInt16BE(e*o)}if(i){var u=-1;for(a=r;as&&(r=s-l),a=r;a>=0;a--){for(var f=!0,h=0;hi&&(n=i):n=i;var a=e.length;n>a/2&&(n=a/2);for(var o=0;o>8,i=r%256,a.push(i),a.push(n);return 
a}(e,t.length-r),t,r,n)}function w(t,r,n){return 0===r&&n===t.length?e.fromByteArray(t):e.fromByteArray(t.slice(r,n))}function T(t,e,r){r=Math.min(t.length,r);for(var n=[],i=e;i239?4:c>223?3:c>191?2:1;if(i+f<=r)switch(f){case 1:c<128&&(u=c);break;case 2:128==(192&(a=t[i+1]))&&(l=(31&c)<<6|63&a)>127&&(u=l);break;case 3:a=t[i+1],o=t[i+2],128==(192&a)&&128==(192&o)&&(l=(15&c)<<12|(63&a)<<6|63&o)>2047&&(l<55296||l>57343)&&(u=l);break;case 4:a=t[i+1],o=t[i+2],s=t[i+3],128==(192&a)&&128==(192&o)&&128==(192&s)&&(l=(15&c)<<18|(63&a)<<12|(63&o)<<6|63&s)>65535&&l<1114112&&(u=l)}null===u?(u=65533,f=1):u>65535&&(u-=65536,n.push(u>>>10&1023|55296),u=56320|1023&u),n.push(u),i+=f}return function(t){var e=t.length;if(e<=4096)return String.fromCharCode.apply(String,t);var r="",n=0;for(;ne&&(t+=" ... "),""},a.prototype.compare=function(t,e,r,n,i){if(B(t,Uint8Array)&&(t=a.from(t,t.offset,t.byteLength)),!a.isBuffer(t))throw new TypeError('The "target" argument must be one of type Buffer or Uint8Array. Received type '+typeof t);if(void 0===e&&(e=0),void 0===r&&(r=t?t.length:0),void 0===n&&(n=0),void 0===i&&(i=this.length),e<0||r>t.length||n<0||i>this.length)throw new RangeError("out of range index");if(n>=i&&e>=r)return 0;if(n>=i)return-1;if(e>=r)return 1;if(this===t)return 0;for(var o=(i>>>=0)-(n>>>=0),s=(r>>>=0)-(e>>>=0),l=Math.min(o,s),c=this.slice(n,i),u=t.slice(e,r),f=0;f>>=0,isFinite(r)?(r>>>=0,void 0===n&&(n="utf8")):(n=r,r=void 0)}var i=this.length-e;if((void 0===r||r>i)&&(r=i),t.length>0&&(r<0||e<0)||e>this.length)throw new RangeError("Attempt to write outside buffer bounds");n||(n="utf8");for(var a=!1;;)switch(n){case"hex":return g(this,t,e,r);case"utf8":case"utf-8":return v(this,t,e,r);case"ascii":return y(this,t,e,r);case"latin1":case"binary":return x(this,t,e,r);case"base64":return b(this,t,e,r);case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return _(this,t,e,r);default:if(a)throw new TypeError("Unknown encoding: 
"+n);n=(""+n).toLowerCase(),a=!0}},a.prototype.toJSON=function(){return{type:"Buffer",data:Array.prototype.slice.call(this._arr||this,0)}};function k(t,e,r){var n="";r=Math.min(t.length,r);for(var i=e;in)&&(r=n);for(var i="",a=e;ar)throw new RangeError("Trying to access beyond buffer length")}function L(t,e,r,n,i,o){if(!a.isBuffer(t))throw new TypeError('"buffer" argument must be a Buffer instance');if(e>i||et.length)throw new RangeError("Index out of range")}function C(t,e,r,n,i,a){if(r+n>t.length)throw new RangeError("Index out of range");if(r<0)throw new RangeError("Index out of range")}function P(t,e,r,i,a){return e=+e,r>>>=0,a||C(t,0,r,4),n.write(t,e,r,i,23,4),r+4}function I(t,e,r,i,a){return e=+e,r>>>=0,a||C(t,0,r,8),n.write(t,e,r,i,52,8),r+8}a.prototype.slice=function(t,e){var r=this.length;(t=~~t)<0?(t+=r)<0&&(t=0):t>r&&(t=r),(e=void 0===e?r:~~e)<0?(e+=r)<0&&(e=0):e>r&&(e=r),e>>=0,e>>>=0,r||E(t,e,this.length);for(var n=this[t],i=1,a=0;++a>>=0,e>>>=0,r||E(t,e,this.length);for(var n=this[t+--e],i=1;e>0&&(i*=256);)n+=this[t+--e]*i;return n},a.prototype.readUInt8=function(t,e){return t>>>=0,e||E(t,1,this.length),this[t]},a.prototype.readUInt16LE=function(t,e){return t>>>=0,e||E(t,2,this.length),this[t]|this[t+1]<<8},a.prototype.readUInt16BE=function(t,e){return t>>>=0,e||E(t,2,this.length),this[t]<<8|this[t+1]},a.prototype.readUInt32LE=function(t,e){return t>>>=0,e||E(t,4,this.length),(this[t]|this[t+1]<<8|this[t+2]<<16)+16777216*this[t+3]},a.prototype.readUInt32BE=function(t,e){return t>>>=0,e||E(t,4,this.length),16777216*this[t]+(this[t+1]<<16|this[t+2]<<8|this[t+3])},a.prototype.readIntLE=function(t,e,r){t>>>=0,e>>>=0,r||E(t,e,this.length);for(var n=this[t],i=1,a=0;++a=(i*=128)&&(n-=Math.pow(2,8*e)),n},a.prototype.readIntBE=function(t,e,r){t>>>=0,e>>>=0,r||E(t,e,this.length);for(var n=e,i=1,a=this[t+--n];n>0&&(i*=256);)a+=this[t+--n]*i;return a>=(i*=128)&&(a-=Math.pow(2,8*e)),a},a.prototype.readInt8=function(t,e){return 
t>>>=0,e||E(t,1,this.length),128&this[t]?-1*(255-this[t]+1):this[t]},a.prototype.readInt16LE=function(t,e){t>>>=0,e||E(t,2,this.length);var r=this[t]|this[t+1]<<8;return 32768&r?4294901760|r:r},a.prototype.readInt16BE=function(t,e){t>>>=0,e||E(t,2,this.length);var r=this[t+1]|this[t]<<8;return 32768&r?4294901760|r:r},a.prototype.readInt32LE=function(t,e){return t>>>=0,e||E(t,4,this.length),this[t]|this[t+1]<<8|this[t+2]<<16|this[t+3]<<24},a.prototype.readInt32BE=function(t,e){return t>>>=0,e||E(t,4,this.length),this[t]<<24|this[t+1]<<16|this[t+2]<<8|this[t+3]},a.prototype.readFloatLE=function(t,e){return t>>>=0,e||E(t,4,this.length),n.read(this,t,!0,23,4)},a.prototype.readFloatBE=function(t,e){return t>>>=0,e||E(t,4,this.length),n.read(this,t,!1,23,4)},a.prototype.readDoubleLE=function(t,e){return t>>>=0,e||E(t,8,this.length),n.read(this,t,!0,52,8)},a.prototype.readDoubleBE=function(t,e){return t>>>=0,e||E(t,8,this.length),n.read(this,t,!1,52,8)},a.prototype.writeUIntLE=function(t,e,r,n){(t=+t,e>>>=0,r>>>=0,n)||L(this,t,e,r,Math.pow(2,8*r)-1,0);var i=1,a=0;for(this[e]=255&t;++a>>=0,r>>>=0,n)||L(this,t,e,r,Math.pow(2,8*r)-1,0);var i=r-1,a=1;for(this[e+i]=255&t;--i>=0&&(a*=256);)this[e+i]=t/a&255;return e+r},a.prototype.writeUInt8=function(t,e,r){return t=+t,e>>>=0,r||L(this,t,e,1,255,0),this[e]=255&t,e+1},a.prototype.writeUInt16LE=function(t,e,r){return t=+t,e>>>=0,r||L(this,t,e,2,65535,0),this[e]=255&t,this[e+1]=t>>>8,e+2},a.prototype.writeUInt16BE=function(t,e,r){return t=+t,e>>>=0,r||L(this,t,e,2,65535,0),this[e]=t>>>8,this[e+1]=255&t,e+2},a.prototype.writeUInt32LE=function(t,e,r){return t=+t,e>>>=0,r||L(this,t,e,4,4294967295,0),this[e+3]=t>>>24,this[e+2]=t>>>16,this[e+1]=t>>>8,this[e]=255&t,e+4},a.prototype.writeUInt32BE=function(t,e,r){return t=+t,e>>>=0,r||L(this,t,e,4,4294967295,0),this[e]=t>>>24,this[e+1]=t>>>16,this[e+2]=t>>>8,this[e+3]=255&t,e+4},a.prototype.writeIntLE=function(t,e,r,n){if(t=+t,e>>>=0,!n){var i=Math.pow(2,8*r-1);L(this,t,e,r,i-1,-i)}var 
a=0,o=1,s=0;for(this[e]=255&t;++a>0)-s&255;return e+r},a.prototype.writeIntBE=function(t,e,r,n){if(t=+t,e>>>=0,!n){var i=Math.pow(2,8*r-1);L(this,t,e,r,i-1,-i)}var a=r-1,o=1,s=0;for(this[e+a]=255&t;--a>=0&&(o*=256);)t<0&&0===s&&0!==this[e+a+1]&&(s=1),this[e+a]=(t/o>>0)-s&255;return e+r},a.prototype.writeInt8=function(t,e,r){return t=+t,e>>>=0,r||L(this,t,e,1,127,-128),t<0&&(t=255+t+1),this[e]=255&t,e+1},a.prototype.writeInt16LE=function(t,e,r){return t=+t,e>>>=0,r||L(this,t,e,2,32767,-32768),this[e]=255&t,this[e+1]=t>>>8,e+2},a.prototype.writeInt16BE=function(t,e,r){return t=+t,e>>>=0,r||L(this,t,e,2,32767,-32768),this[e]=t>>>8,this[e+1]=255&t,e+2},a.prototype.writeInt32LE=function(t,e,r){return t=+t,e>>>=0,r||L(this,t,e,4,2147483647,-2147483648),this[e]=255&t,this[e+1]=t>>>8,this[e+2]=t>>>16,this[e+3]=t>>>24,e+4},a.prototype.writeInt32BE=function(t,e,r){return t=+t,e>>>=0,r||L(this,t,e,4,2147483647,-2147483648),t<0&&(t=4294967295+t+1),this[e]=t>>>24,this[e+1]=t>>>16,this[e+2]=t>>>8,this[e+3]=255&t,e+4},a.prototype.writeFloatLE=function(t,e,r){return P(this,t,e,!0,r)},a.prototype.writeFloatBE=function(t,e,r){return P(this,t,e,!1,r)},a.prototype.writeDoubleLE=function(t,e,r){return I(this,t,e,!0,r)},a.prototype.writeDoubleBE=function(t,e,r){return I(this,t,e,!1,r)},a.prototype.copy=function(t,e,r,n){if(!a.isBuffer(t))throw new TypeError("argument should be a Buffer");if(r||(r=0),n||0===n||(n=this.length),e>=t.length&&(e=t.length),e||(e=0),n>0&&n=this.length)throw new RangeError("Index out of range");if(n<0)throw new RangeError("sourceEnd out of bounds");n>this.length&&(n=this.length),t.length-e=0;--o)t[o+e]=this[o+r];else Uint8Array.prototype.set.call(t,this.subarray(r,n),e);return i},a.prototype.fill=function(t,e,r,n){if("string"==typeof t){if("string"==typeof e?(n=e,e=0,r=this.length):"string"==typeof r&&(n=r,r=this.length),void 0!==n&&"string"!=typeof n)throw new TypeError("encoding must be a string");if("string"==typeof n&&!a.isEncoding(n))throw new 
TypeError("Unknown encoding: "+n);if(1===t.length){var i=t.charCodeAt(0);("utf8"===n&&i<128||"latin1"===n)&&(t=i)}}else"number"==typeof t&&(t&=255);if(e<0||this.length>>=0,r=void 0===r?this.length:r>>>0,t||(t=0),"number"==typeof t)for(o=e;o55295&&r<57344){if(!i){if(r>56319){(e-=3)>-1&&a.push(239,191,189);continue}if(o+1===n){(e-=3)>-1&&a.push(239,191,189);continue}i=r;continue}if(r<56320){(e-=3)>-1&&a.push(239,191,189),i=r;continue}r=65536+(i-55296<<10|r-56320)}else i&&(e-=3)>-1&&a.push(239,191,189);if(i=null,r<128){if((e-=1)<0)break;a.push(r)}else if(r<2048){if((e-=2)<0)break;a.push(r>>6|192,63&r|128)}else if(r<65536){if((e-=3)<0)break;a.push(r>>12|224,r>>6&63|128,63&r|128)}else{if(!(r<1114112))throw new Error("Invalid code point");if((e-=4)<0)break;a.push(r>>18|240,r>>12&63|128,r>>6&63|128,63&r|128)}}return a}function R(t){return e.toByteArray(function(t){if((t=(t=t.split("=")[0]).trim().replace(O,"")).length<2)return"";for(;t.length%4!=0;)t+="=";return t}(t))}function F(t,e,r,n){for(var i=0;i=e.length||i>=t.length);++i)e[i+r]=t[i];return i}function B(t,e){return t instanceof e||null!=t&&null!=t.constructor&&null!=t.constructor.name&&t.constructor.name===e.name}function N(t){return t!=t}}).call(this)}).call(this,t("buffer").Buffer)},{"base64-js":1,buffer:3,ieee754:4}],4:[function(t,e,r){r.read=function(t,e,r,n,i){var a,o,s=8*i-n-1,l=(1<>1,u=-7,f=r?i-1:0,h=r?-1:1,p=t[e+f];for(f+=h,a=p&(1<<-u)-1,p>>=-u,u+=s;u>0;a=256*a+t[e+f],f+=h,u-=8);for(o=a&(1<<-u)-1,a>>=-u,u+=n;u>0;o=256*o+t[e+f],f+=h,u-=8);if(0===a)a=1-c;else{if(a===l)return o?NaN:1/0*(p?-1:1);o+=Math.pow(2,n),a-=c}return(p?-1:1)*o*Math.pow(2,a-n)},r.write=function(t,e,r,n,i,a){var 
o,s,l,c=8*a-i-1,u=(1<>1,h=23===i?Math.pow(2,-24)-Math.pow(2,-77):0,p=n?0:a-1,d=n?1:-1,m=e<0||0===e&&1/e<0?1:0;for(e=Math.abs(e),isNaN(e)||e===1/0?(s=isNaN(e)?1:0,o=u):(o=Math.floor(Math.log(e)/Math.LN2),e*(l=Math.pow(2,-o))<1&&(o--,l*=2),(e+=o+f>=1?h/l:h*Math.pow(2,1-f))*l>=2&&(o++,l/=2),o+f>=u?(s=0,o=u):o+f>=1?(s=(e*l-1)*Math.pow(2,i),o+=f):(s=e*Math.pow(2,f-1)*Math.pow(2,i),o=0));i>=8;t[r+p]=255&s,p+=d,s/=256,i-=8);for(o=o<0;t[r+p]=255&o,p+=d,o/=256,c-=8);t[r+p-d]|=128*m}},{}],5:[function(t,e,r){var n,i,a=e.exports={};function o(){throw new Error("setTimeout has not been defined")}function s(){throw new Error("clearTimeout has not been defined")}function l(t){if(n===setTimeout)return setTimeout(t,0);if((n===o||!n)&&setTimeout)return n=setTimeout,setTimeout(t,0);try{return n(t,0)}catch(e){try{return n.call(null,t,0)}catch(e){return n.call(this,t,0)}}}!function(){try{n="function"==typeof setTimeout?setTimeout:o}catch(t){n=o}try{i="function"==typeof clearTimeout?clearTimeout:s}catch(t){i=s}}();var c,u=[],f=!1,h=-1;function p(){f&&c&&(f=!1,c.length?u=c.concat(u):h=-1,u.length&&d())}function d(){if(!f){var t=l(p);f=!0;for(var e=u.length;e;){for(c=u,u=[];++h1)for(var r=1;r0?c=c.ushln(f):f<0&&(u=u.ushln(-f));return s(c,u)}},{"./div":17,"./is-rat":19,"./lib/is-bn":23,"./lib/num-to-bn":24,"./lib/rationalize":25,"./lib/str-to-bn":26}],19:[function(t,e,r){"use strict";var n=t("./lib/is-bn");e.exports=function(t){return Array.isArray(t)&&2===t.length&&n(t[0])&&n(t[1])}},{"./lib/is-bn":23}],20:[function(t,e,r){"use strict";var n=t("bn.js");e.exports=function(t){return t.cmp(new n(0))}},{"bn.js":33}],21:[function(t,e,r){"use strict";var n=t("./bn-sign");e.exports=function(t){var e=t.length,r=t.words,i=0;if(1===e)i=r[0];else if(2===e)i=r[0]+67108864*r[1];else for(var a=0;a20)return 52;return r+32}},{"bit-twiddle":32,"double-bits":64}],23:[function(t,e,r){"use strict";t("bn.js");e.exports=function(t){return t&&"object"==typeof 
t&&Boolean(t.words)}},{"bn.js":33}],24:[function(t,e,r){"use strict";var n=t("bn.js"),i=t("double-bits");e.exports=function(t){var e=i.exponent(t);return e<52?new n(t):new n(t*Math.pow(2,52-e)).ushln(e-52)}},{"bn.js":33,"double-bits":64}],25:[function(t,e,r){"use strict";var n=t("./num-to-bn"),i=t("./bn-sign");e.exports=function(t,e){var r=i(t),a=i(e);if(0===r)return[n(0),n(1)];if(0===a)return[n(0),n(0)];a<0&&(t=t.neg(),e=e.neg());var o=t.gcd(e);if(o.cmpn(1))return[t.div(o),e.div(o)];return[t,e]}},{"./bn-sign":20,"./num-to-bn":24}],26:[function(t,e,r){"use strict";var n=t("bn.js");e.exports=function(t){return new n(t)}},{"bn.js":33}],27:[function(t,e,r){"use strict";var n=t("./lib/rationalize");e.exports=function(t,e){return n(t[0].mul(e[0]),t[1].mul(e[1]))}},{"./lib/rationalize":25}],28:[function(t,e,r){"use strict";var n=t("./lib/bn-sign");e.exports=function(t){return n(t[0])*n(t[1])}},{"./lib/bn-sign":20}],29:[function(t,e,r){"use strict";var n=t("./lib/rationalize");e.exports=function(t,e){return n(t[0].mul(e[1]).sub(t[1].mul(e[0])),t[1].mul(e[1]))}},{"./lib/rationalize":25}],30:[function(t,e,r){"use strict";var n=t("./lib/bn-to-num"),i=t("./lib/ctz");e.exports=function(t){var e=t[0],r=t[1];if(0===e.cmpn(0))return 0;var a=e.abs().divmod(r.abs()),o=a.div,s=n(o),l=a.mod,c=e.negative!==r.negative?-1:1;if(0===l.cmpn(0))return c*s;if(s){var u=i(s)+4,f=n(l.ushln(u).divRound(r));return c*(s+f*Math.pow(2,-u))}var h=r.bitLength()-l.bitLength()+53;f=n(l.ushln(h).divRound(r));return h<1023?c*f*Math.pow(2,-h):(f*=Math.pow(2,-1023),c*f*Math.pow(2,1023-h))}},{"./lib/bn-to-num":21,"./lib/ctz":22}],31:[function(t,e,r){"use strict";function n(t,e,r,n,i){for(var a=i+1;n<=i;){var o=n+i>>>1,s=t[o];(void 0!==r?r(s,e):s-e)>=0?(a=o,i=o-1):n=o+1}return a}function i(t,e,r,n,i){for(var a=i+1;n<=i;){var o=n+i>>>1,s=t[o];(void 0!==r?r(s,e):s-e)>0?(a=o,i=o-1):n=o+1}return a}function a(t,e,r,n,i){for(var a=n-1;n<=i;){var o=n+i>>>1,s=t[o];(void 0!==r?r(s,e):s-e)<0?(a=o,n=o+1):i=o-1}return 
a}function o(t,e,r,n,i){for(var a=n-1;n<=i;){var o=n+i>>>1,s=t[o];(void 0!==r?r(s,e):s-e)<=0?(a=o,n=o+1):i=o-1}return a}function s(t,e,r,n,i){for(;n<=i;){var a=n+i>>>1,o=t[a],s=void 0!==r?r(o,e):o-e;if(0===s)return a;s<=0?n=a+1:i=a-1}return-1}function l(t,e,r,n,i,a){return"function"==typeof r?a(t,e,r,void 0===n?0:0|n,void 0===i?t.length-1:0|i):a(t,e,void 0,void 0===r?0:0|r,void 0===n?t.length-1:0|n)}e.exports={ge:function(t,e,r,i,a){return l(t,e,r,i,a,n)},gt:function(t,e,r,n,a){return l(t,e,r,n,a,i)},lt:function(t,e,r,n,i){return l(t,e,r,n,i,a)},le:function(t,e,r,n,i){return l(t,e,r,n,i,o)},eq:function(t,e,r,n,i){return l(t,e,r,n,i,s)}}},{}],32:[function(t,e,r){"use strict";function n(t){var e=32;return(t&=-t)&&e--,65535&t&&(e-=16),16711935&t&&(e-=8),252645135&t&&(e-=4),858993459&t&&(e-=2),1431655765&t&&(e-=1),e}r.INT_BITS=32,r.INT_MAX=2147483647,r.INT_MIN=-1<<31,r.sign=function(t){return(t>0)-(t<0)},r.abs=function(t){var e=t>>31;return(t^e)-e},r.min=function(t,e){return e^(t^e)&-(t65535)<<4,e|=r=((t>>>=e)>255)<<3,e|=r=((t>>>=r)>15)<<2,(e|=r=((t>>>=r)>3)<<1)|(t>>>=r)>>1},r.log10=function(t){return t>=1e9?9:t>=1e8?8:t>=1e7?7:t>=1e6?6:t>=1e5?5:t>=1e4?4:t>=1e3?3:t>=100?2:t>=10?1:0},r.popCount=function(t){return 16843009*((t=(858993459&(t-=t>>>1&1431655765))+(t>>>2&858993459))+(t>>>4)&252645135)>>>24},r.countTrailingZeros=n,r.nextPow2=function(t){return t+=0===t,--t,t|=t>>>1,t|=t>>>2,t|=t>>>4,t|=t>>>8,(t|=t>>>16)+1},r.prevPow2=function(t){return t|=t>>>1,t|=t>>>2,t|=t>>>4,t|=t>>>8,(t|=t>>>16)-(t>>>1)},r.parity=function(t){return t^=t>>>16,t^=t>>>8,t^=t>>>4,27030>>>(t&=15)&1};var i=new Array(256);!function(t){for(var e=0;e<256;++e){var 
r=e,n=e,i=7;for(r>>>=1;r;r>>>=1)n<<=1,n|=1&r,--i;t[e]=n<>>8&255]<<16|i[t>>>16&255]<<8|i[t>>>24&255]},r.interleave2=function(t,e){return(t=1431655765&((t=858993459&((t=252645135&((t=16711935&((t&=65535)|t<<8))|t<<4))|t<<2))|t<<1))|(e=1431655765&((e=858993459&((e=252645135&((e=16711935&((e&=65535)|e<<8))|e<<4))|e<<2))|e<<1))<<1},r.deinterleave2=function(t,e){return(t=65535&((t=16711935&((t=252645135&((t=858993459&((t=t>>>e&1431655765)|t>>>1))|t>>>2))|t>>>4))|t>>>16))<<16>>16},r.interleave3=function(t,e,r){return t=1227133513&((t=3272356035&((t=251719695&((t=4278190335&((t&=1023)|t<<16))|t<<8))|t<<4))|t<<2),(t|=(e=1227133513&((e=3272356035&((e=251719695&((e=4278190335&((e&=1023)|e<<16))|e<<8))|e<<4))|e<<2))<<1)|(r=1227133513&((r=3272356035&((r=251719695&((r=4278190335&((r&=1023)|r<<16))|r<<8))|r<<4))|r<<2))<<2},r.deinterleave3=function(t,e){return(t=1023&((t=4278190335&((t=251719695&((t=3272356035&((t=t>>>e&1227133513)|t>>>2))|t>>>4))|t>>>8))|t>>>16))<<22>>22},r.nextCombination=function(t){var e=t|t-1;return e+1|(~e&-~e)-1>>>n(t)+1}},{}],33:[function(t,e,r){!function(e,r){"use strict";function n(t,e){if(!t)throw new Error(e||"Assertion failed")}function i(t,e){t.super_=e;var r=function(){};r.prototype=e.prototype,t.prototype=new r,t.prototype.constructor=t}function a(t,e,r){if(a.isBN(t))return t;this.negative=0,this.words=null,this.length=0,this.red=null,null!==t&&("le"!==e&&"be"!==e||(r=e,e=10),this._init(t||0,e||10,r||"be"))}var o;"object"==typeof e?e.exports=a:r.BN=a,a.BN=a,a.wordSize=26;try{o="undefined"!=typeof window&&void 0!==window.Buffer?window.Buffer:t("buffer").Buffer}catch(t){}function s(t,e){var r=t.charCodeAt(e);return r>=65&&r<=70?r-55:r>=97&&r<=102?r-87:r-48&15}function l(t,e,r){var n=s(t,r);return r-1>=e&&(n|=s(t,r-1)<<4),n}function c(t,e,r,n){for(var i=0,a=Math.min(t.length,r),o=e;o=49?s-49+10:s>=17?s-17+10:s}return i}a.isBN=function(t){return t instanceof a||null!==t&&"object"==typeof 
t&&t.constructor.wordSize===a.wordSize&&Array.isArray(t.words)},a.max=function(t,e){return t.cmp(e)>0?t:e},a.min=function(t,e){return t.cmp(e)<0?t:e},a.prototype._init=function(t,e,r){if("number"==typeof t)return this._initNumber(t,e,r);if("object"==typeof t)return this._initArray(t,e,r);"hex"===e&&(e=16),n(e===(0|e)&&e>=2&&e<=36);var i=0;"-"===(t=t.toString().replace(/\s+/g,""))[0]&&(i++,this.negative=1),i=0;i-=3)o=t[i]|t[i-1]<<8|t[i-2]<<16,this.words[a]|=o<>>26-s&67108863,(s+=24)>=26&&(s-=26,a++);else if("le"===r)for(i=0,a=0;i>>26-s&67108863,(s+=24)>=26&&(s-=26,a++);return this.strip()},a.prototype._parseHex=function(t,e,r){this.length=Math.ceil((t.length-e)/6),this.words=new Array(this.length);for(var n=0;n=e;n-=2)i=l(t,e,n)<=18?(a-=18,o+=1,this.words[o]|=i>>>26):a+=8;else for(n=(t.length-e)%2==0?e+1:e;n=18?(a-=18,o+=1,this.words[o]|=i>>>26):a+=8;this.strip()},a.prototype._parseBase=function(t,e,r){this.words=[0],this.length=1;for(var n=0,i=1;i<=67108863;i*=e)n++;n--,i=i/e|0;for(var a=t.length-r,o=a%n,s=Math.min(a,a-o)+r,l=0,u=r;u1&&0===this.words[this.length-1];)this.length--;return this._normSign()},a.prototype._normSign=function(){return 1===this.length&&0===this.words[0]&&(this.negative=0),this},a.prototype.inspect=function(){return(this.red?""};var 
u=["","0","00","000","0000","00000","000000","0000000","00000000","000000000","0000000000","00000000000","000000000000","0000000000000","00000000000000","000000000000000","0000000000000000","00000000000000000","000000000000000000","0000000000000000000","00000000000000000000","000000000000000000000","0000000000000000000000","00000000000000000000000","000000000000000000000000","0000000000000000000000000"],f=[0,0,25,16,12,11,10,9,8,8,7,7,7,7,6,6,6,6,6,6,6,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5],h=[0,0,33554432,43046721,16777216,48828125,60466176,40353607,16777216,43046721,1e7,19487171,35831808,62748517,7529536,11390625,16777216,24137569,34012224,47045881,64e6,4084101,5153632,6436343,7962624,9765625,11881376,14348907,17210368,20511149,243e5,28629151,33554432,39135393,45435424,52521875,60466176];function p(t,e,r){r.negative=e.negative^t.negative;var n=t.length+e.length|0;r.length=n,n=n-1|0;var i=0|t.words[0],a=0|e.words[0],o=i*a,s=67108863&o,l=o/67108864|0;r.words[0]=s;for(var c=1;c>>26,f=67108863&l,h=Math.min(c,e.length-1),p=Math.max(0,c-t.length+1);p<=h;p++){var d=c-p|0;u+=(o=(i=0|t.words[d])*(a=0|e.words[p])+f)/67108864|0,f=67108863&o}r.words[c]=0|f,l=0|u}return 0!==l?r.words[c]=0|l:r.length--,r.strip()}a.prototype.toString=function(t,e){var r;if(e=0|e||1,16===(t=t||10)||"hex"===t){r="";for(var i=0,a=0,o=0;o>>24-i&16777215)||o!==this.length-1?u[6-l.length]+l+r:l+r,(i+=2)>=26&&(i-=26,o--)}for(0!==a&&(r=a.toString(16)+r);r.length%e!=0;)r="0"+r;return 0!==this.negative&&(r="-"+r),r}if(t===(0|t)&&t>=2&&t<=36){var c=f[t],p=h[t];r="";var d=this.clone();for(d.negative=0;!d.isZero();){var m=d.modn(p).toString(t);r=(d=d.idivn(p)).isZero()?m+r:u[c-m.length]+m+r}for(this.isZero()&&(r="0"+r);r.length%e!=0;)r="0"+r;return 0!==this.negative&&(r="-"+r),r}n(!1,"Base should be between 2 and 36")},a.prototype.toNumber=function(){var t=this.words[0];return 
2===this.length?t+=67108864*this.words[1]:3===this.length&&1===this.words[2]?t+=4503599627370496+67108864*this.words[1]:this.length>2&&n(!1,"Number can only safely store up to 53 bits"),0!==this.negative?-t:t},a.prototype.toJSON=function(){return this.toString(16)},a.prototype.toBuffer=function(t,e){return n(void 0!==o),this.toArrayLike(o,t,e)},a.prototype.toArray=function(t,e){return this.toArrayLike(Array,t,e)},a.prototype.toArrayLike=function(t,e,r){var i=this.byteLength(),a=r||Math.max(1,i);n(i<=a,"byte array longer than desired length"),n(a>0,"Requested array length <= 0"),this.strip();var o,s,l="le"===e,c=new t(a),u=this.clone();if(l){for(s=0;!u.isZero();s++)o=u.andln(255),u.iushrn(8),c[s]=o;for(;s=4096&&(r+=13,e>>>=13),e>=64&&(r+=7,e>>>=7),e>=8&&(r+=4,e>>>=4),e>=2&&(r+=2,e>>>=2),r+e},a.prototype._zeroBits=function(t){if(0===t)return 26;var e=t,r=0;return 0==(8191&e)&&(r+=13,e>>>=13),0==(127&e)&&(r+=7,e>>>=7),0==(15&e)&&(r+=4,e>>>=4),0==(3&e)&&(r+=2,e>>>=2),0==(1&e)&&r++,r},a.prototype.bitLength=function(){var t=this.words[this.length-1],e=this._countBits(t);return 26*(this.length-1)+e},a.prototype.zeroBits=function(){if(this.isZero())return 0;for(var t=0,e=0;et.length?this.clone().ior(t):t.clone().ior(this)},a.prototype.uor=function(t){return this.length>t.length?this.clone().iuor(t):t.clone().iuor(this)},a.prototype.iuand=function(t){var e;e=this.length>t.length?t:this;for(var r=0;rt.length?this.clone().iand(t):t.clone().iand(this)},a.prototype.uand=function(t){return this.length>t.length?this.clone().iuand(t):t.clone().iuand(this)},a.prototype.iuxor=function(t){var e,r;this.length>t.length?(e=this,r=t):(e=t,r=this);for(var n=0;nt.length?this.clone().ixor(t):t.clone().ixor(this)},a.prototype.uxor=function(t){return this.length>t.length?this.clone().iuxor(t):t.clone().iuxor(this)},a.prototype.inotn=function(t){n("number"==typeof t&&t>=0);var e=0|Math.ceil(t/26),r=t%26;this._expand(e),r>0&&e--;for(var 
i=0;i0&&(this.words[i]=~this.words[i]&67108863>>26-r),this.strip()},a.prototype.notn=function(t){return this.clone().inotn(t)},a.prototype.setn=function(t,e){n("number"==typeof t&&t>=0);var r=t/26|0,i=t%26;return this._expand(r+1),this.words[r]=e?this.words[r]|1<t.length?(r=this,n=t):(r=t,n=this);for(var i=0,a=0;a>>26;for(;0!==i&&a>>26;if(this.length=r.length,0!==i)this.words[this.length]=i,this.length++;else if(r!==this)for(;at.length?this.clone().iadd(t):t.clone().iadd(this)},a.prototype.isub=function(t){if(0!==t.negative){t.negative=0;var e=this.iadd(t);return t.negative=1,e._normSign()}if(0!==this.negative)return this.negative=0,this.iadd(t),this.negative=1,this._normSign();var r,n,i=this.cmp(t);if(0===i)return this.negative=0,this.length=1,this.words[0]=0,this;i>0?(r=this,n=t):(r=t,n=this);for(var a=0,o=0;o>26,this.words[o]=67108863&e;for(;0!==a&&o>26,this.words[o]=67108863&e;if(0===a&&o>>13,p=0|o[1],d=8191&p,m=p>>>13,g=0|o[2],v=8191&g,y=g>>>13,x=0|o[3],b=8191&x,_=x>>>13,w=0|o[4],T=8191&w,k=w>>>13,A=0|o[5],M=8191&A,S=A>>>13,E=0|o[6],L=8191&E,C=E>>>13,P=0|o[7],I=8191&P,O=P>>>13,z=0|o[8],D=8191&z,R=z>>>13,F=0|o[9],B=8191&F,N=F>>>13,j=0|s[0],U=8191&j,V=j>>>13,H=0|s[1],q=8191&H,G=H>>>13,Y=0|s[2],W=8191&Y,X=Y>>>13,Z=0|s[3],J=8191&Z,K=Z>>>13,Q=0|s[4],$=8191&Q,tt=Q>>>13,et=0|s[5],rt=8191&et,nt=et>>>13,it=0|s[6],at=8191&it,ot=it>>>13,st=0|s[7],lt=8191&st,ct=st>>>13,ut=0|s[8],ft=8191&ut,ht=ut>>>13,pt=0|s[9],dt=8191&pt,mt=pt>>>13;r.negative=t.negative^e.negative,r.length=19;var gt=(c+(n=Math.imul(f,U))|0)+((8191&(i=(i=Math.imul(f,V))+Math.imul(h,U)|0))<<13)|0;c=((a=Math.imul(h,V))+(i>>>13)|0)+(gt>>>26)|0,gt&=67108863,n=Math.imul(d,U),i=(i=Math.imul(d,V))+Math.imul(m,U)|0,a=Math.imul(m,V);var 
vt=(c+(n=n+Math.imul(f,q)|0)|0)+((8191&(i=(i=i+Math.imul(f,G)|0)+Math.imul(h,q)|0))<<13)|0;c=((a=a+Math.imul(h,G)|0)+(i>>>13)|0)+(vt>>>26)|0,vt&=67108863,n=Math.imul(v,U),i=(i=Math.imul(v,V))+Math.imul(y,U)|0,a=Math.imul(y,V),n=n+Math.imul(d,q)|0,i=(i=i+Math.imul(d,G)|0)+Math.imul(m,q)|0,a=a+Math.imul(m,G)|0;var yt=(c+(n=n+Math.imul(f,W)|0)|0)+((8191&(i=(i=i+Math.imul(f,X)|0)+Math.imul(h,W)|0))<<13)|0;c=((a=a+Math.imul(h,X)|0)+(i>>>13)|0)+(yt>>>26)|0,yt&=67108863,n=Math.imul(b,U),i=(i=Math.imul(b,V))+Math.imul(_,U)|0,a=Math.imul(_,V),n=n+Math.imul(v,q)|0,i=(i=i+Math.imul(v,G)|0)+Math.imul(y,q)|0,a=a+Math.imul(y,G)|0,n=n+Math.imul(d,W)|0,i=(i=i+Math.imul(d,X)|0)+Math.imul(m,W)|0,a=a+Math.imul(m,X)|0;var xt=(c+(n=n+Math.imul(f,J)|0)|0)+((8191&(i=(i=i+Math.imul(f,K)|0)+Math.imul(h,J)|0))<<13)|0;c=((a=a+Math.imul(h,K)|0)+(i>>>13)|0)+(xt>>>26)|0,xt&=67108863,n=Math.imul(T,U),i=(i=Math.imul(T,V))+Math.imul(k,U)|0,a=Math.imul(k,V),n=n+Math.imul(b,q)|0,i=(i=i+Math.imul(b,G)|0)+Math.imul(_,q)|0,a=a+Math.imul(_,G)|0,n=n+Math.imul(v,W)|0,i=(i=i+Math.imul(v,X)|0)+Math.imul(y,W)|0,a=a+Math.imul(y,X)|0,n=n+Math.imul(d,J)|0,i=(i=i+Math.imul(d,K)|0)+Math.imul(m,J)|0,a=a+Math.imul(m,K)|0;var bt=(c+(n=n+Math.imul(f,$)|0)|0)+((8191&(i=(i=i+Math.imul(f,tt)|0)+Math.imul(h,$)|0))<<13)|0;c=((a=a+Math.imul(h,tt)|0)+(i>>>13)|0)+(bt>>>26)|0,bt&=67108863,n=Math.imul(M,U),i=(i=Math.imul(M,V))+Math.imul(S,U)|0,a=Math.imul(S,V),n=n+Math.imul(T,q)|0,i=(i=i+Math.imul(T,G)|0)+Math.imul(k,q)|0,a=a+Math.imul(k,G)|0,n=n+Math.imul(b,W)|0,i=(i=i+Math.imul(b,X)|0)+Math.imul(_,W)|0,a=a+Math.imul(_,X)|0,n=n+Math.imul(v,J)|0,i=(i=i+Math.imul(v,K)|0)+Math.imul(y,J)|0,a=a+Math.imul(y,K)|0,n=n+Math.imul(d,$)|0,i=(i=i+Math.imul(d,tt)|0)+Math.imul(m,$)|0,a=a+Math.imul(m,tt)|0;var 
_t=(c+(n=n+Math.imul(f,rt)|0)|0)+((8191&(i=(i=i+Math.imul(f,nt)|0)+Math.imul(h,rt)|0))<<13)|0;c=((a=a+Math.imul(h,nt)|0)+(i>>>13)|0)+(_t>>>26)|0,_t&=67108863,n=Math.imul(L,U),i=(i=Math.imul(L,V))+Math.imul(C,U)|0,a=Math.imul(C,V),n=n+Math.imul(M,q)|0,i=(i=i+Math.imul(M,G)|0)+Math.imul(S,q)|0,a=a+Math.imul(S,G)|0,n=n+Math.imul(T,W)|0,i=(i=i+Math.imul(T,X)|0)+Math.imul(k,W)|0,a=a+Math.imul(k,X)|0,n=n+Math.imul(b,J)|0,i=(i=i+Math.imul(b,K)|0)+Math.imul(_,J)|0,a=a+Math.imul(_,K)|0,n=n+Math.imul(v,$)|0,i=(i=i+Math.imul(v,tt)|0)+Math.imul(y,$)|0,a=a+Math.imul(y,tt)|0,n=n+Math.imul(d,rt)|0,i=(i=i+Math.imul(d,nt)|0)+Math.imul(m,rt)|0,a=a+Math.imul(m,nt)|0;var wt=(c+(n=n+Math.imul(f,at)|0)|0)+((8191&(i=(i=i+Math.imul(f,ot)|0)+Math.imul(h,at)|0))<<13)|0;c=((a=a+Math.imul(h,ot)|0)+(i>>>13)|0)+(wt>>>26)|0,wt&=67108863,n=Math.imul(I,U),i=(i=Math.imul(I,V))+Math.imul(O,U)|0,a=Math.imul(O,V),n=n+Math.imul(L,q)|0,i=(i=i+Math.imul(L,G)|0)+Math.imul(C,q)|0,a=a+Math.imul(C,G)|0,n=n+Math.imul(M,W)|0,i=(i=i+Math.imul(M,X)|0)+Math.imul(S,W)|0,a=a+Math.imul(S,X)|0,n=n+Math.imul(T,J)|0,i=(i=i+Math.imul(T,K)|0)+Math.imul(k,J)|0,a=a+Math.imul(k,K)|0,n=n+Math.imul(b,$)|0,i=(i=i+Math.imul(b,tt)|0)+Math.imul(_,$)|0,a=a+Math.imul(_,tt)|0,n=n+Math.imul(v,rt)|0,i=(i=i+Math.imul(v,nt)|0)+Math.imul(y,rt)|0,a=a+Math.imul(y,nt)|0,n=n+Math.imul(d,at)|0,i=(i=i+Math.imul(d,ot)|0)+Math.imul(m,at)|0,a=a+Math.imul(m,ot)|0;var 
Tt=(c+(n=n+Math.imul(f,lt)|0)|0)+((8191&(i=(i=i+Math.imul(f,ct)|0)+Math.imul(h,lt)|0))<<13)|0;c=((a=a+Math.imul(h,ct)|0)+(i>>>13)|0)+(Tt>>>26)|0,Tt&=67108863,n=Math.imul(D,U),i=(i=Math.imul(D,V))+Math.imul(R,U)|0,a=Math.imul(R,V),n=n+Math.imul(I,q)|0,i=(i=i+Math.imul(I,G)|0)+Math.imul(O,q)|0,a=a+Math.imul(O,G)|0,n=n+Math.imul(L,W)|0,i=(i=i+Math.imul(L,X)|0)+Math.imul(C,W)|0,a=a+Math.imul(C,X)|0,n=n+Math.imul(M,J)|0,i=(i=i+Math.imul(M,K)|0)+Math.imul(S,J)|0,a=a+Math.imul(S,K)|0,n=n+Math.imul(T,$)|0,i=(i=i+Math.imul(T,tt)|0)+Math.imul(k,$)|0,a=a+Math.imul(k,tt)|0,n=n+Math.imul(b,rt)|0,i=(i=i+Math.imul(b,nt)|0)+Math.imul(_,rt)|0,a=a+Math.imul(_,nt)|0,n=n+Math.imul(v,at)|0,i=(i=i+Math.imul(v,ot)|0)+Math.imul(y,at)|0,a=a+Math.imul(y,ot)|0,n=n+Math.imul(d,lt)|0,i=(i=i+Math.imul(d,ct)|0)+Math.imul(m,lt)|0,a=a+Math.imul(m,ct)|0;var kt=(c+(n=n+Math.imul(f,ft)|0)|0)+((8191&(i=(i=i+Math.imul(f,ht)|0)+Math.imul(h,ft)|0))<<13)|0;c=((a=a+Math.imul(h,ht)|0)+(i>>>13)|0)+(kt>>>26)|0,kt&=67108863,n=Math.imul(B,U),i=(i=Math.imul(B,V))+Math.imul(N,U)|0,a=Math.imul(N,V),n=n+Math.imul(D,q)|0,i=(i=i+Math.imul(D,G)|0)+Math.imul(R,q)|0,a=a+Math.imul(R,G)|0,n=n+Math.imul(I,W)|0,i=(i=i+Math.imul(I,X)|0)+Math.imul(O,W)|0,a=a+Math.imul(O,X)|0,n=n+Math.imul(L,J)|0,i=(i=i+Math.imul(L,K)|0)+Math.imul(C,J)|0,a=a+Math.imul(C,K)|0,n=n+Math.imul(M,$)|0,i=(i=i+Math.imul(M,tt)|0)+Math.imul(S,$)|0,a=a+Math.imul(S,tt)|0,n=n+Math.imul(T,rt)|0,i=(i=i+Math.imul(T,nt)|0)+Math.imul(k,rt)|0,a=a+Math.imul(k,nt)|0,n=n+Math.imul(b,at)|0,i=(i=i+Math.imul(b,ot)|0)+Math.imul(_,at)|0,a=a+Math.imul(_,ot)|0,n=n+Math.imul(v,lt)|0,i=(i=i+Math.imul(v,ct)|0)+Math.imul(y,lt)|0,a=a+Math.imul(y,ct)|0,n=n+Math.imul(d,ft)|0,i=(i=i+Math.imul(d,ht)|0)+Math.imul(m,ft)|0,a=a+Math.imul(m,ht)|0;var 
At=(c+(n=n+Math.imul(f,dt)|0)|0)+((8191&(i=(i=i+Math.imul(f,mt)|0)+Math.imul(h,dt)|0))<<13)|0;c=((a=a+Math.imul(h,mt)|0)+(i>>>13)|0)+(At>>>26)|0,At&=67108863,n=Math.imul(B,q),i=(i=Math.imul(B,G))+Math.imul(N,q)|0,a=Math.imul(N,G),n=n+Math.imul(D,W)|0,i=(i=i+Math.imul(D,X)|0)+Math.imul(R,W)|0,a=a+Math.imul(R,X)|0,n=n+Math.imul(I,J)|0,i=(i=i+Math.imul(I,K)|0)+Math.imul(O,J)|0,a=a+Math.imul(O,K)|0,n=n+Math.imul(L,$)|0,i=(i=i+Math.imul(L,tt)|0)+Math.imul(C,$)|0,a=a+Math.imul(C,tt)|0,n=n+Math.imul(M,rt)|0,i=(i=i+Math.imul(M,nt)|0)+Math.imul(S,rt)|0,a=a+Math.imul(S,nt)|0,n=n+Math.imul(T,at)|0,i=(i=i+Math.imul(T,ot)|0)+Math.imul(k,at)|0,a=a+Math.imul(k,ot)|0,n=n+Math.imul(b,lt)|0,i=(i=i+Math.imul(b,ct)|0)+Math.imul(_,lt)|0,a=a+Math.imul(_,ct)|0,n=n+Math.imul(v,ft)|0,i=(i=i+Math.imul(v,ht)|0)+Math.imul(y,ft)|0,a=a+Math.imul(y,ht)|0;var Mt=(c+(n=n+Math.imul(d,dt)|0)|0)+((8191&(i=(i=i+Math.imul(d,mt)|0)+Math.imul(m,dt)|0))<<13)|0;c=((a=a+Math.imul(m,mt)|0)+(i>>>13)|0)+(Mt>>>26)|0,Mt&=67108863,n=Math.imul(B,W),i=(i=Math.imul(B,X))+Math.imul(N,W)|0,a=Math.imul(N,X),n=n+Math.imul(D,J)|0,i=(i=i+Math.imul(D,K)|0)+Math.imul(R,J)|0,a=a+Math.imul(R,K)|0,n=n+Math.imul(I,$)|0,i=(i=i+Math.imul(I,tt)|0)+Math.imul(O,$)|0,a=a+Math.imul(O,tt)|0,n=n+Math.imul(L,rt)|0,i=(i=i+Math.imul(L,nt)|0)+Math.imul(C,rt)|0,a=a+Math.imul(C,nt)|0,n=n+Math.imul(M,at)|0,i=(i=i+Math.imul(M,ot)|0)+Math.imul(S,at)|0,a=a+Math.imul(S,ot)|0,n=n+Math.imul(T,lt)|0,i=(i=i+Math.imul(T,ct)|0)+Math.imul(k,lt)|0,a=a+Math.imul(k,ct)|0,n=n+Math.imul(b,ft)|0,i=(i=i+Math.imul(b,ht)|0)+Math.imul(_,ft)|0,a=a+Math.imul(_,ht)|0;var 
St=(c+(n=n+Math.imul(v,dt)|0)|0)+((8191&(i=(i=i+Math.imul(v,mt)|0)+Math.imul(y,dt)|0))<<13)|0;c=((a=a+Math.imul(y,mt)|0)+(i>>>13)|0)+(St>>>26)|0,St&=67108863,n=Math.imul(B,J),i=(i=Math.imul(B,K))+Math.imul(N,J)|0,a=Math.imul(N,K),n=n+Math.imul(D,$)|0,i=(i=i+Math.imul(D,tt)|0)+Math.imul(R,$)|0,a=a+Math.imul(R,tt)|0,n=n+Math.imul(I,rt)|0,i=(i=i+Math.imul(I,nt)|0)+Math.imul(O,rt)|0,a=a+Math.imul(O,nt)|0,n=n+Math.imul(L,at)|0,i=(i=i+Math.imul(L,ot)|0)+Math.imul(C,at)|0,a=a+Math.imul(C,ot)|0,n=n+Math.imul(M,lt)|0,i=(i=i+Math.imul(M,ct)|0)+Math.imul(S,lt)|0,a=a+Math.imul(S,ct)|0,n=n+Math.imul(T,ft)|0,i=(i=i+Math.imul(T,ht)|0)+Math.imul(k,ft)|0,a=a+Math.imul(k,ht)|0;var Et=(c+(n=n+Math.imul(b,dt)|0)|0)+((8191&(i=(i=i+Math.imul(b,mt)|0)+Math.imul(_,dt)|0))<<13)|0;c=((a=a+Math.imul(_,mt)|0)+(i>>>13)|0)+(Et>>>26)|0,Et&=67108863,n=Math.imul(B,$),i=(i=Math.imul(B,tt))+Math.imul(N,$)|0,a=Math.imul(N,tt),n=n+Math.imul(D,rt)|0,i=(i=i+Math.imul(D,nt)|0)+Math.imul(R,rt)|0,a=a+Math.imul(R,nt)|0,n=n+Math.imul(I,at)|0,i=(i=i+Math.imul(I,ot)|0)+Math.imul(O,at)|0,a=a+Math.imul(O,ot)|0,n=n+Math.imul(L,lt)|0,i=(i=i+Math.imul(L,ct)|0)+Math.imul(C,lt)|0,a=a+Math.imul(C,ct)|0,n=n+Math.imul(M,ft)|0,i=(i=i+Math.imul(M,ht)|0)+Math.imul(S,ft)|0,a=a+Math.imul(S,ht)|0;var Lt=(c+(n=n+Math.imul(T,dt)|0)|0)+((8191&(i=(i=i+Math.imul(T,mt)|0)+Math.imul(k,dt)|0))<<13)|0;c=((a=a+Math.imul(k,mt)|0)+(i>>>13)|0)+(Lt>>>26)|0,Lt&=67108863,n=Math.imul(B,rt),i=(i=Math.imul(B,nt))+Math.imul(N,rt)|0,a=Math.imul(N,nt),n=n+Math.imul(D,at)|0,i=(i=i+Math.imul(D,ot)|0)+Math.imul(R,at)|0,a=a+Math.imul(R,ot)|0,n=n+Math.imul(I,lt)|0,i=(i=i+Math.imul(I,ct)|0)+Math.imul(O,lt)|0,a=a+Math.imul(O,ct)|0,n=n+Math.imul(L,ft)|0,i=(i=i+Math.imul(L,ht)|0)+Math.imul(C,ft)|0,a=a+Math.imul(C,ht)|0;var 
Ct=(c+(n=n+Math.imul(M,dt)|0)|0)+((8191&(i=(i=i+Math.imul(M,mt)|0)+Math.imul(S,dt)|0))<<13)|0;c=((a=a+Math.imul(S,mt)|0)+(i>>>13)|0)+(Ct>>>26)|0,Ct&=67108863,n=Math.imul(B,at),i=(i=Math.imul(B,ot))+Math.imul(N,at)|0,a=Math.imul(N,ot),n=n+Math.imul(D,lt)|0,i=(i=i+Math.imul(D,ct)|0)+Math.imul(R,lt)|0,a=a+Math.imul(R,ct)|0,n=n+Math.imul(I,ft)|0,i=(i=i+Math.imul(I,ht)|0)+Math.imul(O,ft)|0,a=a+Math.imul(O,ht)|0;var Pt=(c+(n=n+Math.imul(L,dt)|0)|0)+((8191&(i=(i=i+Math.imul(L,mt)|0)+Math.imul(C,dt)|0))<<13)|0;c=((a=a+Math.imul(C,mt)|0)+(i>>>13)|0)+(Pt>>>26)|0,Pt&=67108863,n=Math.imul(B,lt),i=(i=Math.imul(B,ct))+Math.imul(N,lt)|0,a=Math.imul(N,ct),n=n+Math.imul(D,ft)|0,i=(i=i+Math.imul(D,ht)|0)+Math.imul(R,ft)|0,a=a+Math.imul(R,ht)|0;var It=(c+(n=n+Math.imul(I,dt)|0)|0)+((8191&(i=(i=i+Math.imul(I,mt)|0)+Math.imul(O,dt)|0))<<13)|0;c=((a=a+Math.imul(O,mt)|0)+(i>>>13)|0)+(It>>>26)|0,It&=67108863,n=Math.imul(B,ft),i=(i=Math.imul(B,ht))+Math.imul(N,ft)|0,a=Math.imul(N,ht);var Ot=(c+(n=n+Math.imul(D,dt)|0)|0)+((8191&(i=(i=i+Math.imul(D,mt)|0)+Math.imul(R,dt)|0))<<13)|0;c=((a=a+Math.imul(R,mt)|0)+(i>>>13)|0)+(Ot>>>26)|0,Ot&=67108863;var zt=(c+(n=Math.imul(B,dt))|0)+((8191&(i=(i=Math.imul(B,mt))+Math.imul(N,dt)|0))<<13)|0;return c=((a=Math.imul(N,mt))+(i>>>13)|0)+(zt>>>26)|0,zt&=67108863,l[0]=gt,l[1]=vt,l[2]=yt,l[3]=xt,l[4]=bt,l[5]=_t,l[6]=wt,l[7]=Tt,l[8]=kt,l[9]=At,l[10]=Mt,l[11]=St,l[12]=Et,l[13]=Lt,l[14]=Ct,l[15]=Pt,l[16]=It,l[17]=Ot,l[18]=zt,0!==c&&(l[19]=c,r.length++),r};function m(t,e,r){return(new g).mulp(t,e,r)}function g(t,e){this.x=t,this.y=e}Math.imul||(d=p),a.prototype.mulTo=function(t,e){var r=this.length+t.length;return 10===this.length&&10===t.length?d(this,t,e):r<63?p(this,t,e):r<1024?function(t,e,r){r.negative=e.negative^t.negative,r.length=t.length+e.length;for(var n=0,i=0,a=0;a>>26)|0)>>>26,o&=67108863}r.words[a]=s,n=o,o=i}return 0!==n?r.words[a]=n:r.length--,r.strip()}(this,t,e):m(this,t,e)},g.prototype.makeRBT=function(t){for(var e=new 
Array(t),r=a.prototype._countBits(t)-1,n=0;n>=1;return n},g.prototype.permute=function(t,e,r,n,i,a){for(var o=0;o>>=1)i++;return 1<>>=13,r[2*o+1]=8191&a,a>>>=13;for(o=2*e;o>=26,e+=i/67108864|0,e+=a>>>26,this.words[r]=67108863&a}return 0!==e&&(this.words[r]=e,this.length++),this},a.prototype.muln=function(t){return this.clone().imuln(t)},a.prototype.sqr=function(){return this.mul(this)},a.prototype.isqr=function(){return this.imul(this.clone())},a.prototype.pow=function(t){var e=function(t){for(var e=new Array(t.bitLength()),r=0;r>>i}return e}(t);if(0===e.length)return new a(1);for(var r=this,n=0;n=0);var e,r=t%26,i=(t-r)/26,a=67108863>>>26-r<<26-r;if(0!==r){var o=0;for(e=0;e>>26-r}o&&(this.words[e]=o,this.length++)}if(0!==i){for(e=this.length-1;e>=0;e--)this.words[e+i]=this.words[e];for(e=0;e=0),i=e?(e-e%26)/26:0;var a=t%26,o=Math.min((t-a)/26,this.length),s=67108863^67108863>>>a<o)for(this.length-=o,c=0;c=0&&(0!==u||c>=i);c--){var f=0|this.words[c];this.words[c]=u<<26-a|f>>>a,u=f&s}return l&&0!==u&&(l.words[l.length++]=u),0===this.length&&(this.words[0]=0,this.length=1),this.strip()},a.prototype.ishrn=function(t,e,r){return n(0===this.negative),this.iushrn(t,e,r)},a.prototype.shln=function(t){return this.clone().ishln(t)},a.prototype.ushln=function(t){return this.clone().iushln(t)},a.prototype.shrn=function(t){return this.clone().ishrn(t)},a.prototype.ushrn=function(t){return this.clone().iushrn(t)},a.prototype.testn=function(t){n("number"==typeof t&&t>=0);var e=t%26,r=(t-e)/26,i=1<=0);var e=t%26,r=(t-e)/26;if(n(0===this.negative,"imaskn works only with positive numbers"),this.length<=r)return this;if(0!==e&&r++,this.length=Math.min(r,this.length),0!==e){var i=67108863^67108863>>>e<=67108864;e++)this.words[e]-=67108864,e===this.length-1?this.words[e+1]=1:this.words[e+1]++;return this.length=Math.max(this.length,e+1),this},a.prototype.isubn=function(t){if(n("number"==typeof t),n(t<67108864),t<0)return this.iaddn(-t);if(0!==this.negative)return 
this.negative=0,this.iaddn(t),this.negative=1,this;if(this.words[0]-=t,1===this.length&&this.words[0]<0)this.words[0]=-this.words[0],this.negative=1;else for(var e=0;e>26)-(l/67108864|0),this.words[i+r]=67108863&a}for(;i>26,this.words[i+r]=67108863&a;if(0===s)return this.strip();for(n(-1===s),s=0,i=0;i>26,this.words[i]=67108863&a;return this.negative=1,this.strip()},a.prototype._wordDiv=function(t,e){var r=(this.length,t.length),n=this.clone(),i=t,o=0|i.words[i.length-1];0!==(r=26-this._countBits(o))&&(i=i.ushln(r),n.iushln(r),o=0|i.words[i.length-1]);var s,l=n.length-i.length;if("mod"!==e){(s=new a(null)).length=l+1,s.words=new Array(s.length);for(var c=0;c=0;f--){var h=67108864*(0|n.words[i.length+f])+(0|n.words[i.length+f-1]);for(h=Math.min(h/o|0,67108863),n._ishlnsubmul(i,h,f);0!==n.negative;)h--,n.negative=0,n._ishlnsubmul(i,1,f),n.isZero()||(n.negative^=1);s&&(s.words[f]=h)}return s&&s.strip(),n.strip(),"div"!==e&&0!==r&&n.iushrn(r),{div:s||null,mod:n}},a.prototype.divmod=function(t,e,r){return n(!t.isZero()),this.isZero()?{div:new a(0),mod:new a(0)}:0!==this.negative&&0===t.negative?(s=this.neg().divmod(t,e),"mod"!==e&&(i=s.div.neg()),"div"!==e&&(o=s.mod.neg(),r&&0!==o.negative&&o.iadd(t)),{div:i,mod:o}):0===this.negative&&0!==t.negative?(s=this.divmod(t.neg(),e),"mod"!==e&&(i=s.div.neg()),{div:i,mod:s.mod}):0!=(this.negative&t.negative)?(s=this.neg().divmod(t.neg(),e),"div"!==e&&(o=s.mod.neg(),r&&0!==o.negative&&o.isub(t)),{div:s.div,mod:o}):t.length>this.length||this.cmp(t)<0?{div:new a(0),mod:this}:1===t.length?"div"===e?{div:this.divn(t.words[0]),mod:null}:"mod"===e?{div:null,mod:new a(this.modn(t.words[0]))}:{div:this.divn(t.words[0]),mod:new a(this.modn(t.words[0]))}:this._wordDiv(t,e);var i,o,s},a.prototype.div=function(t){return this.divmod(t,"div",!1).div},a.prototype.mod=function(t){return this.divmod(t,"mod",!1).mod},a.prototype.umod=function(t){return this.divmod(t,"mod",!0).mod},a.prototype.divRound=function(t){var 
e=this.divmod(t);if(e.mod.isZero())return e.div;var r=0!==e.div.negative?e.mod.isub(t):e.mod,n=t.ushrn(1),i=t.andln(1),a=r.cmp(n);return a<0||1===i&&0===a?e.div:0!==e.div.negative?e.div.isubn(1):e.div.iaddn(1)},a.prototype.modn=function(t){n(t<=67108863);for(var e=(1<<26)%t,r=0,i=this.length-1;i>=0;i--)r=(e*r+(0|this.words[i]))%t;return r},a.prototype.idivn=function(t){n(t<=67108863);for(var e=0,r=this.length-1;r>=0;r--){var i=(0|this.words[r])+67108864*e;this.words[r]=i/t|0,e=i%t}return this.strip()},a.prototype.divn=function(t){return this.clone().idivn(t)},a.prototype.egcd=function(t){n(0===t.negative),n(!t.isZero());var e=this,r=t.clone();e=0!==e.negative?e.umod(t):e.clone();for(var i=new a(1),o=new a(0),s=new a(0),l=new a(1),c=0;e.isEven()&&r.isEven();)e.iushrn(1),r.iushrn(1),++c;for(var u=r.clone(),f=e.clone();!e.isZero();){for(var h=0,p=1;0==(e.words[0]&p)&&h<26;++h,p<<=1);if(h>0)for(e.iushrn(h);h-- >0;)(i.isOdd()||o.isOdd())&&(i.iadd(u),o.isub(f)),i.iushrn(1),o.iushrn(1);for(var d=0,m=1;0==(r.words[0]&m)&&d<26;++d,m<<=1);if(d>0)for(r.iushrn(d);d-- >0;)(s.isOdd()||l.isOdd())&&(s.iadd(u),l.isub(f)),s.iushrn(1),l.iushrn(1);e.cmp(r)>=0?(e.isub(r),i.isub(s),o.isub(l)):(r.isub(e),s.isub(i),l.isub(o))}return{a:s,b:l,gcd:r.iushln(c)}},a.prototype._invmp=function(t){n(0===t.negative),n(!t.isZero());var e=this,r=t.clone();e=0!==e.negative?e.umod(t):e.clone();for(var i,o=new a(1),s=new a(0),l=r.clone();e.cmpn(1)>0&&r.cmpn(1)>0;){for(var c=0,u=1;0==(e.words[0]&u)&&c<26;++c,u<<=1);if(c>0)for(e.iushrn(c);c-- >0;)o.isOdd()&&o.iadd(l),o.iushrn(1);for(var f=0,h=1;0==(r.words[0]&h)&&f<26;++f,h<<=1);if(f>0)for(r.iushrn(f);f-- >0;)s.isOdd()&&s.iadd(l),s.iushrn(1);e.cmp(r)>=0?(e.isub(r),o.isub(s)):(r.isub(e),s.isub(o))}return(i=0===e.cmpn(1)?o:s).cmpn(0)<0&&i.iadd(t),i},a.prototype.gcd=function(t){if(this.isZero())return t.abs();if(t.isZero())return this.abs();var e=this.clone(),r=t.clone();e.negative=0,r.negative=0;for(var 
n=0;e.isEven()&&r.isEven();n++)e.iushrn(1),r.iushrn(1);for(;;){for(;e.isEven();)e.iushrn(1);for(;r.isEven();)r.iushrn(1);var i=e.cmp(r);if(i<0){var a=e;e=r,r=a}else if(0===i||0===r.cmpn(1))break;e.isub(r)}return r.iushln(n)},a.prototype.invm=function(t){return this.egcd(t).a.umod(t)},a.prototype.isEven=function(){return 0==(1&this.words[0])},a.prototype.isOdd=function(){return 1==(1&this.words[0])},a.prototype.andln=function(t){return this.words[0]&t},a.prototype.bincn=function(t){n("number"==typeof t);var e=t%26,r=(t-e)/26,i=1<>>26,s&=67108863,this.words[o]=s}return 0!==a&&(this.words[o]=a,this.length++),this},a.prototype.isZero=function(){return 1===this.length&&0===this.words[0]},a.prototype.cmpn=function(t){var e,r=t<0;if(0!==this.negative&&!r)return-1;if(0===this.negative&&r)return 1;if(this.strip(),this.length>1)e=1;else{r&&(t=-t),n(t<=67108863,"Number is too big");var i=0|this.words[0];e=i===t?0:it.length)return 1;if(this.length=0;r--){var n=0|this.words[r],i=0|t.words[r];if(n!==i){ni&&(e=1);break}}return e},a.prototype.gtn=function(t){return 1===this.cmpn(t)},a.prototype.gt=function(t){return 1===this.cmp(t)},a.prototype.gten=function(t){return this.cmpn(t)>=0},a.prototype.gte=function(t){return this.cmp(t)>=0},a.prototype.ltn=function(t){return-1===this.cmpn(t)},a.prototype.lt=function(t){return-1===this.cmp(t)},a.prototype.lten=function(t){return this.cmpn(t)<=0},a.prototype.lte=function(t){return this.cmp(t)<=0},a.prototype.eqn=function(t){return 0===this.cmpn(t)},a.prototype.eq=function(t){return 0===this.cmp(t)},a.red=function(t){return new T(t)},a.prototype.toRed=function(t){return n(!this.red,"Already a number in reduction context"),n(0===this.negative,"red works only with positives"),t.convertTo(this)._forceRed(t)},a.prototype.fromRed=function(){return n(this.red,"fromRed works only with numbers in reduction context"),this.red.convertFrom(this)},a.prototype._forceRed=function(t){return this.red=t,this},a.prototype.forceRed=function(t){return 
n(!this.red,"Already a number in reduction context"),this._forceRed(t)},a.prototype.redAdd=function(t){return n(this.red,"redAdd works only with red numbers"),this.red.add(this,t)},a.prototype.redIAdd=function(t){return n(this.red,"redIAdd works only with red numbers"),this.red.iadd(this,t)},a.prototype.redSub=function(t){return n(this.red,"redSub works only with red numbers"),this.red.sub(this,t)},a.prototype.redISub=function(t){return n(this.red,"redISub works only with red numbers"),this.red.isub(this,t)},a.prototype.redShl=function(t){return n(this.red,"redShl works only with red numbers"),this.red.shl(this,t)},a.prototype.redMul=function(t){return n(this.red,"redMul works only with red numbers"),this.red._verify2(this,t),this.red.mul(this,t)},a.prototype.redIMul=function(t){return n(this.red,"redMul works only with red numbers"),this.red._verify2(this,t),this.red.imul(this,t)},a.prototype.redSqr=function(){return n(this.red,"redSqr works only with red numbers"),this.red._verify1(this),this.red.sqr(this)},a.prototype.redISqr=function(){return n(this.red,"redISqr works only with red numbers"),this.red._verify1(this),this.red.isqr(this)},a.prototype.redSqrt=function(){return n(this.red,"redSqrt works only with red numbers"),this.red._verify1(this),this.red.sqrt(this)},a.prototype.redInvm=function(){return n(this.red,"redInvm works only with red numbers"),this.red._verify1(this),this.red.invm(this)},a.prototype.redNeg=function(){return n(this.red,"redNeg works only with red numbers"),this.red._verify1(this),this.red.neg(this)},a.prototype.redPow=function(t){return n(this.red&&!t.red,"redPow(normalNum)"),this.red._verify1(this),this.red.pow(this,t)};var v={k256:null,p224:null,p192:null,p25519:null};function y(t,e){this.name=t,this.p=new a(e,16),this.n=this.p.bitLength(),this.k=new a(1).iushln(this.n).isub(this.p),this.tmp=this._tmp()}function x(){y.call(this,"k256","ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff fffffffe fffffc2f")}function 
b(){y.call(this,"p224","ffffffff ffffffff ffffffff ffffffff 00000000 00000000 00000001")}function _(){y.call(this,"p192","ffffffff ffffffff ffffffff fffffffe ffffffff ffffffff")}function w(){y.call(this,"25519","7fffffffffffffff ffffffffffffffff ffffffffffffffff ffffffffffffffed")}function T(t){if("string"==typeof t){var e=a._prime(t);this.m=e.p,this.prime=e}else n(t.gtn(1),"modulus must be greater than 1"),this.m=t,this.prime=null}function k(t){T.call(this,t),this.shift=this.m.bitLength(),this.shift%26!=0&&(this.shift+=26-this.shift%26),this.r=new a(1).iushln(this.shift),this.r2=this.imod(this.r.sqr()),this.rinv=this.r._invmp(this.m),this.minv=this.rinv.mul(this.r).isubn(1).div(this.m),this.minv=this.minv.umod(this.r),this.minv=this.r.sub(this.minv)}y.prototype._tmp=function(){var t=new a(null);return t.words=new Array(Math.ceil(this.n/13)),t},y.prototype.ireduce=function(t){var e,r=t;do{this.split(r,this.tmp),e=(r=(r=this.imulK(r)).iadd(this.tmp)).bitLength()}while(e>this.n);var n=e0?r.isub(this.p):void 0!==r.strip?r.strip():r._strip(),r},y.prototype.split=function(t,e){t.iushrn(this.n,0,e)},y.prototype.imulK=function(t){return t.imul(this.k)},i(x,y),x.prototype.split=function(t,e){for(var r=Math.min(t.length,9),n=0;n>>22,i=a}i>>>=22,t.words[n-10]=i,0===i&&t.length>10?t.length-=10:t.length-=9},x.prototype.imulK=function(t){t.words[t.length]=0,t.words[t.length+1]=0,t.length+=2;for(var e=0,r=0;r>>=26,t.words[r]=i,e=n}return 0!==e&&(t.words[t.length++]=e),t},a._prime=function(t){if(v[t])return v[t];var e;if("k256"===t)e=new x;else if("p224"===t)e=new b;else if("p192"===t)e=new _;else{if("p25519"!==t)throw new Error("Unknown prime "+t);e=new w}return v[t]=e,e},T.prototype._verify1=function(t){n(0===t.negative,"red works only with positives"),n(t.red,"red works only with red numbers")},T.prototype._verify2=function(t,e){n(0==(t.negative|e.negative),"red works only with positives"),n(t.red&&t.red===e.red,"red works only with red 
numbers")},T.prototype.imod=function(t){return this.prime?this.prime.ireduce(t)._forceRed(this):t.umod(this.m)._forceRed(this)},T.prototype.neg=function(t){return t.isZero()?t.clone():this.m.sub(t)._forceRed(this)},T.prototype.add=function(t,e){this._verify2(t,e);var r=t.add(e);return r.cmp(this.m)>=0&&r.isub(this.m),r._forceRed(this)},T.prototype.iadd=function(t,e){this._verify2(t,e);var r=t.iadd(e);return r.cmp(this.m)>=0&&r.isub(this.m),r},T.prototype.sub=function(t,e){this._verify2(t,e);var r=t.sub(e);return r.cmpn(0)<0&&r.iadd(this.m),r._forceRed(this)},T.prototype.isub=function(t,e){this._verify2(t,e);var r=t.isub(e);return r.cmpn(0)<0&&r.iadd(this.m),r},T.prototype.shl=function(t,e){return this._verify1(t),this.imod(t.ushln(e))},T.prototype.imul=function(t,e){return this._verify2(t,e),this.imod(t.imul(e))},T.prototype.mul=function(t,e){return this._verify2(t,e),this.imod(t.mul(e))},T.prototype.isqr=function(t){return this.imul(t,t.clone())},T.prototype.sqr=function(t){return this.mul(t,t)},T.prototype.sqrt=function(t){if(t.isZero())return t.clone();var e=this.m.andln(3);if(n(e%2==1),3===e){var r=this.m.add(new a(1)).iushrn(2);return this.pow(t,r)}for(var i=this.m.subn(1),o=0;!i.isZero()&&0===i.andln(1);)o++,i.iushrn(1);n(!i.isZero());var s=new a(1).toRed(this),l=s.redNeg(),c=this.m.subn(1).iushrn(1),u=this.m.bitLength();for(u=new a(2*u*u).toRed(this);0!==this.pow(u,c).cmp(l);)u.redIAdd(l);for(var f=this.pow(u,i),h=this.pow(t,i.addn(1).iushrn(1)),p=this.pow(t,i),d=o;0!==p.cmp(s);){for(var m=p,g=0;0!==m.cmp(s);g++)m=m.redSqr();n(g=0;n--){for(var c=e.words[n],u=l-1;u>=0;u--){var f=c>>u&1;i!==r[0]&&(i=this.sqr(i)),0!==f||0!==o?(o<<=1,o|=f,(4===++s||0===n&&0===u)&&(i=this.mul(i,r[o]),s=0,o=0)):s=0}l=26}return i},T.prototype.convertTo=function(t){var e=t.umod(this.m);return e===t?e.clone():e},T.prototype.convertFrom=function(t){var e=t.clone();return e.red=null,e},a.mont=function(t){return new k(t)},i(k,T),k.prototype.convertTo=function(t){return 
this.imod(t.ushln(this.shift))},k.prototype.convertFrom=function(t){var e=this.imod(t.mul(this.rinv));return e.red=null,e},k.prototype.imul=function(t,e){if(t.isZero()||e.isZero())return t.words[0]=0,t.length=1,t;var r=t.imul(e),n=r.maskn(this.shift).mul(this.minv).imaskn(this.shift).mul(this.m),i=r.isub(n).iushrn(this.shift),a=i;return i.cmp(this.m)>=0?a=i.isub(this.m):i.cmpn(0)<0&&(a=i.iadd(this.m)),a._forceRed(this)},k.prototype.mul=function(t,e){if(t.isZero()||e.isZero())return new a(0)._forceRed(this);var r=t.mul(e),n=r.maskn(this.shift).mul(this.minv).imaskn(this.shift).mul(this.m),i=r.isub(n).iushrn(this.shift),o=i;return i.cmp(this.m)>=0?o=i.isub(this.m):i.cmpn(0)<0&&(o=i.iadd(this.m)),o._forceRed(this)},k.prototype.invm=function(t){return this.imod(t._invmp(this.m).mul(this.r2))._forceRed(this)}}(void 0===e||e,this)},{buffer:2}],34:[function(t,e,r){"use strict";e.exports=function(t){var e,r,n,i=t.length,a=0;for(e=0;e>>1;if(!(u<=0)){var f,h=i.mallocDouble(2*u*s),p=i.mallocInt32(s);if((s=l(t,u,h,p))>0){if(1===u&&n)a.init(s),f=a.sweepComplete(u,r,0,s,h,p,0,s,h,p);else{var d=i.mallocDouble(2*u*c),m=i.mallocInt32(c);(c=l(e,u,d,m))>0&&(a.init(s+c),f=1===u?a.sweepBipartite(u,r,0,s,h,p,0,c,d,m):o(u,r,n,s,h,p,c,d,m),i.free(d),i.free(m))}i.free(h),i.free(p)}return f}}}function u(t,e){n.push([t,e])}function f(t){return n=[],c(t,t,u,!0),n}function h(t,e){return n=[],c(t,e,u,!1),n}},{"./lib/intersect":37,"./lib/sweep":41,"typedarray-pool":308}],36:[function(t,e,r){"use strict";function n(t){return t?function(t,e,r,n,i,a,o,s,l,c,u){return i-n>l-s?function(t,e,r,n,i,a,o,s,l,c,u){for(var f=2*t,h=n,p=f*n;hc-l?n?function(t,e,r,n,i,a,o,s,l,c,u){for(var f=2*t,h=n,p=f*n;h0;){var L=6*(S-=1),C=v[L],P=v[L+1],I=v[L+2],O=v[L+3],z=v[L+4],D=v[L+5],R=2*S,F=y[R],B=y[R+1],N=1&D,j=!!(16&D),U=u,V=w,H=k,q=A;if(N&&(U=k,V=A,H=u,q=w),!(2&D&&(I=p(t,C,P,I,U,V,B),P>=I)||4&D&&(P=d(t,C,P,I,U,V,F))>=I)){var G=I-P,Y=z-O;if(j){if(t*G*(G+Y)<1<<22){if(void 
0!==(M=l.scanComplete(t,C,e,P,I,U,V,O,z,H,q)))return M;continue}}else{if(t*Math.min(G,Y)<128){if(void 0!==(M=o(t,C,e,N,P,I,U,V,O,z,H,q)))return M;continue}if(t*G*Y<1<<22){if(void 0!==(M=l.scanBipartite(t,C,e,N,P,I,U,V,O,z,H,q)))return M;continue}}var W=f(t,C,P,I,U,V,F,B);if(P=p0)&&!(p1>=hi)"),h=u("lo===p0"),p=u("lo>>1,f=2*t,h=u,p=o[f*u+e];for(;l=y?(h=v,p=y):g>=b?(h=m,p=g):(h=x,p=b):y>=b?(h=v,p=y):b>=g?(h=m,p=g):(h=x,p=b);for(var _=f*(c-1),w=f*h,T=0;Tr&&i[f+e]>c;--u,f-=o){for(var h=f,p=f+o,d=0;dh;++h,l+=s){if(i[l+f]===o)if(u===h)u+=1,c+=s;else{for(var p=0;s>p;++p){var d=i[l+p];i[l+p]=i[c],i[c++]=d}var m=a[h];a[h]=a[u],a[u++]=m}}return u},"loh;++h,l+=s){if(i[l+f]p;++p){var d=i[l+p];i[l+p]=i[c],i[c++]=d}var m=a[h];a[h]=a[u],a[u++]=m}}return u},"lo<=p0":function(t,e,r,n,i,a,o){for(var s=2*t,l=s*r,c=l,u=r,f=t+e,h=r;n>h;++h,l+=s){if(i[l+f]<=o)if(u===h)u+=1,c+=s;else{for(var p=0;s>p;++p){var d=i[l+p];i[l+p]=i[c],i[c++]=d}var m=a[h];a[h]=a[u],a[u++]=m}}return u},"hi<=p0":function(t,e,r,n,i,a,o){for(var s=2*t,l=s*r,c=l,u=r,f=t+e,h=r;n>h;++h,l+=s){if(i[l+f]<=o)if(u===h)u+=1,c+=s;else{for(var p=0;s>p;++p){var d=i[l+p];i[l+p]=i[c],i[c++]=d}var m=a[h];a[h]=a[u],a[u++]=m}}return u},"lop;++p,l+=s){var d=i[l+f],m=i[l+h];if(dg;++g){var v=i[l+g];i[l+g]=i[c],i[c++]=v}var y=a[p];a[p]=a[u],a[u++]=y}}return u},"lo<=p0&&p0<=hi":function(t,e,r,n,i,a,o){for(var s=2*t,l=s*r,c=l,u=r,f=e,h=t+e,p=r;n>p;++p,l+=s){var d=i[l+f],m=i[l+h];if(d<=o&&o<=m)if(u===p)u+=1,c+=s;else{for(var g=0;s>g;++g){var v=i[l+g];i[l+g]=i[c],i[c++]=v}var y=a[p];a[p]=a[u],a[u++]=y}}return u},"!(lo>=p0)&&!(p1>=hi)":function(t,e,r,n,i,a,o,s){for(var l=2*t,c=l*r,u=c,f=r,h=e,p=t+e,d=r;n>d;++d,c+=l){var m=i[c+h],g=i[c+p];if(!(m>=o||s>=g))if(f===d)f+=1,u+=l;else{for(var v=0;l>v;++v){var y=i[c+v];i[c+v]=i[u],i[u++]=y}var x=a[d];a[d]=a[f],a[f++]=x}}return f}}},{}],40:[function(t,e,r){"use strict";e.exports=function(t,e){e<=128?n(0,e-1,t):function t(e,r,u){var 
f=(r-e+1)/6|0,h=e+f,p=r-f,d=e+r>>1,m=d-f,g=d+f,v=h,y=m,x=d,b=g,_=p,w=e+1,T=r-1,k=0;l(v,y,u)&&(k=v,v=y,y=k);l(b,_,u)&&(k=b,b=_,_=k);l(v,x,u)&&(k=v,v=x,x=k);l(y,x,u)&&(k=y,y=x,x=k);l(v,b,u)&&(k=v,v=b,b=k);l(x,b,u)&&(k=x,x=b,b=k);l(y,_,u)&&(k=y,y=_,_=k);l(y,x,u)&&(k=y,y=x,x=k);l(b,_,u)&&(k=b,b=_,_=k);for(var A=u[2*y],M=u[2*y+1],S=u[2*b],E=u[2*b+1],L=2*v,C=2*x,P=2*_,I=2*h,O=2*d,z=2*p,D=0;D<2;++D){var R=u[L+D],F=u[C+D],B=u[P+D];u[I+D]=R,u[O+D]=F,u[z+D]=B}a(m,e,u),a(g,r,u);for(var N=w;N<=T;++N)if(c(N,A,M,u))N!==w&&i(N,w,u),++w;else if(!c(N,S,E,u))for(;;){if(c(T,S,E,u)){c(T,A,M,u)?(o(N,w,T,u),++w,--T):(i(N,T,u),--T);break}if(--Tt;){var c=r[l-2],u=r[l-1];if(cr[e+1])}function c(t,e,r,n){var i=n[t*=2];return i>>1;a(h,M);var S=0,E=0;for(w=0;w=1<<28)p(l,c,E--,L=L-(1<<28)|0);else if(L>=0)p(o,s,S--,L);else if(L<=-(1<<28)){L=-L-(1<<28)|0;for(var C=0;C>>1;a(h,E);var L=0,C=0,P=0;for(k=0;k>1==h[2*k+3]>>1&&(O=2,k+=1),I<0){for(var z=-(I>>1)-1,D=0;D>1)-1;0===O?p(o,s,L--,z):1===O?p(l,c,C--,z):2===O&&p(u,f,P--,z)}}},scanBipartite:function(t,e,r,n,i,l,c,u,f,m,g,v){var y=0,x=2*t,b=e,_=e+t,w=1,T=1;n?T=1<<28:w=1<<28;for(var k=i;k>>1;a(h,E);var L=0;for(k=0;k=1<<28?(P=!n,A-=1<<28):(P=!!n,A-=1),P)d(o,s,L++,A);else{var I=v[A],O=x*A,z=g[O+e+1],D=g[O+e+1+t];t:for(var R=0;R>>1;a(h,w);var T=0;for(y=0;y=1<<28)o[T++]=x-(1<<28);else{var A=p[x-=1],M=m*x,S=f[M+e+1],E=f[M+e+1+t];t:for(var L=0;L=0;--L)if(o[L]===x){for(O=L+1;O0;){for(var p=r.pop(),d=(s=r.pop(),u=-1,f=-1,l=o[s],1);d=0||(e.flip(s,p),i(t,e,r,u,s,f),i(t,e,r,s,f,u),i(t,e,r,f,p,u),i(t,e,r,p,u,f)))}}},{"binary-search-bounds":31,"robust-in-sphere":282}],44:[function(t,e,r){"use strict";var n,i=t("binary-search-bounds");function a(t,e,r,n,i,a,o){this.cells=t,this.neighbor=e,this.flags=n,this.constraint=r,this.active=i,this.next=a,this.boundary=o}function o(t,e){return t[0]-e[0]||t[1]-e[1]||t[2]-e[2]}e.exports=function(t,e,r){var n=function(t,e){for(var r=t.cells(),n=r.length,i=0;i0||l.length>0;){for(;s.length>0;){var 
p=s.pop();if(c[p]!==-i){c[p]=i;u[p];for(var d=0;d<3;++d){var m=h[3*p+d];m>=0&&0===c[m]&&(f[3*p+d]?l.push(m):(s.push(m),c[m]=i))}}}var g=l;l=s,s=g,l.length=0,i=-i}var v=function(t,e,r){for(var n=0,i=0;i1&&i(r[h[p-2]],r[h[p-1]],a)>0;)t.push([h[p-1],h[p-2],o]),p-=1;h.length=p,h.push(o);var d=f.upperIds;for(p=d.length;p>1&&i(r[d[p-2]],r[d[p-1]],a)<0;)t.push([d[p-2],d[p-1],o]),p-=1;d.length=p,d.push(o)}}function u(t,e){var r;return(r=t.a[0]d[0]&&i.push(new o(d,p,2,l),new o(p,d,1,l))}i.sort(s);for(var m=i[0].a[0]-(1+Math.abs(i[0].a[0]))*Math.pow(2,-52),g=[new a([m,1],[m,0],-1,[],[],[],[])],v=[],y=(l=0,i.length);l=0}}(),a.removeTriangle=function(t,e,r){var n=this.stars;o(n[t],e,r),o(n[e],r,t),o(n[r],t,e)},a.addTriangle=function(t,e,r){var n=this.stars;n[t].push(e,r),n[e].push(r,t),n[r].push(t,e)},a.opposite=function(t,e){for(var r=this.stars[e],n=1,i=r.length;ne[2]?1:0)}function v(t,e,r){if(0!==t.length){if(e)for(var n=0;n=0;--a){var x=e[u=(S=n[a])[0]],b=x[0],_=x[1],w=t[b],T=t[_];if((w[0]-T[0]||w[1]-T[1])<0){var k=b;b=_,_=k}x[0]=b;var A,M=x[1]=S[1];for(i&&(A=x[2]);a>0&&n[a-1][0]===u;){var S,E=(S=n[--a])[1];i?e.push([M,E,A]):e.push([M,E]),M=E}i?e.push([M,_,A]):e.push([M,_])}return h}(t,e,h,g,r));return v(e,y,r),!!y||(h.length>0||g.length>0)}},{"./lib/rat-seg-intersect":51,"big-rat":18,"big-rat/cmp":16,"big-rat/to-float":30,"box-intersect":35,nextafter:260,"rat-vec":273,"robust-segment-intersect":287,"union-find":309}],51:[function(t,e,r){"use strict";e.exports=function(t,e,r,n){var a=s(e,t),f=s(n,r),h=u(a,f);if(0===o(h))return null;var p=s(t,r),d=u(f,p),m=i(d,h),g=c(a,m);return l(t,g)};var n=t("big-rat/mul"),i=t("big-rat/div"),a=t("big-rat/sub"),o=t("big-rat/sign"),s=t("rat-vec/sub"),l=t("rat-vec/add"),c=t("rat-vec/muls");function u(t,e){return 
a(n(t[0],e[1]),n(t[1],e[0]))}},{"big-rat/div":17,"big-rat/mul":27,"big-rat/sign":28,"big-rat/sub":29,"rat-vec/add":272,"rat-vec/muls":274,"rat-vec/sub":275}],52:[function(t,e,r){e.exports={jet:[{index:0,rgb:[0,0,131]},{index:.125,rgb:[0,60,170]},{index:.375,rgb:[5,255,255]},{index:.625,rgb:[255,255,0]},{index:.875,rgb:[250,0,0]},{index:1,rgb:[128,0,0]}],hsv:[{index:0,rgb:[255,0,0]},{index:.169,rgb:[253,255,2]},{index:.173,rgb:[247,255,2]},{index:.337,rgb:[0,252,4]},{index:.341,rgb:[0,252,10]},{index:.506,rgb:[1,249,255]},{index:.671,rgb:[2,0,253]},{index:.675,rgb:[8,0,253]},{index:.839,rgb:[255,0,251]},{index:.843,rgb:[255,0,245]},{index:1,rgb:[255,0,6]}],hot:[{index:0,rgb:[0,0,0]},{index:.3,rgb:[230,0,0]},{index:.6,rgb:[255,210,0]},{index:1,rgb:[255,255,255]}],spring:[{index:0,rgb:[255,0,255]},{index:1,rgb:[255,255,0]}],summer:[{index:0,rgb:[0,128,102]},{index:1,rgb:[255,255,102]}],autumn:[{index:0,rgb:[255,0,0]},{index:1,rgb:[255,255,0]}],winter:[{index:0,rgb:[0,0,255]},{index:1,rgb:[0,255,128]}],bone:[{index:0,rgb:[0,0,0]},{index:.376,rgb:[84,84,116]},{index:.753,rgb:[169,200,200]},{index:1,rgb:[255,255,255]}],copper:[{index:0,rgb:[0,0,0]},{index:.804,rgb:[255,160,102]},{index:1,rgb:[255,199,127]}],greys:[{index:0,rgb:[0,0,0]},{index:1,rgb:[255,255,255]}],yignbu:[{index:0,rgb:[8,29,88]},{index:.125,rgb:[37,52,148]},{index:.25,rgb:[34,94,168]},{index:.375,rgb:[29,145,192]},{index:.5,rgb:[65,182,196]},{index:.625,rgb:[127,205,187]},{index:.75,rgb:[199,233,180]},{index:.875,rgb:[237,248,217]},{index:1,rgb:[255,255,217]}],greens:[{index:0,rgb:[0,68,27]},{index:.125,rgb:[0,109,44]},{index:.25,rgb:[35,139,69]},{index:.375,rgb:[65,171,93]},{index:.5,rgb:[116,196,118]},{index:.625,rgb:[161,217,155]},{index:.75,rgb:[199,233,192]},{index:.875,rgb:[229,245,224]},{index:1,rgb:[247,252,245]}],yiorrd:[{index:0,rgb:[128,0,38]},{index:.125,rgb:[189,0,38]},{index:.25,rgb:[227,26,28]},{index:.375,rgb:[252,78,42]},{index:.5,rgb:[253,141,60]},{index:.625,rgb:[254,178,76]},{index:.75
,rgb:[254,217,118]},{index:.875,rgb:[255,237,160]},{index:1,rgb:[255,255,204]}],bluered:[{index:0,rgb:[0,0,255]},{index:1,rgb:[255,0,0]}],rdbu:[{index:0,rgb:[5,10,172]},{index:.35,rgb:[106,137,247]},{index:.5,rgb:[190,190,190]},{index:.6,rgb:[220,170,132]},{index:.7,rgb:[230,145,90]},{index:1,rgb:[178,10,28]}],picnic:[{index:0,rgb:[0,0,255]},{index:.1,rgb:[51,153,255]},{index:.2,rgb:[102,204,255]},{index:.3,rgb:[153,204,255]},{index:.4,rgb:[204,204,255]},{index:.5,rgb:[255,255,255]},{index:.6,rgb:[255,204,255]},{index:.7,rgb:[255,153,255]},{index:.8,rgb:[255,102,204]},{index:.9,rgb:[255,102,102]},{index:1,rgb:[255,0,0]}],rainbow:[{index:0,rgb:[150,0,90]},{index:.125,rgb:[0,0,200]},{index:.25,rgb:[0,25,255]},{index:.375,rgb:[0,152,255]},{index:.5,rgb:[44,255,150]},{index:.625,rgb:[151,255,0]},{index:.75,rgb:[255,234,0]},{index:.875,rgb:[255,111,0]},{index:1,rgb:[255,0,0]}],portland:[{index:0,rgb:[12,51,131]},{index:.25,rgb:[10,136,186]},{index:.5,rgb:[242,211,56]},{index:.75,rgb:[242,143,56]},{index:1,rgb:[217,30,30]}],blackbody:[{index:0,rgb:[0,0,0]},{index:.2,rgb:[230,0,0]},{index:.4,rgb:[230,210,0]},{index:.7,rgb:[255,255,255]},{index:1,rgb:[160,200,255]}],earth:[{index:0,rgb:[0,0,130]},{index:.1,rgb:[0,180,180]},{index:.2,rgb:[40,210,40]},{index:.4,rgb:[230,230,50]},{index:.6,rgb:[120,70,20]},{index:1,rgb:[255,255,255]}],electric:[{index:0,rgb:[0,0,0]},{index:.15,rgb:[30,0,100]},{index:.4,rgb:[120,0,100]},{index:.6,rgb:[160,90,0]},{index:.8,rgb:[230,200,0]},{index:1,rgb:[255,250,220]}],alpha:[{index:0,rgb:[255,255,255,0]},{index:1,rgb:[255,255,255,1]}],viridis:[{index:0,rgb:[68,1,84]},{index:.13,rgb:[71,44,122]},{index:.25,rgb:[59,81,139]},{index:.38,rgb:[44,113,142]},{index:.5,rgb:[33,144,141]},{index:.63,rgb:[39,173,129]},{index:.75,rgb:[92,200,99]},{index:.88,rgb:[170,220,50]},{index:1,rgb:[253,231,37]}],inferno:[{index:0,rgb:[0,0,4]},{index:.13,rgb:[31,12,72]},{index:.25,rgb:[85,15,109]},{index:.38,rgb:[136,34,106]},{index:.5,rgb:[186,54,85]},{index:.63,rgb:[
227,89,51]},{index:.75,rgb:[249,140,10]},{index:.88,rgb:[249,201,50]},{index:1,rgb:[252,255,164]}],magma:[{index:0,rgb:[0,0,4]},{index:.13,rgb:[28,16,68]},{index:.25,rgb:[79,18,123]},{index:.38,rgb:[129,37,129]},{index:.5,rgb:[181,54,122]},{index:.63,rgb:[229,80,100]},{index:.75,rgb:[251,135,97]},{index:.88,rgb:[254,194,135]},{index:1,rgb:[252,253,191]}],plasma:[{index:0,rgb:[13,8,135]},{index:.13,rgb:[75,3,161]},{index:.25,rgb:[125,3,168]},{index:.38,rgb:[168,34,150]},{index:.5,rgb:[203,70,121]},{index:.63,rgb:[229,107,93]},{index:.75,rgb:[248,148,65]},{index:.88,rgb:[253,195,40]},{index:1,rgb:[240,249,33]}],warm:[{index:0,rgb:[125,0,179]},{index:.13,rgb:[172,0,187]},{index:.25,rgb:[219,0,170]},{index:.38,rgb:[255,0,130]},{index:.5,rgb:[255,63,74]},{index:.63,rgb:[255,123,0]},{index:.75,rgb:[234,176,0]},{index:.88,rgb:[190,228,0]},{index:1,rgb:[147,255,0]}],cool:[{index:0,rgb:[125,0,179]},{index:.13,rgb:[116,0,218]},{index:.25,rgb:[98,74,237]},{index:.38,rgb:[68,146,231]},{index:.5,rgb:[0,204,197]},{index:.63,rgb:[0,247,146]},{index:.75,rgb:[0,255,88]},{index:.88,rgb:[40,255,8]},{index:1,rgb:[147,255,0]}],"rainbow-soft":[{index:0,rgb:[125,0,179]},{index:.1,rgb:[199,0,180]},{index:.2,rgb:[255,0,121]},{index:.3,rgb:[255,108,0]},{index:.4,rgb:[222,194,0]},{index:.5,rgb:[150,255,0]},{index:.6,rgb:[0,255,55]},{index:.7,rgb:[0,246,150]},{index:.8,rgb:[50,167,222]},{index:.9,rgb:[103,51,235]},{index:1,rgb:[124,0,186]}],bathymetry:[{index:0,rgb:[40,26,44]},{index:.13,rgb:[59,49,90]},{index:.25,rgb:[64,76,139]},{index:.38,rgb:[63,110,151]},{index:.5,rgb:[72,142,158]},{index:.63,rgb:[85,174,163]},{index:.75,rgb:[120,206,163]},{index:.88,rgb:[187,230,172]},{index:1,rgb:[253,254,204]}],cdom:[{index:0,rgb:[47,15,62]},{index:.13,rgb:[87,23,86]},{index:.25,rgb:[130,28,99]},{index:.38,rgb:[171,41,96]},{index:.5,rgb:[206,67,86]},{index:.63,rgb:[230,106,84]},{index:.75,rgb:[242,149,103]},{index:.88,rgb:[249,193,135]},{index:1,rgb:[254,237,176]}],chlorophyll:[{index:0,rgb:[18,36,20]}
,{index:.13,rgb:[25,63,41]},{index:.25,rgb:[24,91,59]},{index:.38,rgb:[13,119,72]},{index:.5,rgb:[18,148,80]},{index:.63,rgb:[80,173,89]},{index:.75,rgb:[132,196,122]},{index:.88,rgb:[175,221,162]},{index:1,rgb:[215,249,208]}],density:[{index:0,rgb:[54,14,36]},{index:.13,rgb:[89,23,80]},{index:.25,rgb:[110,45,132]},{index:.38,rgb:[120,77,178]},{index:.5,rgb:[120,113,213]},{index:.63,rgb:[115,151,228]},{index:.75,rgb:[134,185,227]},{index:.88,rgb:[177,214,227]},{index:1,rgb:[230,241,241]}],"freesurface-blue":[{index:0,rgb:[30,4,110]},{index:.13,rgb:[47,14,176]},{index:.25,rgb:[41,45,236]},{index:.38,rgb:[25,99,212]},{index:.5,rgb:[68,131,200]},{index:.63,rgb:[114,156,197]},{index:.75,rgb:[157,181,203]},{index:.88,rgb:[200,208,216]},{index:1,rgb:[241,237,236]}],"freesurface-red":[{index:0,rgb:[60,9,18]},{index:.13,rgb:[100,17,27]},{index:.25,rgb:[142,20,29]},{index:.38,rgb:[177,43,27]},{index:.5,rgb:[192,87,63]},{index:.63,rgb:[205,125,105]},{index:.75,rgb:[216,162,148]},{index:.88,rgb:[227,199,193]},{index:1,rgb:[241,237,236]}],oxygen:[{index:0,rgb:[64,5,5]},{index:.13,rgb:[106,6,15]},{index:.25,rgb:[144,26,7]},{index:.38,rgb:[168,64,3]},{index:.5,rgb:[188,100,4]},{index:.63,rgb:[206,136,11]},{index:.75,rgb:[220,174,25]},{index:.88,rgb:[231,215,44]},{index:1,rgb:[248,254,105]}],par:[{index:0,rgb:[51,20,24]},{index:.13,rgb:[90,32,35]},{index:.25,rgb:[129,44,34]},{index:.38,rgb:[159,68,25]},{index:.5,rgb:[182,99,19]},{index:.63,rgb:[199,134,22]},{index:.75,rgb:[212,171,35]},{index:.88,rgb:[221,210,54]},{index:1,rgb:[225,253,75]}],phase:[{index:0,rgb:[145,105,18]},{index:.13,rgb:[184,71,38]},{index:.25,rgb:[186,58,115]},{index:.38,rgb:[160,71,185]},{index:.5,rgb:[110,97,218]},{index:.63,rgb:[50,123,164]},{index:.75,rgb:[31,131,110]},{index:.88,rgb:[77,129,34]},{index:1,rgb:[145,105,18]}],salinity:[{index:0,rgb:[42,24,108]},{index:.13,rgb:[33,50,162]},{index:.25,rgb:[15,90,145]},{index:.38,rgb:[40,118,137]},{index:.5,rgb:[59,146,135]},{index:.63,rgb:[79,175,126]},{index:
.75,rgb:[120,203,104]},{index:.88,rgb:[193,221,100]},{index:1,rgb:[253,239,154]}],temperature:[{index:0,rgb:[4,35,51]},{index:.13,rgb:[23,51,122]},{index:.25,rgb:[85,59,157]},{index:.38,rgb:[129,79,143]},{index:.5,rgb:[175,95,130]},{index:.63,rgb:[222,112,101]},{index:.75,rgb:[249,146,66]},{index:.88,rgb:[249,196,65]},{index:1,rgb:[232,250,91]}],turbidity:[{index:0,rgb:[34,31,27]},{index:.13,rgb:[65,50,41]},{index:.25,rgb:[98,69,52]},{index:.38,rgb:[131,89,57]},{index:.5,rgb:[161,112,59]},{index:.63,rgb:[185,140,66]},{index:.75,rgb:[202,174,88]},{index:.88,rgb:[216,209,126]},{index:1,rgb:[233,246,171]}],"velocity-blue":[{index:0,rgb:[17,32,64]},{index:.13,rgb:[35,52,116]},{index:.25,rgb:[29,81,156]},{index:.38,rgb:[31,113,162]},{index:.5,rgb:[50,144,169]},{index:.63,rgb:[87,173,176]},{index:.75,rgb:[149,196,189]},{index:.88,rgb:[203,221,211]},{index:1,rgb:[254,251,230]}],"velocity-green":[{index:0,rgb:[23,35,19]},{index:.13,rgb:[24,64,38]},{index:.25,rgb:[11,95,45]},{index:.38,rgb:[39,123,35]},{index:.5,rgb:[95,146,12]},{index:.63,rgb:[152,165,18]},{index:.75,rgb:[201,186,69]},{index:.88,rgb:[233,216,137]},{index:1,rgb:[255,253,205]}],cubehelix:[{index:0,rgb:[0,0,0]},{index:.07,rgb:[22,5,59]},{index:.13,rgb:[60,4,105]},{index:.2,rgb:[109,1,135]},{index:.27,rgb:[161,0,147]},{index:.33,rgb:[210,2,142]},{index:.4,rgb:[251,11,123]},{index:.47,rgb:[255,29,97]},{index:.53,rgb:[255,54,69]},{index:.6,rgb:[255,85,46]},{index:.67,rgb:[255,120,34]},{index:.73,rgb:[255,157,37]},{index:.8,rgb:[241,191,57]},{index:.87,rgb:[224,220,93]},{index:.93,rgb:[218,241,142]},{index:1,rgb:[227,253,198]}]}},{}],53:[function(t,e,r){"use strict";var n=t("./colorScale"),i=t("lerp");function a(t){return[t[0]/255,t[1]/255,t[2]/255,t[3]]}function o(t){for(var e,r="#",n=0;n<3;++n)r+=("00"+(e=(e=t[n]).toString(16))).substr(e.length);return r}function s(t){return"rgba("+t.join(",")+")"}e.exports=function(t){var 
e,r,l,c,u,f,h,p,d,m;t||(t={});p=(t.nshades||72)-1,h=t.format||"hex",(f=t.colormap)||(f="jet");if("string"==typeof f){if(f=f.toLowerCase(),!n[f])throw Error(f+" not a supported colorscale");u=n[f]}else{if(!Array.isArray(f))throw Error("unsupported colormap option",f);u=f.slice()}if(u.length>p+1)throw new Error(f+" map requires nshades to be at least size "+u.length);d=Array.isArray(t.alpha)?2!==t.alpha.length?[1,1]:t.alpha.slice():"number"==typeof t.alpha?[t.alpha,t.alpha]:[1,1];e=u.map((function(t){return Math.round(t.index*p)})),d[0]=Math.min(Math.max(d[0],0),1),d[1]=Math.min(Math.max(d[1],0),1);var g=u.map((function(t,e){var r=u[e].index,n=u[e].rgb.slice();return 4===n.length&&n[3]>=0&&n[3]<=1||(n[3]=d[0]+(d[1]-d[0])*r),n})),v=[];for(m=0;m0||l(t,e,a)?-1:1:0===s?c>0||l(t,e,r)?1:-1:i(c-s)}var h=n(t,e,r);return h>0?o>0&&n(t,e,a)>0?1:-1:h<0?o>0||n(t,e,a)>0?1:-1:n(t,e,a)>0||l(t,e,r)?1:-1};var n=t("robust-orientation"),i=t("signum"),a=t("two-sum"),o=t("robust-product"),s=t("robust-sum");function l(t,e,r){var n=a(t[0],-e[0]),i=a(t[1],-e[1]),l=a(r[0],-e[0]),c=a(r[1],-e[1]),u=s(o(n,l),o(i,c));return u[u.length-1]>=0}},{"robust-orientation":284,"robust-product":285,"robust-sum":289,signum:55,"two-sum":307}],55:[function(t,e,r){"use strict";e.exports=function(t){return t<0?-1:t>0?1:0}},{}],56:[function(t,e,r){e.exports=function(t,e){var r=t.length,a=t.length-e.length;if(a)return a;switch(r){case 0:return 0;case 1:return t[0]-e[0];case 2:return t[0]+t[1]-e[0]-e[1]||n(t[0],t[1])-n(e[0],e[1]);case 3:var o=t[0]+t[1],s=e[0]+e[1];if(a=o+t[2]-(s+e[2]))return a;var l=n(t[0],t[1]),c=n(e[0],e[1]);return n(l,t[2])-n(c,e[2])||n(l+t[2],o)-n(c+e[2],s);case 4:var u=t[0],f=t[1],h=t[2],p=t[3],d=e[0],m=e[1],g=e[2],v=e[3];return u+f+h+p-(d+m+g+v)||n(u,f,h,p)-n(d,m,g,v,d)||n(u+f,u+h,u+p,f+h,f+p,h+p)-n(d+m,d+g,d+v,m+g,m+v,g+v)||n(u+f+h,u+f+p,u+h+p,f+h+p)-n(d+m+g,d+m+v,d+g+v,m+g+v);default:for(var y=t.slice().sort(i),x=e.slice().sort(i),b=0;bt[r][0]&&(r=n);return 
er?[[r],[e]]:[[e]]}},{}],60:[function(t,e,r){"use strict";e.exports=function(t){var e=n(t),r=e.length;if(r<=2)return[];for(var i=new Array(r),a=e[r-1],o=0;o=e[l]&&(s+=1);a[o]=s}}return t}(n(a,!0),r)}};var n=t("incremental-convex-hull"),i=t("affine-hull")},{"affine-hull":10,"incremental-convex-hull":233}],62:[function(t,e,r){"use strict";e.exports=function(t,e,r,n,i,a){var o=i-1,s=i*i,l=o*o,c=(1+2*i)*l,u=i*l,f=s*(3-2*i),h=s*o;if(t.length){a||(a=new Array(t.length));for(var p=t.length-1;p>=0;--p)a[p]=c*t[p]+u*e[p]+f*r[p]+h*n[p];return a}return c*t+u*e+f*r+h*n},e.exports.derivative=function(t,e,r,n,i,a){var o=6*i*i-6*i,s=3*i*i-4*i+1,l=-6*i*i+6*i,c=3*i*i-2*i;if(t.length){a||(a=new Array(t.length));for(var u=t.length-1;u>=0;--u)a[u]=o*t[u]+s*e[u]+l*r[u]+c*n[u];return a}return o*t+s*e+l*r[u]+c*n}},{}],63:[function(t,e,r){"use strict";var n=t("incremental-convex-hull"),i=t("uniq");function a(t,e){this.point=t,this.index=e}function o(t,e){for(var r=t.point,n=e.point,i=r.length,a=0;a=2)return!1;t[r]=n}return!0})):_.filter((function(t){for(var e=0;e<=s;++e){var r=v[t[e]];if(r<0)return!1;t[e]=r}return!0}));if(1&s)for(u=0;u<_.length;++u){h=(b=_[u])[0];b[0]=b[1],b[1]=h}return _}},{"incremental-convex-hull":233,uniq:310}],64:[function(t,e,r){(function(t){(function(){var r=!1;if("undefined"!=typeof Float64Array){var n=new Float64Array(1),i=new Uint32Array(n.buffer);if(n[0]=1,r=!0,1072693248===i[1]){e.exports=function(t){return n[0]=t,[i[0],i[1]]},e.exports.pack=function(t,e){return i[0]=t,i[1]=e,n[0]},e.exports.lo=function(t){return n[0]=t,i[0]},e.exports.hi=function(t){return n[0]=t,i[1]}}else if(1072693248===i[0]){e.exports=function(t){return n[0]=t,[i[1],i[0]]},e.exports.pack=function(t,e){return i[1]=t,i[0]=e,n[0]},e.exports.lo=function(t){return n[0]=t,i[1]},e.exports.hi=function(t){return n[0]=t,i[0]}}else r=!1}if(!r){var a=new t(8);e.exports=function(t){return a.writeDoubleLE(t,0,!0),[a.readUInt32LE(0,!0),a.readUInt32LE(4,!0)]},e.exports.pack=function(t,e){return 
a.writeUInt32LE(t,0,!0),a.writeUInt32LE(e,4,!0),a.readDoubleLE(0,!0)},e.exports.lo=function(t){return a.writeDoubleLE(t,0,!0),a.readUInt32LE(0,!0)},e.exports.hi=function(t){return a.writeDoubleLE(t,0,!0),a.readUInt32LE(4,!0)}}e.exports.sign=function(t){return e.exports.hi(t)>>>31},e.exports.exponent=function(t){return(e.exports.hi(t)<<1>>>21)-1023},e.exports.fraction=function(t){var r=e.exports.lo(t),n=e.exports.hi(t),i=1048575&n;return 2146435072&n&&(i+=1<<20),[r,i]},e.exports.denormalized=function(t){return!(2146435072&e.exports.hi(t))}}).call(this)}).call(this,t("buffer").Buffer)},{buffer:3}],65:[function(t,e,r){"use strict";e.exports=function(t,e){switch(void 0===e&&(e=0),typeof t){case"number":if(t>0)return function(t,e){var r,n;for(r=new Array(t),n=0;n=r-1){h=l.length-1;var d=t-e[r-1];for(p=0;p=r-1)for(var u=s.length-1,f=(e[r-1],0);f=0;--r)if(t[--e])return!1;return!0},s.jump=function(t){var e=this.lastT(),r=this.dimension;if(!(t0;--f)n.push(a(l[f-1],c[f-1],arguments[f])),i.push(0)}},s.push=function(t){var e=this.lastT(),r=this.dimension;if(!(t1e-6?1/s:0;this._time.push(t);for(var h=r;h>0;--h){var p=a(c[h-1],u[h-1],arguments[h]);n.push(p),i.push((p-n[o++])*f)}}},s.set=function(t){var e=this.dimension;if(!(t0;--l)r.push(a(o[l-1],s[l-1],arguments[l])),n.push(0)}},s.move=function(t){var e=this.lastT(),r=this.dimension;if(!(t<=e||arguments.length!==r+1)){var n=this._state,i=this._velocity,o=n.length-this.dimension,s=this.bounds,l=s[0],c=s[1],u=t-e,f=u>1e-6?1/u:0;this._time.push(t);for(var h=r;h>0;--h){var p=arguments[h];n.push(a(l[h-1],c[h-1],n[o++]+p)),i.push(p*f)}}},s.idle=function(t){var e=this.lastT();if(!(t=0;--f)n.push(a(l[f],c[f],n[o]+u*i[o])),i.push(0),o+=1}}},{"binary-search-bounds":31,"cubic-hermite":62}],69:[function(t,e,r){"use strict";e.exports=function(t){return new s(t||m,null)};function n(t,e,r,n,i,a){this._color=t,this.key=e,this.value=r,this.left=n,this.right=i,this._count=a}function i(t){return new 
n(t._color,t.key,t.value,t.left,t.right,t._count)}function a(t,e){return new n(t,e.key,e.value,e.left,e.right,e._count)}function o(t){t._count=1+(t.left?t.left._count:0)+(t.right?t.right._count:0)}function s(t,e){this._compare=t,this.root=e}var l=s.prototype;function c(t,e){var r;if(e.left&&(r=c(t,e.left)))return r;return(r=t(e.key,e.value))||(e.right?c(t,e.right):void 0)}function u(t,e,r,n){if(e(t,n.key)<=0){var i;if(n.left)if(i=u(t,e,r,n.left))return i;if(i=r(n.key,n.value))return i}if(n.right)return u(t,e,r,n.right)}function f(t,e,r,n,i){var a,o=r(t,i.key),s=r(e,i.key);if(o<=0){if(i.left&&(a=f(t,e,r,n,i.left)))return a;if(s>0&&(a=n(i.key,i.value)))return a}if(s>0&&i.right)return f(t,e,r,n,i.right)}function h(t,e){this.tree=t,this._stack=e}Object.defineProperty(l,"keys",{get:function(){var t=[];return this.forEach((function(e,r){t.push(e)})),t}}),Object.defineProperty(l,"values",{get:function(){var t=[];return this.forEach((function(e,r){t.push(r)})),t}}),Object.defineProperty(l,"length",{get:function(){return this.root?this.root._count:0}}),l.insert=function(t,e){for(var r=this._compare,i=this.root,l=[],c=[];i;){var u=r(t,i.key);l.push(i),c.push(u),i=u<=0?i.left:i.right}l.push(new n(0,t,e,null,null,1));for(var f=l.length-2;f>=0;--f){i=l[f];c[f]<=0?l[f]=new n(i._color,i.key,i.value,l[f+1],i.right,i._count+1):l[f]=new n(i._color,i.key,i.value,i.left,l[f+1],i._count+1)}for(f=l.length-1;f>1;--f){var h=l[f-1];i=l[f];if(1===h._color||1===i._color)break;var p=l[f-2];if(p.left===h)if(h.left===i){if(!(d=p.right)||0!==d._color){if(p._color=0,p.left=h.right,h._color=1,h.right=p,l[f-2]=h,l[f-1]=i,o(p),o(h),f>=3)(m=l[f-3]).left===p?m.left=h:m.right=h;break}h._color=1,p.right=a(1,d),p._color=0,f-=1}else{if(!(d=p.right)||0!==d._color){if(h.right=i.left,p._color=0,p.left=i.right,i._color=1,i.left=h,i.right=p,l[f-2]=i,l[f-1]=h,o(p),o(h),o(i),f>=3)(m=l[f-3]).left===p?m.left=i:m.right=i;break}h._color=1,p.right=a(1,d),p._color=0,f-=1}else 
if(h.right===i){if(!(d=p.left)||0!==d._color){if(p._color=0,p.right=h.left,h._color=1,h.left=p,l[f-2]=h,l[f-1]=i,o(p),o(h),f>=3)(m=l[f-3]).right===p?m.right=h:m.left=h;break}h._color=1,p.left=a(1,d),p._color=0,f-=1}else{var d;if(!(d=p.left)||0!==d._color){var m;if(h.left=i.right,p._color=0,p.right=i.left,i._color=1,i.right=h,i.left=p,l[f-2]=i,l[f-1]=h,o(p),o(h),o(i),f>=3)(m=l[f-3]).right===p?m.right=i:m.left=i;break}h._color=1,p.left=a(1,d),p._color=0,f-=1}}return l[0]._color=1,new s(r,l[0])},l.forEach=function(t,e,r){if(this.root)switch(arguments.length){case 1:return c(t,this.root);case 2:return u(e,this._compare,t,this.root);case 3:if(this._compare(e,r)>=0)return;return f(e,r,this._compare,t,this.root)}},Object.defineProperty(l,"begin",{get:function(){for(var t=[],e=this.root;e;)t.push(e),e=e.left;return new h(this,t)}}),Object.defineProperty(l,"end",{get:function(){for(var t=[],e=this.root;e;)t.push(e),e=e.right;return new h(this,t)}}),l.at=function(t){if(t<0)return new h(this,[]);for(var e=this.root,r=[];;){if(r.push(e),e.left){if(t=e.right._count)break;e=e.right}return new h(this,[])},l.ge=function(t){for(var e=this._compare,r=this.root,n=[],i=0;r;){var a=e(t,r.key);n.push(r),a<=0&&(i=n.length),r=a<=0?r.left:r.right}return n.length=i,new h(this,n)},l.gt=function(t){for(var e=this._compare,r=this.root,n=[],i=0;r;){var a=e(t,r.key);n.push(r),a<0&&(i=n.length),r=a<0?r.left:r.right}return n.length=i,new h(this,n)},l.lt=function(t){for(var e=this._compare,r=this.root,n=[],i=0;r;){var a=e(t,r.key);n.push(r),a>0&&(i=n.length),r=a<=0?r.left:r.right}return n.length=i,new h(this,n)},l.le=function(t){for(var e=this._compare,r=this.root,n=[],i=0;r;){var a=e(t,r.key);n.push(r),a>=0&&(i=n.length),r=a<0?r.left:r.right}return n.length=i,new h(this,n)},l.find=function(t){for(var e=this._compare,r=this.root,n=[];r;){var i=e(t,r.key);if(n.push(r),0===i)return new h(this,n);r=i<=0?r.left:r.right}return new h(this,[])},l.remove=function(t){var e=this.find(t);return 
e?e.remove():this},l.get=function(t){for(var e=this._compare,r=this.root;r;){var n=e(t,r.key);if(0===n)return r.value;r=n<=0?r.left:r.right}};var p=h.prototype;function d(t,e){t.key=e.key,t.value=e.value,t.left=e.left,t.right=e.right,t._color=e._color,t._count=e._count}function m(t,e){return te?1:0}Object.defineProperty(p,"valid",{get:function(){return this._stack.length>0}}),Object.defineProperty(p,"node",{get:function(){return this._stack.length>0?this._stack[this._stack.length-1]:null},enumerable:!0}),p.clone=function(){return new h(this.tree,this._stack.slice())},p.remove=function(){var t=this._stack;if(0===t.length)return this.tree;var e=new Array(t.length),r=t[t.length-1];e[e.length-1]=new n(r._color,r.key,r.value,r.left,r.right,r._count);for(var l=t.length-2;l>=0;--l){(r=t[l]).left===t[l+1]?e[l]=new n(r._color,r.key,r.value,e[l+1],r.right,r._count):e[l]=new n(r._color,r.key,r.value,r.left,e[l+1],r._count)}if((r=e[e.length-1]).left&&r.right){var c=e.length;for(r=r.left;r.right;)e.push(r),r=r.right;var u=e[c-1];e.push(new n(r._color,u.key,u.value,r.left,r.right,r._count)),e[c-1].key=r.key,e[c-1].value=r.value;for(l=e.length-2;l>=c;--l)r=e[l],e[l]=new n(r._color,r.key,r.value,r.left,e[l+1],r._count);e[c-1].left=e[c]}if(0===(r=e[e.length-1])._color){var f=e[e.length-2];f.left===r?f.left=null:f.right===r&&(f.right=null),e.pop();for(l=0;l=0;--l){if(e=t[l],0===l)return void(e._color=1);if((r=t[l-1]).left===e){if((n=r.right).right&&0===n.right._color){if(s=(n=r.right=i(n)).right=i(n.right),r.right=n.left,n.left=r,n.right=s,n._color=r._color,e._color=1,r._color=1,s._color=1,o(r),o(n),l>1)(c=t[l-2]).left===r?c.left=n:c.right=n;return void(t[l-1]=n)}if(n.left&&0===n.left._color){if(s=(n=r.right=i(n)).left=i(n.left),r.right=s.left,n.left=s.right,s.left=r,s.right=n,s._color=r._color,r._color=1,n._color=1,e._color=1,o(r),o(n),o(s),l>1)(c=t[l-2]).left===r?c.left=s:c.right=s;return void(t[l-1]=s)}if(1===n._color){if(0===r._color)return 
r._color=1,void(r.right=a(0,n));r.right=a(0,n);continue}n=i(n),r.right=n.left,n.left=r,n._color=r._color,r._color=0,o(r),o(n),l>1&&((c=t[l-2]).left===r?c.left=n:c.right=n),t[l-1]=n,t[l]=r,l+11)(c=t[l-2]).right===r?c.right=n:c.left=n;return void(t[l-1]=n)}if(n.right&&0===n.right._color){if(s=(n=r.left=i(n)).right=i(n.right),r.left=s.right,n.right=s.left,s.right=r,s.left=n,s._color=r._color,r._color=1,n._color=1,e._color=1,o(r),o(n),o(s),l>1)(c=t[l-2]).right===r?c.right=s:c.left=s;return void(t[l-1]=s)}if(1===n._color){if(0===r._color)return r._color=1,void(r.left=a(0,n));r.left=a(0,n);continue}var c;n=i(n),r.left=n.right,n.right=r,n._color=r._color,r._color=0,o(r),o(n),l>1&&((c=t[l-2]).right===r?c.right=n:c.left=n),t[l-1]=n,t[l]=r,l+10)return this._stack[this._stack.length-1].key},enumerable:!0}),Object.defineProperty(p,"value",{get:function(){if(this._stack.length>0)return this._stack[this._stack.length-1].value},enumerable:!0}),Object.defineProperty(p,"index",{get:function(){var t=0,e=this._stack;if(0===e.length){var r=this.tree.root;return r?r._count:0}e[e.length-1].left&&(t=e[e.length-1].left._count);for(var n=e.length-2;n>=0;--n)e[n+1]===e[n].right&&(++t,e[n].left&&(t+=e[n].left._count));return t},enumerable:!0}),p.next=function(){var t=this._stack;if(0!==t.length){var e=t[t.length-1];if(e.right)for(e=e.right;e;)t.push(e),e=e.left;else for(t.pop();t.length>0&&t[t.length-1].right===e;)e=t[t.length-1],t.pop()}},Object.defineProperty(p,"hasNext",{get:function(){var t=this._stack;if(0===t.length)return!1;if(t[t.length-1].right)return!0;for(var e=t.length-1;e>0;--e)if(t[e-1].left===t[e])return!0;return!1}}),p.update=function(t){var e=this._stack;if(0===e.length)throw new Error("Can't update empty node!");var r=new Array(e.length),i=e[e.length-1];r[r.length-1]=new n(i._color,i.key,t,i.left,i.right,i._count);for(var a=e.length-2;a>=0;--a)(i=e[a]).left===e[a+1]?r[a]=new n(i._color,i.key,i.value,r[a+1],i.right,i._count):r[a]=new 
n(i._color,i.key,i.value,i.left,r[a+1],i._count);return new s(this.tree._compare,r[0])},p.prev=function(){var t=this._stack;if(0!==t.length){var e=t[t.length-1];if(e.left)for(e=e.left;e;)t.push(e),e=e.right;else for(t.pop();t.length>0&&t[t.length-1].left===e;)e=t[t.length-1],t.pop()}},Object.defineProperty(p,"hasPrev",{get:function(){var t=this._stack;if(0===t.length)return!1;if(t[t.length-1].left)return!0;for(var e=t.length-1;e>0;--e)if(t[e-1].right===t[e])return!0;return!1}})},{}],70:[function(t,e,r){"use strict";e.exports=function(t,e){var r=new u(t);return r.update(e),r};var n=t("./lib/text.js"),i=t("./lib/lines.js"),a=t("./lib/background.js"),o=t("./lib/cube.js"),s=t("./lib/ticks.js"),l=new Float32Array([1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1]);function c(t,e){return t[0]=e[0],t[1]=e[1],t[2]=e[2],t}function u(t){this.gl=t,this.pixelRatio=1,this.bounds=[[-10,-10,-10],[10,10,10]],this.ticks=[[],[],[]],this.autoTicks=!0,this.tickSpacing=[1,1,1],this.tickEnable=[!0,!0,!0],this.tickFont=["sans-serif","sans-serif","sans-serif"],this.tickSize=[12,12,12],this.tickAngle=[0,0,0],this.tickAlign=["auto","auto","auto"],this.tickColor=[[0,0,0,1],[0,0,0,1],[0,0,0,1]],this.tickPad=[10,10,10],this.lastCubeProps={cubeEdges:[0,0,0],axis:[0,0,0]},this.labels=["x","y","z"],this.labelEnable=[!0,!0,!0],this.labelFont="sans-serif",this.labelSize=[20,20,20],this.labelAngle=[0,0,0],this.labelAlign=["auto","auto","auto"],this.labelColor=[[0,0,0,1],[0,0,0,1],[0,0,0,1]],this.labelPad=[10,10,10],this.lineEnable=[!0,!0,!0],this.lineMirror=[!1,!1,!1],this.lineWidth=[1,1,1],this.lineColor=[[0,0,0,1],[0,0,0,1],[0,0,0,1]],this.lineTickEnable=[!0,!0,!0],this.lineTickMirror=[!1,!1,!1],this.lineTickLength=[0,0,0],this.lineTickWidth=[1,1,1],this.lineTickColor=[[0,0,0,1],[0,0,0,1],[0,0,0,1]],this.gridEnable=[!0,!0,!0],this.gridWidth=[1,1,1],this.gridColor=[[0,0,0,1],[0,0,0,1],[0,0,0,1]],this.zeroEnable=[!0,!0,!0],this.zeroLineColor=[[0,0,0,1],[0,0,0,1],[0,0,0,1]],this.zeroLineWidth=[2,2,2],this.backgroundEn
able=[!1,!1,!1],this.backgroundColor=[[.8,.8,.8,.5],[.8,.8,.8,.5],[.8,.8,.8,.5]],this._firstInit=!0,this._text=null,this._lines=null,this._background=a(t)}var f=u.prototype;function h(){this.primalOffset=[0,0,0],this.primalMinor=[0,0,0],this.mirrorOffset=[0,0,0],this.mirrorMinor=[0,0,0]}f.update=function(t){function e(e,r,n){if(n in t){var i,a=t[n],o=this[n];(e?Array.isArray(a)&&Array.isArray(a[0]):Array.isArray(a))?this[n]=i=[r(a[0]),r(a[1]),r(a[2])]:this[n]=i=[r(a),r(a),r(a)];for(var s=0;s<3;++s)if(i[s]!==o[s])return!0}return!1}t=t||{};var r,a=e.bind(this,!1,Number),o=e.bind(this,!1,Boolean),l=e.bind(this,!1,String),c=e.bind(this,!0,(function(t){if(Array.isArray(t)){if(3===t.length)return[+t[0],+t[1],+t[2],1];if(4===t.length)return[+t[0],+t[1],+t[2],+t[3]]}return[0,0,0,1]})),u=!1,f=!1;if("bounds"in t)for(var h=t.bounds,p=0;p<2;++p)for(var d=0;d<3;++d)h[p][d]!==this.bounds[p][d]&&(f=!0),this.bounds[p][d]=h[p][d];if("ticks"in t){r=t.ticks,u=!0,this.autoTicks=!1;for(p=0;p<3;++p)this.tickSpacing[p]=0}else a("tickSpacing")&&(this.autoTicks=!0,f=!0);if(this._firstInit&&("ticks"in t||"tickSpacing"in t||(this.autoTicks=!0),f=!0,u=!0,this._firstInit=!1),f&&this.autoTicks&&(r=s.create(this.bounds,this.tickSpacing),u=!0),u){for(p=0;p<3;++p)r[p].sort((function(t,e){return t.x-e.x}));s.equal(r,this.ticks)?u=!1:this.ticks=r}o("tickEnable"),l("tickFont")&&(u=!0),a("tickSize"),a("tickAngle"),a("tickPad"),c("tickColor");var 
m=l("labels");l("labelFont")&&(m=!0),o("labelEnable"),a("labelSize"),a("labelPad"),c("labelColor"),o("lineEnable"),o("lineMirror"),a("lineWidth"),c("lineColor"),o("lineTickEnable"),o("lineTickMirror"),a("lineTickLength"),a("lineTickWidth"),c("lineTickColor"),o("gridEnable"),a("gridWidth"),c("gridColor"),o("zeroEnable"),c("zeroLineColor"),a("zeroLineWidth"),o("backgroundEnable"),c("backgroundColor"),this._text?this._text&&(m||u)&&this._text.update(this.bounds,this.labels,this.labelFont,this.ticks,this.tickFont):this._text=n(this.gl,this.bounds,this.labels,this.labelFont,this.ticks,this.tickFont),this._lines&&u&&(this._lines.dispose(),this._lines=null),this._lines||(this._lines=i(this.gl,this.bounds,this.ticks))};var p=[new h,new h,new h];function d(t,e,r,n,i){for(var a=t.primalOffset,o=t.primalMinor,s=t.mirrorOffset,l=t.mirrorMinor,c=n[e],u=0;u<3;++u)if(e!==u){var f=a,h=s,p=o,d=l;c&1<0?(p[u]=-1,d[u]=0):(p[u]=0,d[u]=1)}}var m=[0,0,0],g={model:l,view:l,projection:l,_ortho:!1};f.isOpaque=function(){return!0},f.isTransparent=function(){return!1},f.drawTransparent=function(t){};var v=[0,0,0],y=[0,0,0],x=[0,0,0];f.draw=function(t){t=t||g;for(var e=this.gl,r=t.model||l,n=t.view||l,i=t.projection||l,a=this.bounds,s=t._ortho||!1,u=o(r,n,i,a,s),f=u.cubeEdges,h=u.axis,b=n[12],_=n[13],w=n[14],T=n[15],k=(s?2:1)*this.pixelRatio*(i[3]*b+i[7]*_+i[11]*w+i[15]*T)/e.drawingBufferHeight,A=0;A<3;++A)this.lastCubeProps.cubeEdges[A]=f[A],this.lastCubeProps.axis[A]=h[A];var M=p;for(A=0;A<3;++A)d(p[A],A,this.bounds,f,h);e=this.gl;var S,E=m;for(A=0;A<3;++A)this.backgroundEnable[A]?E[A]=h[A]:E[A]=0;this._background.draw(r,n,i,a,E,this.backgroundColor),this._lines.bind(r,n,i,this);for(A=0;A<3;++A){var L=[0,0,0];h[A]>0?L[A]=a[1][A]:L[A]=a[0][A];for(var C=0;C<2;++C){var 
P=(A+1+C)%3,I=(A+1+(1^C))%3;this.gridEnable[P]&&this._lines.drawGrid(P,I,this.bounds,L,this.gridColor[P],this.gridWidth[P]*this.pixelRatio)}for(C=0;C<2;++C){P=(A+1+C)%3,I=(A+1+(1^C))%3;this.zeroEnable[I]&&Math.min(a[0][I],a[1][I])<=0&&Math.max(a[0][I],a[1][I])>=0&&this._lines.drawZero(P,I,this.bounds,L,this.zeroLineColor[I],this.zeroLineWidth[I]*this.pixelRatio)}}for(A=0;A<3;++A){this.lineEnable[A]&&this._lines.drawAxisLine(A,this.bounds,M[A].primalOffset,this.lineColor[A],this.lineWidth[A]*this.pixelRatio),this.lineMirror[A]&&this._lines.drawAxisLine(A,this.bounds,M[A].mirrorOffset,this.lineColor[A],this.lineWidth[A]*this.pixelRatio);var O=c(v,M[A].primalMinor),z=c(y,M[A].mirrorMinor),D=this.lineTickLength;for(C=0;C<3;++C){var R=k/r[5*C];O[C]*=D[C]*R,z[C]*=D[C]*R}this.lineTickEnable[A]&&this._lines.drawAxisTicks(A,M[A].primalOffset,O,this.lineTickColor[A],this.lineTickWidth[A]*this.pixelRatio),this.lineTickMirror[A]&&this._lines.drawAxisTicks(A,M[A].mirrorOffset,z,this.lineTickColor[A],this.lineTickWidth[A]*this.pixelRatio)}this._lines.unbind(),this._text.bind(r,n,i,this.pixelRatio);var F,B;function N(t){(B=[0,0,0])[t]=1}function j(t,e,r){var n=(t+1)%3,i=(t+2)%3,a=e[n],o=e[i],s=r[n],l=r[i];a>0&&l>0||a>0&&l<0||a<0&&l>0||a<0&&l<0?N(n):(o>0&&s>0||o>0&&s<0||o<0&&s>0||o<0&&s<0)&&N(i)}for(A=0;A<3;++A){var U=M[A].primalMinor,V=M[A].mirrorMinor,H=c(x,M[A].primalOffset);for(C=0;C<3;++C)this.lineTickEnable[A]&&(H[C]+=k*U[C]*Math.max(this.lineTickLength[C],0)/r[5*C]);var 
q=[0,0,0];if(q[A]=1,this.tickEnable[A]){-3600===this.tickAngle[A]?(this.tickAngle[A]=0,this.tickAlign[A]="auto"):this.tickAlign[A]=-1,F=1,"auto"===(S=[this.tickAlign[A],.5,F])[0]?S[0]=0:S[0]=parseInt(""+S[0]),B=[0,0,0],j(A,U,V);for(C=0;C<3;++C)H[C]+=k*U[C]*this.tickPad[C]/r[5*C];this._text.drawTicks(A,this.tickSize[A],this.tickAngle[A],H,this.tickColor[A],q,B,S)}if(this.labelEnable[A]){F=0,B=[0,0,0],this.labels[A].length>4&&(N(A),F=1),"auto"===(S=[this.labelAlign[A],.5,F])[0]?S[0]=0:S[0]=parseInt(""+S[0]);for(C=0;C<3;++C)H[C]+=k*U[C]*this.labelPad[C]/r[5*C];H[A]+=.5*(a[0][A]+a[1][A]),this._text.drawLabel(A,this.labelSize[A],this.labelAngle[A],H,this.labelColor[A],[0,0,0],B,S)}}this._text.unbind()},f.dispose=function(){this._text.dispose(),this._lines.dispose(),this._background.dispose(),this._lines=null,this._text=null,this._background=null,this.gl=null}},{"./lib/background.js":71,"./lib/cube.js":72,"./lib/lines.js":73,"./lib/text.js":75,"./lib/ticks.js":76}],71:[function(t,e,r){"use strict";e.exports=function(t){for(var e=[],r=[],s=0,l=0;l<3;++l)for(var c=(l+1)%3,u=(l+2)%3,f=[0,0,0],h=[0,0,0],p=-1;p<=1;p+=2){r.push(s,s+2,s+1,s+1,s+2,s+3),f[l]=p,h[l]=p;for(var d=-1;d<=1;d+=2){f[c]=d;for(var m=-1;m<=1;m+=2)f[u]=m,e.push(f[0],f[1],f[2],h[0],h[1],h[2]),s+=1}var g=c;c=u,u=g}var v=n(t,new Float32Array(e)),y=n(t,new Uint16Array(r),t.ELEMENT_ARRAY_BUFFER),x=i(t,[{buffer:v,type:t.FLOAT,size:3,offset:0,stride:24},{buffer:v,type:t.FLOAT,size:3,offset:12,stride:24}],y),b=a(t);return b.attributes.position.location=0,b.attributes.normal.location=1,new o(t,v,x,b)};var n=t("gl-buffer"),i=t("gl-vao"),a=t("./shaders").bg;function o(t,e,r,n){this.gl=t,this.buffer=e,this.vao=r,this.shader=n}var s=o.prototype;s.draw=function(t,e,r,n,i,a){for(var o=!1,s=0;s<3;++s)o=o||i[s];if(o){var 
l=this.gl;l.enable(l.POLYGON_OFFSET_FILL),l.polygonOffset(1,2),this.shader.bind(),this.shader.uniforms={model:t,view:e,projection:r,bounds:n,enable:i,colors:a},this.vao.bind(),this.vao.draw(this.gl.TRIANGLES,36),this.vao.unbind(),l.disable(l.POLYGON_OFFSET_FILL)}},s.dispose=function(){this.vao.dispose(),this.buffer.dispose(),this.shader.dispose()}},{"./shaders":74,"gl-buffer":78,"gl-vao":150}],72:[function(t,e,r){"use strict";e.exports=function(t,e,r,a,p){i(s,e,t),i(s,r,s);for(var y=0,x=0;x<2;++x){u[2]=a[x][2];for(var b=0;b<2;++b){u[1]=a[b][1];for(var _=0;_<2;++_)u[0]=a[_][0],h(l[y],u,s),y+=1}}var w=-1;for(x=0;x<8;++x){for(var T=l[x][3],k=0;k<3;++k)c[x][k]=l[x][k]/T;p&&(c[x][2]*=-1),T<0&&(w<0||c[x][2]E&&(w|=1<E&&(w|=1<c[x][1])&&(R=x);var F=-1;for(x=0;x<3;++x){if((N=R^1<c[B][0]&&(B=N)}var j=m;j[0]=j[1]=j[2]=0,j[n.log2(F^R)]=R&F,j[n.log2(R^B)]=R&B;var U=7^B;U===w||U===D?(U=7^F,j[n.log2(B^U)]=U&B):j[n.log2(F^U)]=U&F;var V=g,H=w;for(A=0;A<3;++A)V[A]=H&1< HALF_PI) && (b <= ONE_AND_HALF_PI)) ?\n b - PI :\n b;\n}\n\nfloat look_horizontal_or_vertical(float a, float ratio) {\n // ratio controls the ratio between being horizontal to (vertical + horizontal)\n // if ratio is set to 0.5 then it is 50%, 50%.\n // when using a higher ratio e.g. 0.75 the result would\n // likely be more horizontal than vertical.\n\n float b = positive_angle(a);\n\n return\n (b < ( ratio) * HALF_PI) ? 0.0 :\n (b < (2.0 - ratio) * HALF_PI) ? -HALF_PI :\n (b < (2.0 + ratio) * HALF_PI) ? 0.0 :\n (b < (4.0 - ratio) * HALF_PI) ? HALF_PI :\n 0.0;\n}\n\nfloat roundTo(float a, float b) {\n return float(b * floor((a + 0.5 * b) / b));\n}\n\nfloat look_round_n_directions(float a, int n) {\n float b = positive_angle(a);\n float div = TWO_PI / float(n);\n float c = roundTo(b, div);\n return look_upwards(c);\n}\n\nfloat applyAlignOption(float rawAngle, float delta) {\n return\n (option > 2) ? look_round_n_directions(rawAngle + delta, option) : // option 3-n: round to n directions\n (option == 2) ? 
look_horizontal_or_vertical(rawAngle + delta, hv_ratio) : // horizontal or vertical\n (option == 1) ? rawAngle + delta : // use free angle, and flip to align with one direction of the axis\n (option == 0) ? look_upwards(rawAngle) : // use free angle, and stay upwards\n (option ==-1) ? 0.0 : // useful for backward compatibility, all texts remains horizontal\n rawAngle; // otherwise return back raw input angle\n}\n\nbool isAxisTitle = (axis.x == 0.0) &&\n (axis.y == 0.0) &&\n (axis.z == 0.0);\n\nvoid main() {\n //Compute world offset\n float axisDistance = position.z;\n vec3 dataPosition = axisDistance * axis + offset;\n\n float beta = angle; // i.e. user defined attributes for each tick\n\n float axisAngle;\n float clipAngle;\n float flip;\n\n if (enableAlign) {\n axisAngle = (isAxisTitle) ? HALF_PI :\n computeViewAngle(dataPosition, dataPosition + axis);\n clipAngle = computeViewAngle(dataPosition, dataPosition + alignDir);\n\n axisAngle += (sin(axisAngle) < 0.0) ? PI : 0.0;\n clipAngle += (sin(clipAngle) < 0.0) ? PI : 0.0;\n\n flip = (dot(vec2(cos(axisAngle), sin(axisAngle)),\n vec2(sin(clipAngle),-cos(clipAngle))) > 0.0) ? 
1.0 : 0.0;\n\n beta += applyAlignOption(clipAngle, flip * PI);\n }\n\n //Compute plane offset\n vec2 planeCoord = position.xy * pixelScale;\n\n mat2 planeXform = scale * mat2(\n cos(beta), sin(beta),\n -sin(beta), cos(beta)\n );\n\n vec2 viewOffset = 2.0 * planeXform * planeCoord / resolution;\n\n //Compute clip position\n vec3 clipPosition = project(dataPosition);\n\n //Apply text offset in clip coordinates\n clipPosition += vec3(viewOffset, 0.0);\n\n //Done\n gl_Position = vec4(clipPosition, 1.0);\n}"]),l=n(["precision highp float;\n#define GLSLIFY 1\n\nuniform vec4 color;\nvoid main() {\n gl_FragColor = color;\n}"]);r.text=function(t){return i(t,s,l,null,[{name:"position",type:"vec3"}])};var c=n(["precision highp float;\n#define GLSLIFY 1\n\nattribute vec3 position;\nattribute vec3 normal;\n\nuniform mat4 model, view, projection;\nuniform vec3 enable;\nuniform vec3 bounds[2];\n\nvarying vec3 colorChannel;\n\nvoid main() {\n\n vec3 signAxis = sign(bounds[1] - bounds[0]);\n\n vec3 realNormal = signAxis * normal;\n\n if(dot(realNormal, enable) > 0.0) {\n vec3 minRange = min(bounds[0], bounds[1]);\n vec3 maxRange = max(bounds[0], bounds[1]);\n vec3 nPosition = mix(minRange, maxRange, 0.5 * (position + 1.0));\n gl_Position = projection * view * model * vec4(nPosition, 1.0);\n } else {\n gl_Position = vec4(0,0,0,0);\n }\n\n colorChannel = abs(realNormal);\n}"]),u=n(["precision highp float;\n#define GLSLIFY 1\n\nuniform vec4 colors[3];\n\nvarying vec3 colorChannel;\n\nvoid main() {\n gl_FragColor = colorChannel.x * colors[0] +\n colorChannel.y * colors[1] +\n colorChannel.z * colors[2];\n}"]);r.bg=function(t){return i(t,c,u,null,[{name:"position",type:"vec3"},{name:"normal",type:"vec3"}])}},{"gl-shader":132,glslify:231}],75:[function(t,e,r){(function(r){(function(){"use strict";e.exports=function(t,e,r,a,s,l){var u=n(t),f=i(t,[{buffer:u,size:3}]),h=o(t);h.attributes.position.location=0;var p=new c(t,h,u,f);return p.update(e,r,a,s,l),p};var 
n=t("gl-buffer"),i=t("gl-vao"),a=t("vectorize-text"),o=t("./shaders").text,s=window||r.global||{},l=s.__TEXT_CACHE||{};s.__TEXT_CACHE={};function c(t,e,r,n){this.gl=t,this.shader=e,this.buffer=r,this.vao=n,this.tickOffset=this.tickCount=this.labelOffset=this.labelCount=null}var u=c.prototype,f=[0,0];u.bind=function(t,e,r,n){this.vao.bind(),this.shader.bind();var i=this.shader.uniforms;i.model=t,i.view=e,i.projection=r,i.pixelScale=n,f[0]=this.gl.drawingBufferWidth,f[1]=this.gl.drawingBufferHeight,this.shader.uniforms.resolution=f},u.unbind=function(){this.vao.unbind()},u.update=function(t,e,r,n,i){var o=[];function s(t,e,r,n,i,s){var c=l[r];c||(c=l[r]={});var u=c[e];u||(u=c[e]=function(t,e){try{return a(t,e)}catch(e){return console.warn('error vectorizing text:"'+t+'" error:',e),{cells:[],positions:[]}}}(e,{triangles:!0,font:r,textAlign:"center",textBaseline:"middle",lineSpacing:i,styletags:s}));for(var f=(n||12)/12,h=u.positions,p=u.cells,d=0,m=p.length;d=0;--v){var y=h[g[v]];o.push(f*y[0],-f*y[1],t)}}for(var c=[0,0,0],u=[0,0,0],f=[0,0,0],h=[0,0,0],p={breaklines:!0,bolds:!0,italics:!0,subscripts:!0,superscripts:!0},d=0;d<3;++d){f[d]=o.length/3|0,s(.5*(t[0][d]+t[1][d]),e[d],r[d],12,1.25,p),h[d]=(o.length/3|0)-f[d],c[d]=o.length/3|0;for(var m=0;m=0&&(i=r.length-n-1);var a=Math.pow(10,i),o=Math.round(t*e*a),s=o+"";if(s.indexOf("e")>=0)return s;var l=o/a,c=o%a;o<0?(l=0|-Math.ceil(l),c=0|-c):(l=0|Math.floor(l),c|=0);var u=""+l;if(o<0&&(u="-"+u),i){for(var f=""+c;f.length=t[0][i];--o)a.push({x:o*e[i],text:n(e[i],o)});r.push(a)}return r},r.equal=function(t,e){for(var r=0;r<3;++r){if(t[r].length!==e[r].length)return!1;for(var n=0;nr)throw new Error("gl-buffer: If resizing buffer, must not specify offset");return t.bufferSubData(e,a,i),r}function u(t,e){for(var 
r=n.malloc(t.length,e),i=t.length,a=0;a=0;--n){if(e[n]!==r)return!1;r*=t[n]}return!0}(t.shape,t.stride))0===t.offset&&t.data.length===t.shape[0]?this.length=c(this.gl,this.type,this.length,this.usage,t.data,e):this.length=c(this.gl,this.type,this.length,this.usage,t.data.subarray(t.offset,t.shape[0]),e);else{var s=n.malloc(t.size,r),l=a(s,t.shape);i.assign(l,t),this.length=c(this.gl,this.type,this.length,this.usage,e<0?s:s.subarray(0,t.size),e),n.free(s)}}else if(Array.isArray(t)){var f;f=this.type===this.gl.ELEMENT_ARRAY_BUFFER?u(t,"uint16"):u(t,"float32"),this.length=c(this.gl,this.type,this.length,this.usage,e<0?f:f.subarray(0,t.length),e),n.free(f)}else if("object"==typeof t&&"number"==typeof t.length)this.length=c(this.gl,this.type,this.length,this.usage,t,e);else{if("number"!=typeof t&&void 0!==t)throw new Error("gl-buffer: Invalid data type");if(e>=0)throw new Error("gl-buffer: Cannot specify offset when resizing buffer");(t|=0)<=0&&(t=1),this.gl.bufferData(this.type,0|t,this.usage),this.length=t}},e.exports=function(t,e,r,n){if(r=r||t.ARRAY_BUFFER,n=n||t.DYNAMIC_DRAW,r!==t.ARRAY_BUFFER&&r!==t.ELEMENT_ARRAY_BUFFER)throw new Error("gl-buffer: Invalid type for webgl buffer, must be either gl.ARRAY_BUFFER or gl.ELEMENT_ARRAY_BUFFER");if(n!==t.DYNAMIC_DRAW&&n!==t.STATIC_DRAW&&n!==t.STREAM_DRAW)throw new Error("gl-buffer: Invalid usage for buffer, must be either gl.DYNAMIC_DRAW, gl.STATIC_DRAW or gl.STREAM_DRAW");var i=t.createBuffer(),a=new s(t,r,i,0,n);return a.update(e),a}},{ndarray:259,"ndarray-ops":254,"typedarray-pool":308}],79:[function(t,e,r){"use strict";var n=t("gl-vec3");e.exports=function(t,e){var r=t.positions,i=t.vectors,a={positions:[],vertexIntensity:[],vertexIntensityBounds:t.vertexIntensityBounds,vectors:[],cells:[],coneOffset:t.coneOffset,colormap:t.colormap};if(0===t.positions.length)return e&&(e[0]=[0,0,0],e[1]=[0,0,0]),a;for(var o=0,s=1/0,l=-1/0,c=1/0,u=-1/0,f=1/0,h=-1/0,p=null,d=null,m=[],g=1/0,v=!1,y=0;yo&&(o=n.length(b)),y){var 
_=2*n.distance(p,x)/(n.length(d)+n.length(b));_?(g=Math.min(g,_),v=!1):v=!0}v||(p=x,d=b),m.push(b)}var w=[s,c,f],T=[l,u,h];e&&(e[0]=w,e[1]=T),0===o&&(o=1);var k=1/o;isFinite(g)||(g=1),a.vectorScale=g;var A=t.coneSize||.5;t.absoluteConeSize&&(A=t.absoluteConeSize*k),a.coneScale=A;y=0;for(var M=0;y=1},p.isTransparent=function(){return this.opacity<1},p.pickSlots=1,p.setPickBase=function(t){this.pickId=t},p.update=function(t){t=t||{};var e=this.gl;this.dirty=!0,"lightPosition"in t&&(this.lightPosition=t.lightPosition),"opacity"in t&&(this.opacity=t.opacity),"ambient"in t&&(this.ambientLight=t.ambient),"diffuse"in t&&(this.diffuseLight=t.diffuse),"specular"in t&&(this.specularLight=t.specular),"roughness"in t&&(this.roughness=t.roughness),"fresnel"in t&&(this.fresnel=t.fresnel),void 0!==t.tubeScale&&(this.tubeScale=t.tubeScale),void 0!==t.vectorScale&&(this.vectorScale=t.vectorScale),void 0!==t.coneScale&&(this.coneScale=t.coneScale),void 0!==t.coneOffset&&(this.coneOffset=t.coneOffset),t.colormap&&(this.texture.shape=[256,256],this.texture.minFilter=e.LINEAR_MIPMAP_LINEAR,this.texture.magFilter=e.LINEAR,this.texture.setPixels(function(t){for(var e=u({colormap:t,nshades:256,format:"rgba"}),r=new Uint8Array(1024),n=0;n<256;++n){for(var i=e[n],a=0;a<3;++a)r[4*n+a]=i[a];r[4*n+3]=255*i[3]}return c(r,[256,256,4],[4,0,1])}(t.colormap)),this.texture.generateMipmap());var r=t.cells,n=t.positions,i=t.vectors;if(n&&r&&i){var a=[],o=[],s=[],l=[],f=[];this.cells=r,this.positions=n,this.vectors=i;var h=t.meshColor||[1,1,1,1],p=t.vertexIntensity,d=1/0,m=-1/0;if(p)if(t.vertexIntensityBounds)d=+t.vertexIntensityBounds[0],m=+t.vertexIntensityBounds[1];else for(var g=0;g0){var m=this.triShader;m.bind(),m.uniforms=c,this.triangleVAO.bind(),e.drawArrays(e.TRIANGLES,0,3*this.triangleCount),this.triangleVAO.unbind()}},p.drawPick=function(t){t=t||{};for(var 
e=this.gl,r=t.model||f,n=t.view||f,i=t.projection||f,a=[[-1e6,-1e6,-1e6],[1e6,1e6,1e6]],o=0;o<3;++o)a[0][o]=Math.max(a[0][o],this.clipBounds[0][o]),a[1][o]=Math.min(a[1][o],this.clipBounds[1][o]);this._model=[].slice.call(r),this._view=[].slice.call(n),this._projection=[].slice.call(i),this._resolution=[e.drawingBufferWidth,e.drawingBufferHeight];var s={model:r,view:n,projection:i,clipBounds:a,tubeScale:this.tubeScale,vectorScale:this.vectorScale,coneScale:this.coneScale,coneOffset:this.coneOffset,pickId:this.pickId/255},l=this.pickShader;l.bind(),l.uniforms=s,this.triangleCount>0&&(this.triangleVAO.bind(),e.drawArrays(e.TRIANGLES,0,3*this.triangleCount),this.triangleVAO.unbind())},p.pick=function(t){if(!t)return null;if(t.id!==this.pickId)return null;var e=t.value[0]+256*t.value[1]+65536*t.value[2],r=this.cells[e],n=this.positions[r[1]].slice(0,3),i={position:n,dataCoordinate:n,index:Math.floor(r[1]/48)};return"cone"===this.traceType?i.index=Math.floor(r[1]/48):"streamtube"===this.traceType&&(i.intensity=this.intensity[r[1]],i.velocity=this.vectors[r[1]].slice(0,3),i.divergence=this.vectors[r[1]][3],i.index=e),i},p.dispose=function(){this.texture.dispose(),this.triShader.dispose(),this.pickShader.dispose(),this.triangleVAO.dispose(),this.trianglePositions.dispose(),this.triangleVectors.dispose(),this.triangleColors.dispose(),this.triangleUVs.dispose(),this.triangleIds.dispose()},e.exports=function(t,e,r){var n=r.shaders;1===arguments.length&&(t=(e=t).gl);var s=d(t,n),l=m(t,n),u=o(t,c(new Uint8Array([255,255,255,255]),[1,1,4]));u.generateMipmap(),u.minFilter=t.LINEAR_MIPMAP_LINEAR,u.magFilter=t.LINEAR;var f=i(t),p=i(t),g=i(t),v=i(t),y=i(t),x=a(t,[{buffer:f,type:t.FLOAT,size:4},{buffer:y,type:t.UNSIGNED_BYTE,size:4,normalized:!0},{buffer:g,type:t.FLOAT,size:4},{buffer:v,type:t.FLOAT,size:2},{buffer:p,type:t.FLOAT,size:4}]),b=new h(t,u,s,l,f,p,y,g,v,x,r.traceType||"cone");return 
b.update(e),b}},{colormap:53,"gl-buffer":78,"gl-mat4/invert":98,"gl-mat4/multiply":100,"gl-shader":132,"gl-texture2d":146,"gl-vao":150,ndarray:259}],81:[function(t,e,r){var n=t("glslify"),i=n(["precision highp float;\n\nprecision highp float;\n#define GLSLIFY 1\n\nvec3 getOrthogonalVector(vec3 v) {\n // Return up-vector for only-z vector.\n // Return ax + by + cz = 0, a point that lies on the plane that has v as a normal and that isn't (0,0,0).\n // From the above if-statement we have ||a|| > 0 U ||b|| > 0.\n // Assign z = 0, x = -b, y = a:\n // a*-b + b*a + c*0 = -ba + ba + 0 = 0\n if (v.x*v.x > v.z*v.z || v.y*v.y > v.z*v.z) {\n return normalize(vec3(-v.y, v.x, 0.0));\n } else {\n return normalize(vec3(0.0, v.z, -v.y));\n }\n}\n\n// Calculate the cone vertex and normal at the given index.\n//\n// The returned vertex is for a cone with its top at origin and height of 1.0,\n// pointing in the direction of the vector attribute.\n//\n// Each cone is made up of a top vertex, a center base vertex and base perimeter vertices.\n// These vertices are used to make up the triangles of the cone by the following:\n// segment + 0 top vertex\n// segment + 1 perimeter vertex a+1\n// segment + 2 perimeter vertex a\n// segment + 3 center base vertex\n// segment + 4 perimeter vertex a\n// segment + 5 perimeter vertex a+1\n// Where segment is the number of the radial segment * 6 and a is the angle at that radial segment.\n// To go from index to segment, floor(index / 6)\n// To go from segment to angle, 2*pi * (segment/segmentCount)\n// To go from index to segment index, index - (segment*6)\n//\nvec3 getConePosition(vec3 d, float rawIndex, float coneOffset, out vec3 normal) {\n\n const float segmentCount = 8.0;\n\n float index = rawIndex - floor(rawIndex /\n (segmentCount * 6.0)) *\n (segmentCount * 6.0);\n\n float segment = floor(0.001 + index/6.0);\n float segmentIndex = index - (segment*6.0);\n\n normal = -normalize(d);\n\n if (segmentIndex > 2.99 && segmentIndex < 3.01) {\n return 
mix(vec3(0.0), -d, coneOffset);\n }\n\n float nextAngle = (\n (segmentIndex > 0.99 && segmentIndex < 1.01) ||\n (segmentIndex > 4.99 && segmentIndex < 5.01)\n ) ? 1.0 : 0.0;\n float angle = 2.0 * 3.14159 * ((segment + nextAngle) / segmentCount);\n\n vec3 v1 = mix(d, vec3(0.0), coneOffset);\n vec3 v2 = v1 - d;\n\n vec3 u = getOrthogonalVector(d);\n vec3 v = normalize(cross(u, d));\n\n vec3 x = u * cos(angle) * length(d)*0.25;\n vec3 y = v * sin(angle) * length(d)*0.25;\n vec3 v3 = v2 + x + y;\n if (segmentIndex < 3.0) {\n vec3 tx = u * sin(angle);\n vec3 ty = v * -cos(angle);\n vec3 tangent = tx + ty;\n normal = normalize(cross(v3 - v1, tangent));\n }\n\n if (segmentIndex == 0.0) {\n return mix(d, vec3(0.0), coneOffset);\n }\n return v3;\n}\n\nattribute vec3 vector;\nattribute vec4 color, position;\nattribute vec2 uv;\n\nuniform float vectorScale, coneScale, coneOffset;\nuniform mat4 model, view, projection, inverseModel;\nuniform vec3 eyePosition, lightPosition;\n\nvarying vec3 f_normal, f_lightDirection, f_eyeDirection, f_data, f_position;\nvarying vec4 f_color;\nvarying vec2 f_uv;\n\nvoid main() {\n // Scale the vector magnitude to stay constant with\n // model & view changes.\n vec3 normal;\n vec3 XYZ = getConePosition(mat3(model) * ((vectorScale * coneScale) * vector), position.w, coneOffset, normal);\n vec4 conePosition = model * vec4(position.xyz, 1.0) + vec4(XYZ, 0.0);\n\n //Lighting geometry parameters\n vec4 cameraCoordinate = view * conePosition;\n cameraCoordinate.xyz /= cameraCoordinate.w;\n f_lightDirection = lightPosition - cameraCoordinate.xyz;\n f_eyeDirection = eyePosition - cameraCoordinate.xyz;\n f_normal = normalize((vec4(normal, 0.0) * inverseModel).xyz);\n\n // vec4 m_position = model * vec4(conePosition, 1.0);\n vec4 t_position = view * conePosition;\n gl_Position = projection * t_position;\n\n f_color = color;\n f_data = conePosition.xyz;\n f_position = position.xyz;\n f_uv = uv;\n}\n"]),a=n(["#extension GL_OES_standard_derivatives : 
enable\n\nprecision highp float;\n#define GLSLIFY 1\n\nfloat beckmannDistribution(float x, float roughness) {\n float NdotH = max(x, 0.0001);\n float cos2Alpha = NdotH * NdotH;\n float tan2Alpha = (cos2Alpha - 1.0) / cos2Alpha;\n float roughness2 = roughness * roughness;\n float denom = 3.141592653589793 * roughness2 * cos2Alpha * cos2Alpha;\n return exp(tan2Alpha / roughness2) / denom;\n}\n\nfloat cookTorranceSpecular(\n vec3 lightDirection,\n vec3 viewDirection,\n vec3 surfaceNormal,\n float roughness,\n float fresnel) {\n\n float VdotN = max(dot(viewDirection, surfaceNormal), 0.0);\n float LdotN = max(dot(lightDirection, surfaceNormal), 0.0);\n\n //Half angle vector\n vec3 H = normalize(lightDirection + viewDirection);\n\n //Geometric term\n float NdotH = max(dot(surfaceNormal, H), 0.0);\n float VdotH = max(dot(viewDirection, H), 0.000001);\n float LdotH = max(dot(lightDirection, H), 0.000001);\n float G1 = (2.0 * NdotH * VdotN) / VdotH;\n float G2 = (2.0 * NdotH * LdotN) / LdotH;\n float G = min(1.0, min(G1, G2));\n \n //Distribution term\n float D = beckmannDistribution(NdotH, roughness);\n\n //Fresnel term\n float F = pow(1.0 - VdotN, fresnel);\n\n //Multiply terms and done\n return G * F * D / max(3.14159265 * VdotN, 0.000001);\n}\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nuniform vec3 clipBounds[2];\nuniform float roughness, fresnel, kambient, kdiffuse, kspecular, opacity;\nuniform sampler2D texture;\n\nvarying vec3 f_normal, f_lightDirection, f_eyeDirection, f_data, f_position;\nvarying vec4 f_color;\nvarying vec2 f_uv;\n\nvoid main() {\n if 
(outOfRange(clipBounds[0], clipBounds[1], f_position)) discard;\n vec3 N = normalize(f_normal);\n vec3 L = normalize(f_lightDirection);\n vec3 V = normalize(f_eyeDirection);\n\n if(gl_FrontFacing) {\n N = -N;\n }\n\n float specular = min(1.0, max(0.0, cookTorranceSpecular(L, V, N, roughness, fresnel)));\n float diffuse = min(kambient + kdiffuse * max(dot(N, L), 0.0), 1.0);\n\n vec4 surfaceColor = f_color * texture2D(texture, f_uv);\n vec4 litColor = surfaceColor.a * vec4(diffuse * surfaceColor.rgb + kspecular * vec3(1,1,1) * specular, 1.0);\n\n gl_FragColor = litColor * opacity;\n}\n"]),o=n(["precision highp float;\n\nprecision highp float;\n#define GLSLIFY 1\n\nvec3 getOrthogonalVector(vec3 v) {\n // Return up-vector for only-z vector.\n // Return ax + by + cz = 0, a point that lies on the plane that has v as a normal and that isn't (0,0,0).\n // From the above if-statement we have ||a|| > 0 U ||b|| > 0.\n // Assign z = 0, x = -b, y = a:\n // a*-b + b*a + c*0 = -ba + ba + 0 = 0\n if (v.x*v.x > v.z*v.z || v.y*v.y > v.z*v.z) {\n return normalize(vec3(-v.y, v.x, 0.0));\n } else {\n return normalize(vec3(0.0, v.z, -v.y));\n }\n}\n\n// Calculate the cone vertex and normal at the given index.\n//\n// The returned vertex is for a cone with its top at origin and height of 1.0,\n// pointing in the direction of the vector attribute.\n//\n// Each cone is made up of a top vertex, a center base vertex and base perimeter vertices.\n// These vertices are used to make up the triangles of the cone by the following:\n// segment + 0 top vertex\n// segment + 1 perimeter vertex a+1\n// segment + 2 perimeter vertex a\n// segment + 3 center base vertex\n// segment + 4 perimeter vertex a\n// segment + 5 perimeter vertex a+1\n// Where segment is the number of the radial segment * 6 and a is the angle at that radial segment.\n// To go from index to segment, floor(index / 6)\n// To go from segment to angle, 2*pi * (segment/segmentCount)\n// To go from index to segment index, index - 
(segment*6)\n//\nvec3 getConePosition(vec3 d, float rawIndex, float coneOffset, out vec3 normal) {\n\n const float segmentCount = 8.0;\n\n float index = rawIndex - floor(rawIndex /\n (segmentCount * 6.0)) *\n (segmentCount * 6.0);\n\n float segment = floor(0.001 + index/6.0);\n float segmentIndex = index - (segment*6.0);\n\n normal = -normalize(d);\n\n if (segmentIndex > 2.99 && segmentIndex < 3.01) {\n return mix(vec3(0.0), -d, coneOffset);\n }\n\n float nextAngle = (\n (segmentIndex > 0.99 && segmentIndex < 1.01) ||\n (segmentIndex > 4.99 && segmentIndex < 5.01)\n ) ? 1.0 : 0.0;\n float angle = 2.0 * 3.14159 * ((segment + nextAngle) / segmentCount);\n\n vec3 v1 = mix(d, vec3(0.0), coneOffset);\n vec3 v2 = v1 - d;\n\n vec3 u = getOrthogonalVector(d);\n vec3 v = normalize(cross(u, d));\n\n vec3 x = u * cos(angle) * length(d)*0.25;\n vec3 y = v * sin(angle) * length(d)*0.25;\n vec3 v3 = v2 + x + y;\n if (segmentIndex < 3.0) {\n vec3 tx = u * sin(angle);\n vec3 ty = v * -cos(angle);\n vec3 tangent = tx + ty;\n normal = normalize(cross(v3 - v1, tangent));\n }\n\n if (segmentIndex == 0.0) {\n return mix(d, vec3(0.0), coneOffset);\n }\n return v3;\n}\n\nattribute vec4 vector;\nattribute vec4 position;\nattribute vec4 id;\n\nuniform mat4 model, view, projection;\nuniform float vectorScale, coneScale, coneOffset;\n\nvarying vec3 f_position;\nvarying vec4 f_id;\n\nvoid main() {\n vec3 normal;\n vec3 XYZ = getConePosition(mat3(model) * ((vectorScale * coneScale) * vector.xyz), position.w, coneOffset, normal);\n vec4 conePosition = model * vec4(position.xyz, 1.0) + vec4(XYZ, 0.0);\n gl_Position = projection * view * conePosition;\n f_id = id;\n f_position = position.xyz;\n}\n"]),s=n(["precision highp float;\n#define GLSLIFY 1\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, 
vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nuniform vec3 clipBounds[2];\nuniform float pickId;\n\nvarying vec3 f_position;\nvarying vec4 f_id;\n\nvoid main() {\n if (outOfRange(clipBounds[0], clipBounds[1], f_position)) discard;\n\n gl_FragColor = vec4(pickId, f_id.xyz);\n}"]);r.meshShader={vertex:i,fragment:a,attributes:[{name:"position",type:"vec4"},{name:"color",type:"vec4"},{name:"uv",type:"vec2"},{name:"vector",type:"vec3"}]},r.pickShader={vertex:o,fragment:s,attributes:[{name:"position",type:"vec4"},{name:"id",type:"vec4"},{name:"vector",type:"vec3"}]}},{glslify:231}],82:[function(t,e,r){e.exports={0:"NONE",1:"ONE",2:"LINE_LOOP",3:"LINE_STRIP",4:"TRIANGLES",5:"TRIANGLE_STRIP",6:"TRIANGLE_FAN",256:"DEPTH_BUFFER_BIT",512:"NEVER",513:"LESS",514:"EQUAL",515:"LEQUAL",516:"GREATER",517:"NOTEQUAL",518:"GEQUAL",519:"ALWAYS",768:"SRC_COLOR",769:"ONE_MINUS_SRC_COLOR",770:"SRC_ALPHA",771:"ONE_MINUS_SRC_ALPHA",772:"DST_ALPHA",773:"ONE_MINUS_DST_ALPHA",774:"DST_COLOR",775:"ONE_MINUS_DST_COLOR",776:"SRC_ALPHA_SATURATE",1024:"STENCIL_BUFFER_BIT",1028:"FRONT",1029:"BACK",1032:"FRONT_AND_BACK",1280:"INVALID_ENUM",1281:"INVALID_VALUE",1282:"INVALID_OPERATION",1285:"OUT_OF_MEMORY",1286:"INVALID_FRAMEBUFFER_OPERATION",2304:"CW",2305:"CCW",2849:"LINE_WIDTH",2884:"CULL_FACE",2885:"CULL_FACE_MODE",2886:"FRONT_FACE",2928:"DEPTH_RANGE",2929:"DEPTH_TEST",2930:"DEPTH_WRITEMASK",2931:"DEPTH_CLEAR_VALUE",2932:"DEPTH_FUNC",2960:"STENCIL_TEST",2961:"STENCIL_CLEAR_VALUE",2962:"STENCIL_FUNC",2963:"STENCIL_VALUE_MASK",2964:"STENCIL_FAIL",2965:"STENCIL_PASS_DEPTH_FAIL",2966:"STENCIL_PASS_DEPTH_PASS",2967:"STENCIL_REF",2968:"STENCIL_WRITEMASK",2978:"VIEWPORT",3024:"DITHER",3042:"BLEND",3088:"SCISSOR_BOX",3089:"SCISSOR_TEST",3106:"COLOR_CLEAR_VALUE",3107:"COLOR_WRITEMASK",3317:"UNPACK_ALIGNMENT",3333:"PACK_ALIGNMENT",3379:"MAX_TEXTURE_SI
ZE",3386:"MAX_VIEWPORT_DIMS",3408:"SUBPIXEL_BITS",3410:"RED_BITS",3411:"GREEN_BITS",3412:"BLUE_BITS",3413:"ALPHA_BITS",3414:"DEPTH_BITS",3415:"STENCIL_BITS",3553:"TEXTURE_2D",4352:"DONT_CARE",4353:"FASTEST",4354:"NICEST",5120:"BYTE",5121:"UNSIGNED_BYTE",5122:"SHORT",5123:"UNSIGNED_SHORT",5124:"INT",5125:"UNSIGNED_INT",5126:"FLOAT",5386:"INVERT",5890:"TEXTURE",6401:"STENCIL_INDEX",6402:"DEPTH_COMPONENT",6406:"ALPHA",6407:"RGB",6408:"RGBA",6409:"LUMINANCE",6410:"LUMINANCE_ALPHA",7680:"KEEP",7681:"REPLACE",7682:"INCR",7683:"DECR",7936:"VENDOR",7937:"RENDERER",7938:"VERSION",9728:"NEAREST",9729:"LINEAR",9984:"NEAREST_MIPMAP_NEAREST",9985:"LINEAR_MIPMAP_NEAREST",9986:"NEAREST_MIPMAP_LINEAR",9987:"LINEAR_MIPMAP_LINEAR",10240:"TEXTURE_MAG_FILTER",10241:"TEXTURE_MIN_FILTER",10242:"TEXTURE_WRAP_S",10243:"TEXTURE_WRAP_T",10497:"REPEAT",10752:"POLYGON_OFFSET_UNITS",16384:"COLOR_BUFFER_BIT",32769:"CONSTANT_COLOR",32770:"ONE_MINUS_CONSTANT_COLOR",32771:"CONSTANT_ALPHA",32772:"ONE_MINUS_CONSTANT_ALPHA",32773:"BLEND_COLOR",32774:"FUNC_ADD",32777:"BLEND_EQUATION_RGB",32778:"FUNC_SUBTRACT",32779:"FUNC_REVERSE_SUBTRACT",32819:"UNSIGNED_SHORT_4_4_4_4",32820:"UNSIGNED_SHORT_5_5_5_1",32823:"POLYGON_OFFSET_FILL",32824:"POLYGON_OFFSET_FACTOR",32854:"RGBA4",32855:"RGB5_A1",32873:"TEXTURE_BINDING_2D",32926:"SAMPLE_ALPHA_TO_COVERAGE",32928:"SAMPLE_COVERAGE",32936:"SAMPLE_BUFFERS",32937:"SAMPLES",32938:"SAMPLE_COVERAGE_VALUE",32939:"SAMPLE_COVERAGE_INVERT",32968:"BLEND_DST_RGB",32969:"BLEND_SRC_RGB",32970:"BLEND_DST_ALPHA",32971:"BLEND_SRC_ALPHA",33071:"CLAMP_TO_EDGE",33170:"GENERATE_MIPMAP_HINT",33189:"DEPTH_COMPONENT16",33306:"DEPTH_STENCIL_ATTACHMENT",33635:"UNSIGNED_SHORT_5_6_5",33648:"MIRRORED_REPEAT",33901:"ALIASED_POINT_SIZE_RANGE",33902:"ALIASED_LINE_WIDTH_RANGE",33984:"TEXTURE0",33985:"TEXTURE1",33986:"TEXTURE2",33987:"TEXTURE3",33988:"TEXTURE4",33989:"TEXTURE5",33990:"TEXTURE6",33991:"TEXTURE7",33992:"TEXTURE8",33993:"TEXTURE9",33994:"TEXTURE10",33995:"TEXTURE11",33996:"TEXTURE12",3
3997:"TEXTURE13",33998:"TEXTURE14",33999:"TEXTURE15",34e3:"TEXTURE16",34001:"TEXTURE17",34002:"TEXTURE18",34003:"TEXTURE19",34004:"TEXTURE20",34005:"TEXTURE21",34006:"TEXTURE22",34007:"TEXTURE23",34008:"TEXTURE24",34009:"TEXTURE25",34010:"TEXTURE26",34011:"TEXTURE27",34012:"TEXTURE28",34013:"TEXTURE29",34014:"TEXTURE30",34015:"TEXTURE31",34016:"ACTIVE_TEXTURE",34024:"MAX_RENDERBUFFER_SIZE",34041:"DEPTH_STENCIL",34055:"INCR_WRAP",34056:"DECR_WRAP",34067:"TEXTURE_CUBE_MAP",34068:"TEXTURE_BINDING_CUBE_MAP",34069:"TEXTURE_CUBE_MAP_POSITIVE_X",34070:"TEXTURE_CUBE_MAP_NEGATIVE_X",34071:"TEXTURE_CUBE_MAP_POSITIVE_Y",34072:"TEXTURE_CUBE_MAP_NEGATIVE_Y",34073:"TEXTURE_CUBE_MAP_POSITIVE_Z",34074:"TEXTURE_CUBE_MAP_NEGATIVE_Z",34076:"MAX_CUBE_MAP_TEXTURE_SIZE",34338:"VERTEX_ATTRIB_ARRAY_ENABLED",34339:"VERTEX_ATTRIB_ARRAY_SIZE",34340:"VERTEX_ATTRIB_ARRAY_STRIDE",34341:"VERTEX_ATTRIB_ARRAY_TYPE",34342:"CURRENT_VERTEX_ATTRIB",34373:"VERTEX_ATTRIB_ARRAY_POINTER",34466:"NUM_COMPRESSED_TEXTURE_FORMATS",34467:"COMPRESSED_TEXTURE_FORMATS",34660:"BUFFER_SIZE",34661:"BUFFER_USAGE",34816:"STENCIL_BACK_FUNC",34817:"STENCIL_BACK_FAIL",34818:"STENCIL_BACK_PASS_DEPTH_FAIL",34819:"STENCIL_BACK_PASS_DEPTH_PASS",34877:"BLEND_EQUATION_ALPHA",34921:"MAX_VERTEX_ATTRIBS",34922:"VERTEX_ATTRIB_ARRAY_NORMALIZED",34930:"MAX_TEXTURE_IMAGE_UNITS",34962:"ARRAY_BUFFER",34963:"ELEMENT_ARRAY_BUFFER",34964:"ARRAY_BUFFER_BINDING",34965:"ELEMENT_ARRAY_BUFFER_BINDING",34975:"VERTEX_ATTRIB_ARRAY_BUFFER_BINDING",35040:"STREAM_DRAW",35044:"STATIC_DRAW",35048:"DYNAMIC_DRAW",35632:"FRAGMENT_SHADER",35633:"VERTEX_SHADER",35660:"MAX_VERTEX_TEXTURE_IMAGE_UNITS",35661:"MAX_COMBINED_TEXTURE_IMAGE_UNITS",35663:"SHADER_TYPE",35664:"FLOAT_VEC2",35665:"FLOAT_VEC3",35666:"FLOAT_VEC4",35667:"INT_VEC2",35668:"INT_VEC3",35669:"INT_VEC4",35670:"BOOL",35671:"BOOL_VEC2",35672:"BOOL_VEC3",35673:"BOOL_VEC4",35674:"FLOAT_MAT2",35675:"FLOAT_MAT3",35676:"FLOAT_MAT4",35678:"SAMPLER_2D",35680:"SAMPLER_CUBE",35712:"DELETE_STATUS",35713:"COM
PILE_STATUS",35714:"LINK_STATUS",35715:"VALIDATE_STATUS",35716:"INFO_LOG_LENGTH",35717:"ATTACHED_SHADERS",35718:"ACTIVE_UNIFORMS",35719:"ACTIVE_UNIFORM_MAX_LENGTH",35720:"SHADER_SOURCE_LENGTH",35721:"ACTIVE_ATTRIBUTES",35722:"ACTIVE_ATTRIBUTE_MAX_LENGTH",35724:"SHADING_LANGUAGE_VERSION",35725:"CURRENT_PROGRAM",36003:"STENCIL_BACK_REF",36004:"STENCIL_BACK_VALUE_MASK",36005:"STENCIL_BACK_WRITEMASK",36006:"FRAMEBUFFER_BINDING",36007:"RENDERBUFFER_BINDING",36048:"FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE",36049:"FRAMEBUFFER_ATTACHMENT_OBJECT_NAME",36050:"FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL",36051:"FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE",36053:"FRAMEBUFFER_COMPLETE",36054:"FRAMEBUFFER_INCOMPLETE_ATTACHMENT",36055:"FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT",36057:"FRAMEBUFFER_INCOMPLETE_DIMENSIONS",36061:"FRAMEBUFFER_UNSUPPORTED",36064:"COLOR_ATTACHMENT0",36096:"DEPTH_ATTACHMENT",36128:"STENCIL_ATTACHMENT",36160:"FRAMEBUFFER",36161:"RENDERBUFFER",36162:"RENDERBUFFER_WIDTH",36163:"RENDERBUFFER_HEIGHT",36164:"RENDERBUFFER_INTERNAL_FORMAT",36168:"STENCIL_INDEX8",36176:"RENDERBUFFER_RED_SIZE",36177:"RENDERBUFFER_GREEN_SIZE",36178:"RENDERBUFFER_BLUE_SIZE",36179:"RENDERBUFFER_ALPHA_SIZE",36180:"RENDERBUFFER_DEPTH_SIZE",36181:"RENDERBUFFER_STENCIL_SIZE",36194:"RGB565",36336:"LOW_FLOAT",36337:"MEDIUM_FLOAT",36338:"HIGH_FLOAT",36339:"LOW_INT",36340:"MEDIUM_INT",36341:"HIGH_INT",36346:"SHADER_COMPILER",36347:"MAX_VERTEX_UNIFORM_VECTORS",36348:"MAX_VARYING_VECTORS",36349:"MAX_FRAGMENT_UNIFORM_VECTORS",37440:"UNPACK_FLIP_Y_WEBGL",37441:"UNPACK_PREMULTIPLY_ALPHA_WEBGL",37442:"CONTEXT_LOST_WEBGL",37443:"UNPACK_COLORSPACE_CONVERSION_WEBGL",37444:"BROWSER_DEFAULT_WEBGL"}},{}],83:[function(t,e,r){var n=t("./1.0/numbers");e.exports=function(t){return n[t]}},{"./1.0/numbers":82}],84:[function(t,e,r){"use strict";e.exports=function(t){var 
e=t.gl,r=n(e),o=i(e,[{buffer:r,type:e.FLOAT,size:3,offset:0,stride:40},{buffer:r,type:e.FLOAT,size:4,offset:12,stride:40},{buffer:r,type:e.FLOAT,size:3,offset:28,stride:40}]),l=a(e);l.attributes.position.location=0,l.attributes.color.location=1,l.attributes.offset.location=2;var c=new s(e,r,o,l);return c.update(t),c};var n=t("gl-buffer"),i=t("gl-vao"),a=t("./shaders/index"),o=[1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1];function s(t,e,r,n){this.gl=t,this.shader=n,this.buffer=e,this.vao=r,this.pixelRatio=1,this.bounds=[[1/0,1/0,1/0],[-1/0,-1/0,-1/0]],this.clipBounds=[[-1/0,-1/0,-1/0],[1/0,1/0,1/0]],this.lineWidth=[1,1,1],this.capSize=[10,10,10],this.lineCount=[0,0,0],this.lineOffset=[0,0,0],this.opacity=1,this.hasAlpha=!1}var l=s.prototype;function c(t,e){for(var r=0;r<3;++r)t[0][r]=Math.min(t[0][r],e[r]),t[1][r]=Math.max(t[1][r],e[r])}l.isOpaque=function(){return!this.hasAlpha},l.isTransparent=function(){return this.hasAlpha},l.drawTransparent=l.draw=function(t){var e=this.gl,r=this.shader.uniforms;this.shader.bind();var n=r.view=t.view||o,i=r.projection=t.projection||o;r.model=t.model||o,r.clipBounds=this.clipBounds,r.opacity=this.opacity;var a=n[12],s=n[13],l=n[14],c=n[15],u=(t._ortho||!1?2:1)*this.pixelRatio*(i[3]*a+i[7]*s+i[11]*l+i[15]*c)/e.drawingBufferHeight;this.vao.bind();for(var f=0;f<3;++f)e.lineWidth(this.lineWidth[f]*this.pixelRatio),r.capSize=this.capSize[f]*u,this.lineCount[f]&&e.drawArrays(e.LINES,this.lineOffset[f],this.lineCount[f]);this.vao.unbind()};var u=function(){for(var t=new Array(3),e=0;e<3;++e){for(var r=[],n=1;n<=2;++n)for(var i=-1;i<=1;i+=2){var a=[0,0,0];a[(n+e)%3]=i,r.push(a)}t[e]=r}return t}();function f(t,e,r,n){for(var 
i=u[n],a=0;a0)(m=u.slice())[s]+=p[1][s],i.push(u[0],u[1],u[2],d[0],d[1],d[2],d[3],0,0,0,m[0],m[1],m[2],d[0],d[1],d[2],d[3],0,0,0),c(this.bounds,m),o+=2+f(i,m,d,s)}}this.lineCount[s]=o-this.lineOffset[s]}this.buffer.update(i)}},l.dispose=function(){this.shader.dispose(),this.buffer.dispose(),this.vao.dispose()}},{"./shaders/index":85,"gl-buffer":78,"gl-vao":150}],85:[function(t,e,r){"use strict";var n=t("glslify"),i=t("gl-shader"),a=n(["precision highp float;\n#define GLSLIFY 1\n\nattribute vec3 position, offset;\nattribute vec4 color;\nuniform mat4 model, view, projection;\nuniform float capSize;\nvarying vec4 fragColor;\nvarying vec3 fragPosition;\n\nvoid main() {\n vec4 worldPosition = model * vec4(position, 1.0);\n worldPosition = (worldPosition / worldPosition.w) + vec4(capSize * offset, 0.0);\n gl_Position = projection * view * worldPosition;\n fragColor = color;\n fragPosition = position;\n}"]),o=n(["precision highp float;\n#define GLSLIFY 1\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nuniform vec3 clipBounds[2];\nuniform float opacity;\nvarying vec3 fragPosition;\nvarying vec4 fragColor;\n\nvoid main() {\n if (\n outOfRange(clipBounds[0], clipBounds[1], fragPosition) ||\n fragColor.a * opacity == 0.\n ) discard;\n\n gl_FragColor = opacity * fragColor;\n}"]);e.exports=function(t){return i(t,a,o,null,[{name:"position",type:"vec3"},{name:"color",type:"vec4"},{name:"offset",type:"vec3"}])}},{"gl-shader":132,glslify:231}],86:[function(t,e,r){"use strict";var 
n=t("gl-texture2d");e.exports=function(t,e,r,n){i||(i=t.FRAMEBUFFER_UNSUPPORTED,a=t.FRAMEBUFFER_INCOMPLETE_ATTACHMENT,o=t.FRAMEBUFFER_INCOMPLETE_DIMENSIONS,s=t.FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT);var c=t.getExtension("WEBGL_draw_buffers");!l&&c&&function(t,e){var r=t.getParameter(e.MAX_COLOR_ATTACHMENTS_WEBGL);l=new Array(r+1);for(var n=0;n<=r;++n){for(var i=new Array(r),a=0;au||r<0||r>u)throw new Error("gl-fbo: Parameters are too large for FBO");var f=1;if("color"in(n=n||{})){if((f=Math.max(0|n.color,0))<0)throw new Error("gl-fbo: Must specify a nonnegative number of colors");if(f>1){if(!c)throw new Error("gl-fbo: Multiple draw buffer extension not supported");if(f>t.getParameter(c.MAX_COLOR_ATTACHMENTS_WEBGL))throw new Error("gl-fbo: Context does not support "+f+" draw buffers")}}var h=t.UNSIGNED_BYTE,p=t.getExtension("OES_texture_float");if(n.float&&f>0){if(!p)throw new Error("gl-fbo: Context does not support floating point textures");h=t.FLOAT}else n.preferFloat&&f>0&&p&&(h=t.FLOAT);var m=!0;"depth"in n&&(m=!!n.depth);var g=!1;"stencil"in n&&(g=!!n.stencil);return new d(t,e,r,h,f,m,g,c)};var i,a,o,s,l=null;function c(t){return[t.getParameter(t.FRAMEBUFFER_BINDING),t.getParameter(t.RENDERBUFFER_BINDING),t.getParameter(t.TEXTURE_BINDING_2D)]}function u(t,e){t.bindFramebuffer(t.FRAMEBUFFER,e[0]),t.bindRenderbuffer(t.RENDERBUFFER,e[1]),t.bindTexture(t.TEXTURE_2D,e[2])}function f(t){switch(t){case i:throw new Error("gl-fbo: Framebuffer unsupported");case a:throw new Error("gl-fbo: Framebuffer incomplete attachment");case o:throw new Error("gl-fbo: Framebuffer incomplete dimensions");case s:throw new Error("gl-fbo: Framebuffer incomplete missing attachment");default:throw new Error("gl-fbo: Framebuffer failed for unspecified reason")}}function h(t,e,r,i,a,o){if(!i)return null;var s=n(t,e,r,a,i);return s.magFilter=t.NEAREST,s.minFilter=t.NEAREST,s.mipSamples=1,s.bind(),t.framebufferTexture2D(t.FRAMEBUFFER,o,t.TEXTURE_2D,s.handle,0),s}function p(t,e,r,n,i){var 
a=t.createRenderbuffer();return t.bindRenderbuffer(t.RENDERBUFFER,a),t.renderbufferStorage(t.RENDERBUFFER,n,e,r),t.framebufferRenderbuffer(t.FRAMEBUFFER,i,t.RENDERBUFFER,a),a}function d(t,e,r,n,i,a,o,s){this.gl=t,this._shape=[0|e,0|r],this._destroyed=!1,this._ext=s,this.color=new Array(i);for(var d=0;d1&&s.drawBuffersWEBGL(l[o]);var y=r.getExtension("WEBGL_depth_texture");y?d?t.depth=h(r,i,a,y.UNSIGNED_INT_24_8_WEBGL,r.DEPTH_STENCIL,r.DEPTH_STENCIL_ATTACHMENT):m&&(t.depth=h(r,i,a,r.UNSIGNED_SHORT,r.DEPTH_COMPONENT,r.DEPTH_ATTACHMENT)):m&&d?t._depth_rb=p(r,i,a,r.DEPTH_STENCIL,r.DEPTH_STENCIL_ATTACHMENT):m?t._depth_rb=p(r,i,a,r.DEPTH_COMPONENT16,r.DEPTH_ATTACHMENT):d&&(t._depth_rb=p(r,i,a,r.STENCIL_INDEX,r.STENCIL_ATTACHMENT));var x=r.checkFramebufferStatus(r.FRAMEBUFFER);if(x!==r.FRAMEBUFFER_COMPLETE){t._destroyed=!0,r.bindFramebuffer(r.FRAMEBUFFER,null),r.deleteFramebuffer(t.handle),t.handle=null,t.depth&&(t.depth.dispose(),t.depth=null),t._depth_rb&&(r.deleteRenderbuffer(t._depth_rb),t._depth_rb=null);for(v=0;vi||r<0||r>i)throw new Error("gl-fbo: Can't resize FBO, invalid dimensions");t._shape[0]=e,t._shape[1]=r;for(var a=c(n),o=0;o>8*p&255;this.pickOffset=r,i.bind();var d=i.uniforms;d.viewTransform=t,d.pickOffset=e,d.shape=this.shape;var m=i.attributes;return this.positionBuffer.bind(),m.position.pointer(),this.weightBuffer.bind(),m.weight.pointer(s.UNSIGNED_BYTE,!1),this.idBuffer.bind(),m.pickId.pointer(s.UNSIGNED_BYTE,!1),s.drawArrays(s.TRIANGLES,0,o),r+this.shape[0]*this.shape[1]}}}(),f.pick=function(t,e,r){var n=this.pickOffset,i=this.shape[0]*this.shape[1];if(r=n+i)return null;var a=r-n,o=this.xData,s=this.yData;return{object:this,pointId:a,dataCoord:[o[a%this.shape[0]],s[a/this.shape[0]|0]]}},f.update=function(t){var e=(t=t||{}).shape||[0,0],r=t.x||i(e[0]),o=t.y||i(e[1]),s=t.z||new Float32Array(e[0]*e[1]),l=!1!==t.zsmooth;this.xData=r,this.yData=o;var 
c,u,f,p,d=t.colorLevels||[0],m=t.colorValues||[0,0,0,1],g=d.length,v=this.bounds;l?(c=v[0]=r[0],u=v[1]=o[0],f=v[2]=r[r.length-1],p=v[3]=o[o.length-1]):(c=v[0]=r[0]+(r[1]-r[0])/2,u=v[1]=o[0]+(o[1]-o[0])/2,f=v[2]=r[r.length-1]+(r[r.length-1]-r[r.length-2])/2,p=v[3]=o[o.length-1]+(o[o.length-1]-o[o.length-2])/2);var y=1/(f-c),x=1/(p-u),b=e[0],_=e[1];this.shape=[b,_];var w=(l?(b-1)*(_-1):b*_)*(h.length>>>1);this.numVertices=w;for(var T=a.mallocUint8(4*w),k=a.mallocFloat32(2*w),A=a.mallocUint8(2*w),M=a.mallocUint32(w),S=0,E=l?b-1:b,L=l?_-1:_,C=0;C max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nuniform vec3 clipBounds[2];\nuniform sampler2D dashTexture;\nuniform float dashScale;\nuniform float opacity;\n\nvarying vec3 worldPosition;\nvarying float pixelArcLength;\nvarying vec4 fragColor;\n\nvoid main() {\n if (\n outOfRange(clipBounds[0], clipBounds[1], worldPosition) ||\n fragColor.a * opacity == 0.\n ) discard;\n\n float dashWeight = texture2D(dashTexture, vec2(dashScale * pixelArcLength, 0)).r;\n if(dashWeight < 0.5) {\n discard;\n }\n gl_FragColor = fragColor * opacity;\n}\n"]),s=n(["precision highp float;\n#define GLSLIFY 1\n\n#define FLOAT_MAX 1.70141184e38\n#define FLOAT_MIN 1.17549435e-38\n\n// https://github.com/mikolalysenko/glsl-read-float/blob/master/index.glsl\nvec4 packFloat(float v) {\n float av = abs(v);\n\n //Handle special cases\n if(av < FLOAT_MIN) {\n return vec4(0.0, 0.0, 0.0, 0.0);\n } else if(v > FLOAT_MAX) {\n return vec4(127.0, 128.0, 0.0, 0.0) / 255.0;\n } else if(v < -FLOAT_MAX) {\n return vec4(255.0, 128.0, 0.0, 0.0) / 255.0;\n }\n\n vec4 c = vec4(0,0,0,0);\n\n //Compute exponent and mantissa\n float 
e = floor(log2(av));\n float m = av * pow(2.0, -e) - 1.0;\n\n //Unpack mantissa\n c[1] = floor(128.0 * m);\n m -= c[1] / 128.0;\n c[2] = floor(32768.0 * m);\n m -= c[2] / 32768.0;\n c[3] = floor(8388608.0 * m);\n\n //Unpack exponent\n float ebias = e + 127.0;\n c[0] = floor(ebias / 2.0);\n ebias -= c[0] * 2.0;\n c[1] += floor(ebias) * 128.0;\n\n //Unpack sign bit\n c[0] += 128.0 * step(0.0, -v);\n\n //Scale back to range\n return c / 255.0;\n}\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nuniform float pickId;\nuniform vec3 clipBounds[2];\n\nvarying vec3 worldPosition;\nvarying float pixelArcLength;\nvarying vec4 fragColor;\n\nvoid main() {\n if (outOfRange(clipBounds[0], clipBounds[1], worldPosition)) discard;\n\n gl_FragColor = vec4(pickId/255.0, packFloat(pixelArcLength).xyz);\n}"]),l=[{name:"position",type:"vec3"},{name:"nextPosition",type:"vec3"},{name:"arcLength",type:"float"},{name:"lineWidth",type:"float"},{name:"color",type:"vec4"}];r.createShader=function(t){return i(t,a,o,null,l)},r.createPickShader=function(t){return i(t,a,s,null,l)}},{"gl-shader":132,glslify:231}],91:[function(t,e,r){"use strict";e.exports=function(t){var e=t.gl||t.scene&&t.scene.gl,r=f(e);r.attributes.position.location=0,r.attributes.nextPosition.location=1,r.attributes.arcLength.location=2,r.attributes.lineWidth.location=3,r.attributes.color.location=4;var o=h(e);o.attributes.position.location=0,o.attributes.nextPosition.location=1,o.attributes.arcLength.location=2,o.attributes.lineWidth.location=3,o.attributes.color.location=4;for(var 
s=n(e),l=i(e,[{buffer:s,size:3,offset:0,stride:48},{buffer:s,size:3,offset:12,stride:48},{buffer:s,size:1,offset:24,stride:48},{buffer:s,size:1,offset:28,stride:48},{buffer:s,size:4,offset:32,stride:48}]),u=c(new Array(1024),[256,1,4]),p=0;p<1024;++p)u.data[p]=255;var d=a(e,u);d.wrap=e.REPEAT;var m=new v(e,r,o,s,l,d);return m.update(t),m};var n=t("gl-buffer"),i=t("gl-vao"),a=t("gl-texture2d"),o=new Uint8Array(4),s=new Float32Array(o.buffer);var l=t("binary-search-bounds"),c=t("ndarray"),u=t("./lib/shaders"),f=u.createShader,h=u.createPickShader,p=[1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1];function d(t,e){for(var r=0,n=0;n<3;++n){var i=t[n]-e[n];r+=i*i}return Math.sqrt(r)}function m(t){for(var e=[[-1e6,-1e6,-1e6],[1e6,1e6,1e6]],r=0;r<3;++r)e[0][r]=Math.max(t[0][r],e[0][r]),e[1][r]=Math.min(t[1][r],e[1][r]);return e}function g(t,e,r,n){this.arcLength=t,this.position=e,this.index=r,this.dataCoordinate=n}function v(t,e,r,n,i,a){this.gl=t,this.shader=e,this.pickShader=r,this.buffer=n,this.vao=i,this.clipBounds=[[-1/0,-1/0,-1/0],[1/0,1/0,1/0]],this.points=[],this.arcLength=[],this.vertexCount=0,this.bounds=[[0,0,0],[0,0,0]],this.pickId=0,this.lineWidth=1,this.texture=a,this.dashScale=1,this.opacity=1,this.hasAlpha=!1,this.dirty=!0,this.pixelRatio=1}var y=v.prototype;y.isTransparent=function(){return this.hasAlpha},y.isOpaque=function(){return!this.hasAlpha},y.pickSlots=1,y.setPickBase=function(t){this.pickId=t},y.drawTransparent=y.draw=function(t){if(this.vertexCount){var e=this.gl,r=this.shader,n=this.vao;r.bind(),r.uniforms={model:t.model||p,view:t.view||p,projection:t.projection||p,clipBounds:m(this.clipBounds),dashTexture:this.texture.bind(),dashScale:this.dashScale/this.arcLength[this.arcLength.length-1],opacity:this.opacity,screenShape:[e.drawingBufferWidth,e.drawingBufferHeight],pixelRatio:this.pixelRatio},n.bind(),n.draw(e.TRIANGLE_STRIP,this.vertexCount),n.unbind()}},y.drawPick=function(t){if(this.vertexCount){var 
e=this.gl,r=this.pickShader,n=this.vao;r.bind(),r.uniforms={model:t.model||p,view:t.view||p,projection:t.projection||p,pickId:this.pickId,clipBounds:m(this.clipBounds),screenShape:[e.drawingBufferWidth,e.drawingBufferHeight],pixelRatio:this.pixelRatio},n.bind(),n.draw(e.TRIANGLE_STRIP,this.vertexCount),n.unbind()}},y.update=function(t){var e,r;this.dirty=!0;var n=!!t.connectGaps;"dashScale"in t&&(this.dashScale=t.dashScale),this.hasAlpha=!1,"opacity"in t&&(this.opacity=+t.opacity,this.opacity<1&&(this.hasAlpha=!0));var i=[],a=[],o=[],s=0,u=0,f=[[1/0,1/0,1/0],[-1/0,-1/0,-1/0]],h=t.position||t.positions;if(h){var p=t.color||t.colors||[0,0,0,1],m=t.lineWidth||1,g=!1;t:for(e=1;e0){for(var w=0;w<24;++w)i.push(i[i.length-12]);u+=2,g=!0}continue t}f[0][r]=Math.min(f[0][r],b[r],_[r]),f[1][r]=Math.max(f[1][r],b[r],_[r])}Array.isArray(p[0])?(v=p.length>e-1?p[e-1]:p.length>0?p[p.length-1]:[0,0,0,1],y=p.length>e?p[e]:p.length>0?p[p.length-1]:[0,0,0,1]):v=y=p,3===v.length&&(v=[v[0],v[1],v[2],1]),3===y.length&&(y=[y[0],y[1],y[2],1]),!this.hasAlpha&&v[3]<1&&(this.hasAlpha=!0),x=Array.isArray(m)?m.length>e-1?m[e-1]:m.length>0?m[m.length-1]:[0,0,0,1]:m;var T=s;if(s+=d(b,_),g){for(r=0;r<2;++r)i.push(b[0],b[1],b[2],_[0],_[1],_[2],T,x,v[0],v[1],v[2],v[3]);u+=2,g=!1}i.push(b[0],b[1],b[2],_[0],_[1],_[2],T,x,v[0],v[1],v[2],v[3],b[0],b[1],b[2],_[0],_[1],_[2],T,-x,v[0],v[1],v[2],v[3],_[0],_[1],_[2],b[0],b[1],b[2],s,-x,y[0],y[1],y[2],y[3],_[0],_[1],_[2],b[0],b[1],b[2],s,x,y[0],y[1],y[2],y[3]),u+=4}}if(this.buffer.update(i),a.push(s),o.push(h[h.length-1].slice()),this.bounds=f,this.vertexCount=u,this.points=o,this.arcLength=a,"dashes"in t){var k=t.dashes.slice();for(k.unshift(0),e=1;e1.0001)return null;v+=g[f]}if(Math.abs(v-1)>.001)return null;return[h,s(t,g),g]}},{barycentric:14,"polytope-closest-point/lib/closest_point_2d.js":270}],111:[function(t,e,r){var n=t("glslify"),i=n(["precision highp float;\n#define GLSLIFY 1\n\nattribute vec3 position, normal;\nattribute vec4 color;\nattribute 
vec2 uv;\n\nuniform mat4 model\n , view\n , projection\n , inverseModel;\nuniform vec3 eyePosition\n , lightPosition;\n\nvarying vec3 f_normal\n , f_lightDirection\n , f_eyeDirection\n , f_data;\nvarying vec4 f_color;\nvarying vec2 f_uv;\n\nvec4 project(vec3 p) {\n return projection * view * model * vec4(p, 1.0);\n}\n\nvoid main() {\n gl_Position = project(position);\n\n //Lighting geometry parameters\n vec4 cameraCoordinate = view * vec4(position , 1.0);\n cameraCoordinate.xyz /= cameraCoordinate.w;\n f_lightDirection = lightPosition - cameraCoordinate.xyz;\n f_eyeDirection = eyePosition - cameraCoordinate.xyz;\n f_normal = normalize((vec4(normal, 0.0) * inverseModel).xyz);\n\n f_color = color;\n f_data = position;\n f_uv = uv;\n}\n"]),a=n(["#extension GL_OES_standard_derivatives : enable\n\nprecision highp float;\n#define GLSLIFY 1\n\nfloat beckmannDistribution(float x, float roughness) {\n float NdotH = max(x, 0.0001);\n float cos2Alpha = NdotH * NdotH;\n float tan2Alpha = (cos2Alpha - 1.0) / cos2Alpha;\n float roughness2 = roughness * roughness;\n float denom = 3.141592653589793 * roughness2 * cos2Alpha * cos2Alpha;\n return exp(tan2Alpha / roughness2) / denom;\n}\n\nfloat cookTorranceSpecular(\n vec3 lightDirection,\n vec3 viewDirection,\n vec3 surfaceNormal,\n float roughness,\n float fresnel) {\n\n float VdotN = max(dot(viewDirection, surfaceNormal), 0.0);\n float LdotN = max(dot(lightDirection, surfaceNormal), 0.0);\n\n //Half angle vector\n vec3 H = normalize(lightDirection + viewDirection);\n\n //Geometric term\n float NdotH = max(dot(surfaceNormal, H), 0.0);\n float VdotH = max(dot(viewDirection, H), 0.000001);\n float LdotH = max(dot(lightDirection, H), 0.000001);\n float G1 = (2.0 * NdotH * VdotN) / VdotH;\n float G2 = (2.0 * NdotH * LdotN) / LdotH;\n float G = min(1.0, min(G1, G2));\n \n //Distribution term\n float D = beckmannDistribution(NdotH, roughness);\n\n //Fresnel term\n float F = pow(1.0 - VdotN, fresnel);\n\n //Multiply terms and done\n 
return G * F * D / max(3.14159265 * VdotN, 0.000001);\n}\n\n//#pragma glslify: beckmann = require(glsl-specular-beckmann) // used in gl-surface3d\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nuniform vec3 clipBounds[2];\nuniform float roughness\n , fresnel\n , kambient\n , kdiffuse\n , kspecular;\nuniform sampler2D texture;\n\nvarying vec3 f_normal\n , f_lightDirection\n , f_eyeDirection\n , f_data;\nvarying vec4 f_color;\nvarying vec2 f_uv;\n\nvoid main() {\n if (f_color.a == 0.0 ||\n outOfRange(clipBounds[0], clipBounds[1], f_data)\n ) discard;\n\n vec3 N = normalize(f_normal);\n vec3 L = normalize(f_lightDirection);\n vec3 V = normalize(f_eyeDirection);\n\n if(gl_FrontFacing) {\n N = -N;\n }\n\n float specular = min(1.0, max(0.0, cookTorranceSpecular(L, V, N, roughness, fresnel)));\n //float specular = max(0.0, beckmann(L, V, N, roughness)); // used in gl-surface3d\n\n float diffuse = min(kambient + kdiffuse * max(dot(N, L), 0.0), 1.0);\n\n vec4 surfaceColor = vec4(f_color.rgb, 1.0) * texture2D(texture, f_uv);\n vec4 litColor = surfaceColor.a * vec4(diffuse * surfaceColor.rgb + kspecular * vec3(1,1,1) * specular, 1.0);\n\n gl_FragColor = litColor * f_color.a;\n}\n"]),o=n(["precision highp float;\n#define GLSLIFY 1\n\nattribute vec3 position;\nattribute vec4 color;\nattribute vec2 uv;\n\nuniform mat4 model, view, projection;\n\nvarying vec4 f_color;\nvarying vec3 f_data;\nvarying vec2 f_uv;\n\nvoid main() {\n gl_Position = projection * view * model * vec4(position, 1.0);\n f_color = color;\n f_data = position;\n f_uv = uv;\n}"]),s=n(["precision highp 
float;\n#define GLSLIFY 1\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nuniform vec3 clipBounds[2];\nuniform sampler2D texture;\nuniform float opacity;\n\nvarying vec4 f_color;\nvarying vec3 f_data;\nvarying vec2 f_uv;\n\nvoid main() {\n if (outOfRange(clipBounds[0], clipBounds[1], f_data)) discard;\n\n gl_FragColor = f_color * texture2D(texture, f_uv) * opacity;\n}"]),l=n(["precision highp float;\n#define GLSLIFY 1\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nattribute vec3 position;\nattribute vec4 color;\nattribute vec2 uv;\nattribute float pointSize;\n\nuniform mat4 model, view, projection;\nuniform vec3 clipBounds[2];\n\nvarying vec4 f_color;\nvarying vec2 f_uv;\n\nvoid main() {\n if (outOfRange(clipBounds[0], clipBounds[1], position)) {\n\n gl_Position = vec4(0.0, 0.0 ,0.0 ,0.0);\n } else {\n gl_Position = projection * view * model * vec4(position, 1.0);\n }\n gl_PointSize = pointSize;\n f_color = color;\n f_uv = uv;\n}"]),c=n(["precision highp float;\n#define GLSLIFY 1\n\nuniform sampler2D texture;\nuniform float opacity;\n\nvarying vec4 f_color;\nvarying vec2 f_uv;\n\nvoid main() {\n vec2 pointR = gl_PointCoord.xy - vec2(0.5, 0.5);\n 
if(dot(pointR, pointR) > 0.25) {\n discard;\n }\n gl_FragColor = f_color * texture2D(texture, f_uv) * opacity;\n}"]),u=n(["precision highp float;\n#define GLSLIFY 1\n\nattribute vec3 position;\nattribute vec4 id;\n\nuniform mat4 model, view, projection;\n\nvarying vec3 f_position;\nvarying vec4 f_id;\n\nvoid main() {\n gl_Position = projection * view * model * vec4(position, 1.0);\n f_id = id;\n f_position = position;\n}"]),f=n(["precision highp float;\n#define GLSLIFY 1\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nuniform vec3 clipBounds[2];\nuniform float pickId;\n\nvarying vec3 f_position;\nvarying vec4 f_id;\n\nvoid main() {\n if (outOfRange(clipBounds[0], clipBounds[1], f_position)) discard;\n\n gl_FragColor = vec4(pickId, f_id.xyz);\n}"]),h=n(["precision highp float;\n#define GLSLIFY 1\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nattribute vec3 position;\nattribute float pointSize;\nattribute vec4 id;\n\nuniform mat4 model, view, projection;\nuniform vec3 clipBounds[2];\n\nvarying vec3 f_position;\nvarying vec4 f_id;\n\nvoid main() {\n if (outOfRange(clipBounds[0], clipBounds[1], position)) {\n\n gl_Position = vec4(0.0, 0.0, 0.0, 0.0);\n } else 
{\n gl_Position = projection * view * model * vec4(position, 1.0);\n gl_PointSize = pointSize;\n }\n f_id = id;\n f_position = position;\n}"]),p=n(["precision highp float;\n#define GLSLIFY 1\n\nattribute vec3 position;\n\nuniform mat4 model, view, projection;\n\nvoid main() {\n gl_Position = projection * view * model * vec4(position, 1.0);\n}"]),d=n(["precision highp float;\n#define GLSLIFY 1\n\nuniform vec3 contourColor;\n\nvoid main() {\n gl_FragColor = vec4(contourColor, 1.0);\n}\n"]);r.meshShader={vertex:i,fragment:a,attributes:[{name:"position",type:"vec3"},{name:"normal",type:"vec3"},{name:"color",type:"vec4"},{name:"uv",type:"vec2"}]},r.wireShader={vertex:o,fragment:s,attributes:[{name:"position",type:"vec3"},{name:"color",type:"vec4"},{name:"uv",type:"vec2"}]},r.pointShader={vertex:l,fragment:c,attributes:[{name:"position",type:"vec3"},{name:"color",type:"vec4"},{name:"uv",type:"vec2"},{name:"pointSize",type:"float"}]},r.pickShader={vertex:u,fragment:f,attributes:[{name:"position",type:"vec3"},{name:"id",type:"vec4"}]},r.pointPickShader={vertex:h,fragment:f,attributes:[{name:"position",type:"vec3"},{name:"pointSize",type:"float"},{name:"id",type:"vec4"}]},r.contourShader={vertex:p,fragment:d,attributes:[{name:"position",type:"vec3"}]}},{glslify:231}],112:[function(t,e,r){"use strict";var n=t("gl-shader"),i=t("gl-buffer"),a=t("gl-vao"),o=t("gl-texture2d"),s=t("normals"),l=t("gl-mat4/multiply"),c=t("gl-mat4/invert"),u=t("ndarray"),f=t("colormap"),h=t("simplicial-complex-contour"),p=t("typedarray-pool"),d=t("./lib/shaders"),m=t("./lib/closest-point"),g=d.meshShader,v=d.wireShader,y=d.pointShader,x=d.pickShader,b=d.pointPickShader,_=d.contourShader,w=[1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1];function 
T(t,e,r,n,i,a,o,s,l,c,u,f,h,p,d,m,g,v,y,x,b,_,T,k,A,M,S){this.gl=t,this.pixelRatio=1,this.cells=[],this.positions=[],this.intensity=[],this.texture=e,this.dirty=!0,this.triShader=r,this.lineShader=n,this.pointShader=i,this.pickShader=a,this.pointPickShader=o,this.contourShader=s,this.trianglePositions=l,this.triangleColors=u,this.triangleNormals=h,this.triangleUVs=f,this.triangleIds=c,this.triangleVAO=p,this.triangleCount=0,this.lineWidth=1,this.edgePositions=d,this.edgeColors=g,this.edgeUVs=v,this.edgeIds=m,this.edgeVAO=y,this.edgeCount=0,this.pointPositions=x,this.pointColors=_,this.pointUVs=T,this.pointSizes=k,this.pointIds=b,this.pointVAO=A,this.pointCount=0,this.contourLineWidth=1,this.contourPositions=M,this.contourVAO=S,this.contourCount=0,this.contourColor=[0,0,0],this.contourEnable=!0,this.pickVertex=!0,this.pickId=1,this.bounds=[[1/0,1/0,1/0],[-1/0,-1/0,-1/0]],this.clipBounds=[[-1/0,-1/0,-1/0],[1/0,1/0,1/0]],this.lightPosition=[1e5,1e5,0],this.ambientLight=.8,this.diffuseLight=.8,this.specularLight=2,this.roughness=.5,this.fresnel=1.5,this.opacity=1,this.hasAlpha=!1,this.opacityscale=!1,this._model=w,this._view=w,this._projection=w,this._resolution=[1,1]}var k=T.prototype;function A(t,e){if(!e)return 1;if(!e.length)return 1;for(var r=0;rt&&r>0){var n=(e[r][0]-t)/(e[r][0]-e[r-1][0]);return e[r][1]*(1-n)+n*e[r-1][1]}}return 1}function M(t){var e=n(t,g.vertex,g.fragment);return e.attributes.position.location=0,e.attributes.color.location=2,e.attributes.uv.location=3,e.attributes.normal.location=4,e}function S(t){var e=n(t,v.vertex,v.fragment);return e.attributes.position.location=0,e.attributes.color.location=2,e.attributes.uv.location=3,e}function E(t){var e=n(t,y.vertex,y.fragment);return e.attributes.position.location=0,e.attributes.color.location=2,e.attributes.uv.location=3,e.attributes.pointSize.location=4,e}function L(t){var e=n(t,x.vertex,x.fragment);return e.attributes.position.location=0,e.attributes.id.location=1,e}function C(t){var 
e=n(t,b.vertex,b.fragment);return e.attributes.position.location=0,e.attributes.id.location=1,e.attributes.pointSize.location=4,e}function P(t){var e=n(t,_.vertex,_.fragment);return e.attributes.position.location=0,e}k.isOpaque=function(){return!this.hasAlpha},k.isTransparent=function(){return this.hasAlpha},k.pickSlots=1,k.setPickBase=function(t){this.pickId=t},k.highlight=function(t){if(t&&this.contourEnable){for(var e=h(this.cells,this.intensity,t.intensity),r=e.cells,n=e.vertexIds,i=e.vertexWeights,a=r.length,o=p.mallocFloat32(6*a),s=0,l=0;l0&&((f=this.triShader).bind(),f.uniforms=s,this.triangleVAO.bind(),e.drawArrays(e.TRIANGLES,0,3*this.triangleCount),this.triangleVAO.unbind());this.edgeCount>0&&this.lineWidth>0&&((f=this.lineShader).bind(),f.uniforms=s,this.edgeVAO.bind(),e.lineWidth(this.lineWidth*this.pixelRatio),e.drawArrays(e.LINES,0,2*this.edgeCount),this.edgeVAO.unbind());this.pointCount>0&&((f=this.pointShader).bind(),f.uniforms=s,this.pointVAO.bind(),e.drawArrays(e.POINTS,0,this.pointCount),this.pointVAO.unbind());this.contourEnable&&this.contourCount>0&&this.contourLineWidth>0&&((f=this.contourShader).bind(),f.uniforms=s,this.contourVAO.bind(),e.drawArrays(e.LINES,0,this.contourCount),this.contourVAO.unbind())},k.drawPick=function(t){t=t||{};for(var e=this.gl,r=t.model||w,n=t.view||w,i=t.projection||w,a=[[-1e6,-1e6,-1e6],[1e6,1e6,1e6]],o=0;o<3;++o)a[0][o]=Math.max(a[0][o],this.clipBounds[0][o]),a[1][o]=Math.min(a[1][o],this.clipBounds[1][o]);this._model=[].slice.call(r),this._view=[].slice.call(n),this._projection=[].slice.call(i),this._resolution=[e.drawingBufferWidth,e.drawingBufferHeight];var 
s,l={model:r,view:n,projection:i,clipBounds:a,pickId:this.pickId/255};((s=this.pickShader).bind(),s.uniforms=l,this.triangleCount>0&&(this.triangleVAO.bind(),e.drawArrays(e.TRIANGLES,0,3*this.triangleCount),this.triangleVAO.unbind()),this.edgeCount>0&&(this.edgeVAO.bind(),e.lineWidth(this.lineWidth*this.pixelRatio),e.drawArrays(e.LINES,0,2*this.edgeCount),this.edgeVAO.unbind()),this.pointCount>0)&&((s=this.pointPickShader).bind(),s.uniforms=l,this.pointVAO.bind(),e.drawArrays(e.POINTS,0,this.pointCount),this.pointVAO.unbind())},k.pick=function(t){if(!t)return null;if(t.id!==this.pickId)return null;for(var e=t.value[0]+256*t.value[1]+65536*t.value[2],r=this.cells[e],n=this.positions,i=new Array(r.length),a=0;ai[k]&&(r.uniforms.dataAxis=c,r.uniforms.screenOffset=u,r.uniforms.color=g[t],r.uniforms.angle=v[t],a.drawArrays(a.TRIANGLES,i[k],i[A]-i[k]))),y[t]&&T&&(u[1^t]-=M*p*x[t],r.uniforms.dataAxis=f,r.uniforms.screenOffset=u,r.uniforms.color=b[t],r.uniforms.angle=_[t],a.drawArrays(a.TRIANGLES,w,T)),u[1^t]=M*s[2+(1^t)]-1,d[t+2]&&(u[1^t]+=M*p*m[t+2],ki[k]&&(r.uniforms.dataAxis=c,r.uniforms.screenOffset=u,r.uniforms.color=g[t+2],r.uniforms.angle=v[t+2],a.drawArrays(a.TRIANGLES,i[k],i[A]-i[k]))),y[t+2]&&T&&(u[1^t]+=M*p*x[t+2],r.uniforms.dataAxis=f,r.uniforms.screenOffset=u,r.uniforms.color=b[t+2],r.uniforms.angle=_[t+2],a.drawArrays(a.TRIANGLES,w,T))}),m.drawTitle=function(){var t=[0,0],e=[0,0];return function(){var r=this.plot,n=this.shader,i=r.gl,a=r.screenBox,o=r.titleCenter,s=r.titleAngle,l=r.titleColor,c=r.pixelRatio;if(this.titleCount){for(var u=0;u<2;++u)e[u]=2*(o[u]*c-a[u])/(a[2+u]-a[u])-1;n.bind(),n.uniforms.dataAxis=t,n.uniforms.screenOffset=e,n.uniforms.angle=s,n.uniforms.color=l,i.drawArrays(i.TRIANGLES,this.titleOffset,this.titleCount)}}}(),m.bind=(h=[0,0],p=[0,0],d=[0,0],function(){var t=this.plot,e=this.shader,r=t._tickBounds,n=t.dataBox,i=t.screenBox,a=t.viewBox;e.bind();for(var o=0;o<2;++o){var 
s=r[o],l=r[o+2]-s,c=.5*(n[o+2]+n[o]),u=n[o+2]-n[o],f=a[o],m=a[o+2]-f,g=i[o],v=i[o+2]-g;p[o]=2*l/u*m/v,h[o]=2*(s-c)/u*m/v}d[1]=2*t.pixelRatio/(i[3]-i[1]),d[0]=d[1]*(i[3]-i[1])/(i[2]-i[0]),e.uniforms.dataScale=p,e.uniforms.dataShift=h,e.uniforms.textScale=d,this.vbo.bind(),e.attributes.textCoordinate.pointer()}),m.update=function(t){var e,r,n,i,o,s=[],l=t.ticks,c=t.bounds;for(o=0;o<2;++o){var u=[Math.floor(s.length/3)],f=[-1/0],h=l[o];for(e=0;e=0){var m=e[d]-n[d]*(e[d+2]-e[d])/(n[d+2]-n[d]);0===d?o.drawLine(m,e[1],m,e[3],p[d],h[d]):o.drawLine(e[0],m,e[2],m,p[d],h[d])}}for(d=0;d=0;--t)this.objects[t].dispose();this.objects.length=0;for(t=this.overlays.length-1;t>=0;--t)this.overlays[t].dispose();this.overlays.length=0,this.gl=null},c.addObject=function(t){this.objects.indexOf(t)<0&&(this.objects.push(t),this.setDirty())},c.removeObject=function(t){for(var e=this.objects,r=0;rMath.abs(e))c.rotate(a,0,0,-t*r*Math.PI*d.rotateSpeed/window.innerWidth);else if(!d._ortho){var o=-d.zoomSpeed*i*e/window.innerHeight*(a-c.lastT())/20;c.pan(a,0,0,f*(Math.exp(o)-1))}}}),!0)},d.enableMouseListeners(),d};var n=t("right-now"),i=t("3d-view"),a=t("mouse-change"),o=t("mouse-wheel"),s=t("mouse-event-offset"),l=t("has-passive-events")},{"3d-view":7,"has-passive-events":232,"mouse-change":247,"mouse-event-offset":248,"mouse-wheel":250,"right-now":278}],120:[function(t,e,r){var n=t("glslify"),i=t("gl-shader"),a=n(["precision mediump float;\n#define GLSLIFY 1\nattribute vec2 position;\nvarying vec2 uv;\nvoid main() {\n uv = position;\n gl_Position = vec4(position, 0, 1);\n}"]),o=n(["precision mediump float;\n#define GLSLIFY 1\n\nuniform sampler2D accumBuffer;\nvarying vec2 uv;\n\nvoid main() {\n vec4 accum = texture2D(accumBuffer, 0.5 * (uv + 1.0));\n gl_FragColor = min(vec4(1,1,1,1), accum);\n}"]);e.exports=function(t){return i(t,a,o,null,[{name:"position",type:"vec2"}])}},{"gl-shader":132,glslify:231}],121:[function(t,e,r){"use strict";var 
n=t("./camera.js"),i=t("gl-axes3d"),a=t("gl-axes3d/properties"),o=t("gl-spikes3d"),s=t("gl-select-static"),l=t("gl-fbo"),c=t("a-big-triangle"),u=t("mouse-change"),f=t("gl-mat4/perspective"),h=t("gl-mat4/ortho"),p=t("./lib/shader"),d=t("is-mobile")({tablet:!0,featureDetect:!0});function m(){this.mouse=[-1,-1],this.screen=null,this.distance=1/0,this.index=null,this.dataCoordinate=null,this.dataPosition=null,this.object=null,this.data=null}function g(t){var e=Math.round(Math.log(Math.abs(t))/Math.log(10));if(e<0){var r=Math.round(Math.pow(10,-e));return Math.ceil(t*r)/r}if(e>0){r=Math.round(Math.pow(10,e));return Math.ceil(t/r)*r}return Math.ceil(t)}function v(t){return"boolean"!=typeof t||t}e.exports={createScene:function(t){(t=t||{}).camera=t.camera||{};var e=t.canvas;if(!e){if(e=document.createElement("canvas"),t.container)t.container.appendChild(e);else document.body.appendChild(e)}var r=t.gl;r||(t.glOptions&&(d=!!t.glOptions.preserveDrawingBuffer),r=function(t,e){var r=null;try{(r=t.getContext("webgl",e))||(r=t.getContext("experimental-webgl",e))}catch(t){return null}return r}(e,t.glOptions||{premultipliedAlpha:!0,antialias:!0,preserveDrawingBuffer:d}));if(!r)throw new Error("webgl not supported");var y=t.bounds||[[-10,-10,-10],[10,10,10]],x=new m,b=l(r,r.drawingBufferWidth,r.drawingBufferHeight,{preferFloat:!d}),_=p(r),w=t.cameraObject&&!0===t.cameraObject._ortho||t.camera.projection&&"orthographic"===t.camera.projection.type||!1,T={eye:t.camera.eye||[2,0,0],center:t.camera.center||[0,0,0],up:t.camera.up||[0,1,0],zoomMin:t.camera.zoomMax||.1,zoomMax:t.camera.zoomMin||100,mode:t.camera.mode||"turntable",_ortho:w},k=t.axes||{},A=i(r,k);A.enable=!k.disable;var M=t.spikes||{},S=o(r,M),E=[],L=[],C=[],P=[],I=!0,O=!0,z=new Array(16),D=new 
Array(16),R={view:null,projection:z,model:D,_ortho:!1},F=(O=!0,[r.drawingBufferWidth,r.drawingBufferHeight]),B=t.cameraObject||n(e,T),N={gl:r,contextLost:!1,pixelRatio:t.pixelRatio||1,canvas:e,selection:x,camera:B,axes:A,axesPixels:null,spikes:S,bounds:y,objects:E,shape:F,aspect:t.aspectRatio||[1,1,1],pickRadius:t.pickRadius||10,zNear:t.zNear||.01,zFar:t.zFar||1e3,fovy:t.fovy||Math.PI/4,clearColor:t.clearColor||[0,0,0,0],autoResize:v(t.autoResize),autoBounds:v(t.autoBounds),autoScale:!!t.autoScale,autoCenter:v(t.autoCenter),clipToBounds:v(t.clipToBounds),snapToData:!!t.snapToData,onselect:t.onselect||null,onrender:t.onrender||null,onclick:t.onclick||null,cameraParams:R,oncontextloss:null,mouseListener:null,_stopped:!1,getAspectratio:function(){return{x:this.aspect[0],y:this.aspect[1],z:this.aspect[2]}},setAspectratio:function(t){this.aspect[0]=t.x,this.aspect[1]=t.y,this.aspect[2]=t.z,O=!0},setBounds:function(t,e){this.bounds[0][t]=e.min,this.bounds[1][t]=e.max},setClearColor:function(t){this.clearColor=t},clearRGBA:function(){this.gl.clearColor(this.clearColor[0],this.clearColor[1],this.clearColor[2],this.clearColor[3]),this.gl.clear(this.gl.COLOR_BUFFER_BIT|this.gl.DEPTH_BUFFER_BIT)}},j=[r.drawingBufferWidth/N.pixelRatio|0,r.drawingBufferHeight/N.pixelRatio|0];function U(){if(!N._stopped&&N.autoResize){var t=e.parentNode,r=1,n=1;t&&t!==document.body?(r=t.clientWidth,n=t.clientHeight):(r=window.innerWidth,n=window.innerHeight);var i=0|Math.ceil(r*N.pixelRatio),a=0|Math.ceil(n*N.pixelRatio);if(i!==e.width||a!==e.height){e.width=i,e.height=a;var o=e.style;o.position=o.position||"absolute",o.left="0px",o.top="0px",o.width=r+"px",o.height=n+"px",I=!0}}}N.autoResize&&U();function V(){for(var t=E.length,e=P.length,n=0;n0&&0===C[e-1];)C.pop(),P.pop().dispose()}function 
H(){if(N.contextLost)return!0;r.isContextLost()&&(N.contextLost=!0,N.mouseListener.enabled=!1,N.selection.object=null,N.oncontextloss&&N.oncontextloss())}window.addEventListener("resize",U),N.update=function(t){N._stopped||(t=t||{},I=!0,O=!0)},N.add=function(t){N._stopped||(t.axes=A,E.push(t),L.push(-1),I=!0,O=!0,V())},N.remove=function(t){if(!N._stopped){var e=E.indexOf(t);e<0||(E.splice(e,1),L.pop(),I=!0,O=!0,V())}},N.dispose=function(){if(!N._stopped&&(N._stopped=!0,window.removeEventListener("resize",U),e.removeEventListener("webglcontextlost",H),N.mouseListener.enabled=!1,!N.contextLost)){A.dispose(),S.dispose();for(var t=0;tx.distance)continue;for(var c=0;c 1.0) {\n discard;\n }\n baseColor = mix(borderColor, color, step(radius, centerFraction));\n gl_FragColor = vec4(baseColor.rgb * baseColor.a, baseColor.a);\n }\n}\n"]),r.pickVertex=n(["precision mediump float;\n#define GLSLIFY 1\n\nattribute vec2 position;\nattribute vec4 pickId;\n\nuniform mat3 matrix;\nuniform float pointSize;\nuniform vec4 pickOffset;\n\nvarying vec4 fragId;\n\nvoid main() {\n vec3 hgPosition = matrix * vec3(position, 1);\n gl_Position = vec4(hgPosition.xy, 0, hgPosition.z);\n gl_PointSize = pointSize;\n\n vec4 id = pickId + pickOffset;\n id.y += floor(id.x / 256.0);\n id.x -= floor(id.x / 256.0) * 256.0;\n\n id.z += floor(id.y / 256.0);\n id.y -= floor(id.y / 256.0) * 256.0;\n\n id.w += floor(id.z / 256.0);\n id.z -= floor(id.z / 256.0) * 256.0;\n\n fragId = id;\n}\n"]),r.pickFragment=n(["precision mediump float;\n#define GLSLIFY 1\n\nvarying vec4 fragId;\n\nvoid main() {\n float radius = length(2.0 * gl_PointCoord.xy - 1.0);\n if(radius > 1.0) {\n discard;\n }\n gl_FragColor = fragId / 255.0;\n}\n"])},{glslify:231}],123:[function(t,e,r){"use strict";var n=t("gl-shader"),i=t("gl-buffer"),a=t("typedarray-pool"),o=t("./lib/shader");function 
s(t,e,r,n,i){this.plot=t,this.offsetBuffer=e,this.pickBuffer=r,this.shader=n,this.pickShader=i,this.sizeMin=.5,this.sizeMinCap=2,this.sizeMax=20,this.areaRatio=1,this.pointCount=0,this.color=[1,0,0,1],this.borderColor=[0,0,0,1],this.blend=!1,this.pickOffset=0,this.points=null}e.exports=function(t,e){var r=t.gl,a=i(r),l=i(r),c=n(r,o.pointVertex,o.pointFragment),u=n(r,o.pickVertex,o.pickFragment),f=new s(t,a,l,c,u);return f.update(e),t.addObject(f),f};var l,c,u=s.prototype;u.dispose=function(){this.shader.dispose(),this.pickShader.dispose(),this.offsetBuffer.dispose(),this.pickBuffer.dispose(),this.plot.removeObject(this)},u.update=function(t){var e;function r(e,r){return e in t?t[e]:r}t=t||{},this.sizeMin=r("sizeMin",.5),this.sizeMax=r("sizeMax",20),this.color=r("color",[1,0,0,1]).slice(),this.areaRatio=r("areaRatio",1),this.borderColor=r("borderColor",[0,0,0,1]).slice(),this.blend=r("blend",!1);var n=t.positions.length>>>1,i=t.positions instanceof Float32Array,o=t.idToIndex instanceof Int32Array&&t.idToIndex.length>=n,s=t.positions,l=i?s:a.mallocFloat32(s.length),c=o?t.idToIndex:a.mallocInt32(n);if(i||l.set(s),!o)for(l.set(s),e=0;e>>1;for(r=0;r=e[0]&&a<=e[2]&&o>=e[1]&&o<=e[3]&&n++}return n}(this.points,i),u=this.plot.pickPixelRatio*Math.max(Math.min(this.sizeMinCap,this.sizeMin),Math.min(this.sizeMax,this.sizeMax/Math.pow(s,.33333)));l[0]=2/a,l[4]=2/o,l[6]=-2*i[0]/a-1,l[7]=-2*i[1]/o-1,this.offsetBuffer.bind(),r.bind(),r.attributes.position.pointer(),r.uniforms.matrix=l,r.uniforms.color=this.color,r.uniforms.borderColor=this.borderColor,r.uniforms.pointCloud=u<5,r.uniforms.pointSize=u,r.uniforms.centerFraction=Math.min(1,Math.max(0,Math.sqrt(1-this.areaRatio))),e&&(c[0]=255&t,c[1]=t>>8&255,c[2]=t>>16&255,c[3]=t>>24&255,this.pickBuffer.bind(),r.attributes.pickId.pointer(n.UNSIGNED_BYTE),r.uniforms.pickOffset=c,this.pickOffset=t);var f=n.getParameter(n.BLEND),h=n.getParameter(n.DITHER);return 
f&&!this.blend&&n.disable(n.BLEND),h&&n.disable(n.DITHER),n.drawArrays(n.POINTS,0,this.pointCount),f&&!this.blend&&n.enable(n.BLEND),h&&n.enable(n.DITHER),t+this.pointCount}),u.draw=u.unifiedDraw,u.drawPick=u.unifiedDraw,u.pick=function(t,e,r){var n=this.pickOffset,i=this.pointCount;if(r=n+i)return null;var a=r-n,o=this.points;return{object:this,pointId:a,dataCoord:[o[2*a],o[2*a+1]]}}},{"./lib/shader":122,"gl-buffer":78,"gl-shader":132,"typedarray-pool":308}],124:[function(t,e,r){e.exports=function(t,e,r,n){var i,a,o,s,l,c=e[0],u=e[1],f=e[2],h=e[3],p=r[0],d=r[1],m=r[2],g=r[3];(a=c*p+u*d+f*m+h*g)<0&&(a=-a,p=-p,d=-d,m=-m,g=-g);1-a>1e-6?(i=Math.acos(a),o=Math.sin(i),s=Math.sin((1-n)*i)/o,l=Math.sin(n*i)/o):(s=1-n,l=n);return t[0]=s*c+l*p,t[1]=s*u+l*d,t[2]=s*f+l*m,t[3]=s*h+l*g,t}},{}],125:[function(t,e,r){"use strict";e.exports=function(t){return t||0===t?t.toString():""}},{}],126:[function(t,e,r){"use strict";var n=t("vectorize-text");e.exports=function(t,e,r){var a=i[e];a||(a=i[e]={});if(t in a)return a[t];var o={textAlign:"center",textBaseline:"middle",lineHeight:1,font:e,lineSpacing:1.25,styletags:{breaklines:!0,bolds:!0,italics:!0,subscripts:!0,superscripts:!0},triangles:!0},s=n(t,o);o.triangles=!1;var l,c,u=n(t,o);if(r&&1!==r){for(l=0;l max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nattribute vec3 position;\nattribute vec4 color;\nattribute vec2 glyph;\nattribute vec4 id;\n\nuniform vec4 highlightId;\nuniform float highlightScale;\nuniform mat4 model, view, projection;\nuniform vec3 clipBounds[2];\n\nvarying vec4 interpColor;\nvarying vec4 pickId;\nvarying vec3 dataCoordinate;\n\nvoid main() {\n if 
(outOfRange(clipBounds[0], clipBounds[1], position)) {\n\n gl_Position = vec4(0,0,0,0);\n } else {\n float scale = 1.0;\n if(distance(highlightId, id) < 0.0001) {\n scale = highlightScale;\n }\n\n vec4 worldPosition = model * vec4(position, 1);\n vec4 viewPosition = view * worldPosition;\n viewPosition = viewPosition / viewPosition.w;\n vec4 clipPosition = projection * (viewPosition + scale * vec4(glyph.x, -glyph.y, 0, 0));\n\n gl_Position = clipPosition;\n interpColor = color;\n pickId = id;\n dataCoordinate = position;\n }\n}"]),o=i(["precision highp float;\n#define GLSLIFY 1\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nattribute vec3 position;\nattribute vec4 color;\nattribute vec2 glyph;\nattribute vec4 id;\n\nuniform mat4 model, view, projection;\nuniform vec2 screenSize;\nuniform vec3 clipBounds[2];\nuniform float highlightScale, pixelRatio;\nuniform vec4 highlightId;\n\nvarying vec4 interpColor;\nvarying vec4 pickId;\nvarying vec3 dataCoordinate;\n\nvoid main() {\n if (outOfRange(clipBounds[0], clipBounds[1], position)) {\n\n gl_Position = vec4(0,0,0,0);\n } else {\n float scale = pixelRatio;\n if(distance(highlightId.bgr, id.bgr) < 0.001) {\n scale *= highlightScale;\n }\n\n vec4 worldPosition = model * vec4(position, 1.0);\n vec4 viewPosition = view * worldPosition;\n vec4 clipPosition = projection * viewPosition;\n clipPosition /= clipPosition.w;\n\n gl_Position = clipPosition + vec4(screenSize * scale * vec2(glyph.x, -glyph.y), 0.0, 0.0);\n interpColor = color;\n pickId = id;\n dataCoordinate = position;\n }\n}"]),s=i(["precision highp 
float;\n#define GLSLIFY 1\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nattribute vec3 position;\nattribute vec4 color;\nattribute vec2 glyph;\nattribute vec4 id;\n\nuniform float highlightScale;\nuniform vec4 highlightId;\nuniform vec3 axes[2];\nuniform mat4 model, view, projection;\nuniform vec2 screenSize;\nuniform vec3 clipBounds[2];\nuniform float scale, pixelRatio;\n\nvarying vec4 interpColor;\nvarying vec4 pickId;\nvarying vec3 dataCoordinate;\n\nvoid main() {\n if (outOfRange(clipBounds[0], clipBounds[1], position)) {\n\n gl_Position = vec4(0,0,0,0);\n } else {\n float lscale = pixelRatio * scale;\n if(distance(highlightId, id) < 0.0001) {\n lscale *= highlightScale;\n }\n\n vec4 clipCenter = projection * view * model * vec4(position, 1);\n vec3 dataPosition = position + 0.5*lscale*(axes[0] * glyph.x + axes[1] * glyph.y) * clipCenter.w * screenSize.y;\n vec4 clipPosition = projection * view * model * vec4(dataPosition, 1);\n\n gl_Position = clipPosition;\n interpColor = color;\n pickId = id;\n dataCoordinate = dataPosition;\n }\n}\n"]),l=i(["precision highp float;\n#define GLSLIFY 1\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nuniform vec3 
fragClipBounds[2];\nuniform float opacity;\n\nvarying vec4 interpColor;\nvarying vec3 dataCoordinate;\n\nvoid main() {\n if (\n outOfRange(fragClipBounds[0], fragClipBounds[1], dataCoordinate) ||\n interpColor.a * opacity == 0.\n ) discard;\n gl_FragColor = interpColor * opacity;\n}\n"]),c=i(["precision highp float;\n#define GLSLIFY 1\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nuniform vec3 fragClipBounds[2];\nuniform float pickGroup;\n\nvarying vec4 pickId;\nvarying vec3 dataCoordinate;\n\nvoid main() {\n if (outOfRange(fragClipBounds[0], fragClipBounds[1], dataCoordinate)) discard;\n\n gl_FragColor = vec4(pickGroup, pickId.bgr);\n}"]),u=[{name:"position",type:"vec3"},{name:"color",type:"vec4"},{name:"glyph",type:"vec2"},{name:"id",type:"vec4"}],f={vertex:a,fragment:l,attributes:u},h={vertex:o,fragment:l,attributes:u},p={vertex:s,fragment:l,attributes:u},d={vertex:a,fragment:c,attributes:u},m={vertex:o,fragment:c,attributes:u},g={vertex:s,fragment:c,attributes:u};function v(t,e){var r=n(t,e),i=r.attributes;return i.position.location=0,i.color.location=1,i.glyph.location=2,i.id.location=3,r}r.createPerspective=function(t){return v(t,f)},r.createOrtho=function(t){return v(t,h)},r.createProject=function(t){return v(t,p)},r.createPickPerspective=function(t){return v(t,d)},r.createPickOrtho=function(t){return v(t,m)},r.createPickProject=function(t){return v(t,g)}},{"gl-shader":132,glslify:231}],128:[function(t,e,r){"use strict";var 
n=t("is-string-blank"),i=t("gl-buffer"),a=t("gl-vao"),o=t("typedarray-pool"),s=t("gl-mat4/multiply"),l=t("./lib/shaders"),c=t("./lib/glyphs"),u=t("./lib/get-simple-string"),f=[1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1];function h(t,e){var r=t[0],n=t[1],i=t[2],a=t[3];return t[0]=e[0]*r+e[4]*n+e[8]*i+e[12]*a,t[1]=e[1]*r+e[5]*n+e[9]*i+e[13]*a,t[2]=e[2]*r+e[6]*n+e[10]*i+e[14]*a,t[3]=e[3]*r+e[7]*n+e[11]*i+e[15]*a,t}function p(t,e,r,n){return h(n,n),h(n,n),h(n,n)}function d(t,e){this.index=t,this.dataCoordinate=this.position=e}function m(t){return!0===t||t>1?1:t}function g(t,e,r,n,i,a,o,s,l,c,u,f){this.gl=t,this.pixelRatio=1,this.shader=e,this.orthoShader=r,this.projectShader=n,this.pointBuffer=i,this.colorBuffer=a,this.glyphBuffer=o,this.idBuffer=s,this.vao=l,this.vertexCount=0,this.lineVertexCount=0,this.opacity=1,this.hasAlpha=!1,this.lineWidth=0,this.projectScale=[2/3,2/3,2/3],this.projectOpacity=[1,1,1],this.projectHasAlpha=!1,this.pickId=0,this.pickPerspectiveShader=c,this.pickOrthoShader=u,this.pickProjectShader=f,this.points=[],this._selectResult=new d(0,[0,0,0]),this.useOrtho=!0,this.bounds=[[1/0,1/0,1/0],[-1/0,-1/0,-1/0]],this.axesProject=[!0,!0,!0],this.axesBounds=[[-1/0,-1/0,-1/0],[1/0,1/0,1/0]],this.highlightId=[1,1,1,1],this.highlightScale=2,this.clipBounds=[[-1/0,-1/0,-1/0],[1/0,1/0,1/0]],this.dirty=!0}e.exports=function(t){var e=t.gl,r=l.createPerspective(e),n=l.createOrtho(e),o=l.createProject(e),s=l.createPickPerspective(e),c=l.createPickOrtho(e),u=l.createPickProject(e),f=i(e),h=i(e),p=i(e),d=i(e),m=a(e,[{buffer:f,size:3,type:e.FLOAT},{buffer:h,size:4,type:e.FLOAT},{buffer:p,size:2,type:e.FLOAT},{buffer:d,size:4,type:e.UNSIGNED_BYTE,normalized:!0}]),v=new g(e,r,n,o,f,h,p,d,m,s,c,u);return v.update(t),v};var v=g.prototype;v.pickSlots=1,v.setPickBase=function(t){this.pickId=t},v.isTransparent=function(){if(this.hasAlpha)return!0;for(var 
t=0;t<3;++t)if(this.axesProject[t]&&this.projectHasAlpha)return!0;return!1},v.isOpaque=function(){if(!this.hasAlpha)return!0;for(var t=0;t<3;++t)if(this.axesProject[t]&&!this.projectHasAlpha)return!0;return!1};var y=[0,0],x=[0,0,0],b=[0,0,0],_=[0,0,0,1],w=[0,0,0,1],T=f.slice(),k=[0,0,0],A=[[0,0,0],[0,0,0]];function M(t){return t[0]=t[1]=t[2]=0,t}function S(t,e){return t[0]=e[0],t[1]=e[1],t[2]=e[2],t[3]=1,t}function E(t,e,r,n){return t[0]=e[0],t[1]=e[1],t[2]=e[2],t[r]=n,t}function L(t,e,r,n){var i,a=e.axesProject,o=e.gl,l=t.uniforms,c=r.model||f,u=r.view||f,h=r.projection||f,d=e.axesBounds,m=function(t){for(var e=A,r=0;r<2;++r)for(var n=0;n<3;++n)e[r][n]=Math.max(Math.min(t[r][n],1e8),-1e8);return e}(e.clipBounds);i=e.axes&&e.axes.lastCubeProps?e.axes.lastCubeProps.axis:[1,1,1],y[0]=2/o.drawingBufferWidth,y[1]=2/o.drawingBufferHeight,t.bind(),l.view=u,l.projection=h,l.screenSize=y,l.highlightId=e.highlightId,l.highlightScale=e.highlightScale,l.clipBounds=m,l.pickGroup=e.pickId/255,l.pixelRatio=n;for(var g=0;g<3;++g)if(a[g]){l.scale=e.projectScale[g],l.opacity=e.projectOpacity[g];for(var v=T,L=0;L<16;++L)v[L]=0;for(L=0;L<4;++L)v[5*L]=1;v[5*g]=0,i[g]<0?v[12+g]=d[0][g]:v[12+g]=d[1][g],s(v,c,v),l.model=v;var C=(g+1)%3,P=(g+2)%3,I=M(x),O=M(b);I[C]=1,O[P]=1;var z=p(0,0,0,S(_,I)),D=p(0,0,0,S(w,O));if(Math.abs(z[1])>Math.abs(D[1])){var R=z;z=D,D=R,R=I,I=O,O=R;var F=C;C=P,P=F}z[0]<0&&(I[C]=-1),D[1]>0&&(O[P]=-1);var B=0,N=0;for(L=0;L<4;++L)B+=Math.pow(c[4*C+L],2),N+=Math.pow(c[4*P+L],2);I[C]/=Math.sqrt(B),O[P]/=Math.sqrt(N),l.axes[0]=I,l.axes[1]=O,l.fragClipBounds[0]=E(k,m[0],g,-1e8),l.fragClipBounds[1]=E(k,m[1],g,1e8),e.vao.bind(),e.vao.draw(o.TRIANGLES,e.vertexCount),e.lineWidth>0&&(o.lineWidth(e.lineWidth*n),e.vao.draw(o.LINES,e.lineVertexCount,e.vertexCount)),e.vao.unbind()}}var C=[[-1e8,-1e8,-1e8],[1e8,1e8,1e8]];function P(t,e,r,n,i,a,o){var s=r.gl;if((a===r.projectHasAlpha||o)&&L(e,r,n,i),a===r.hasAlpha||o){t.bind();var 
l=t.uniforms;l.model=n.model||f,l.view=n.view||f,l.projection=n.projection||f,y[0]=2/s.drawingBufferWidth,y[1]=2/s.drawingBufferHeight,l.screenSize=y,l.highlightId=r.highlightId,l.highlightScale=r.highlightScale,l.fragClipBounds=C,l.clipBounds=r.axes.bounds,l.opacity=r.opacity,l.pickGroup=r.pickId/255,l.pixelRatio=i,r.vao.bind(),r.vao.draw(s.TRIANGLES,r.vertexCount),r.lineWidth>0&&(s.lineWidth(r.lineWidth*i),r.vao.draw(s.LINES,r.lineVertexCount,r.vertexCount)),r.vao.unbind()}}function I(t,e,r,i){var a;a=Array.isArray(t)?e=this.pointCount||e<0)return null;var r=this.points[e],n=this._selectResult;n.index=e;for(var i=0;i<3;++i)n.position[i]=n.dataCoordinate[i]=r[i];return n},v.highlight=function(t){if(t){var e=t.index,r=255&e,n=e>>8&255,i=e>>16&255;this.highlightId=[r/255,n/255,i/255,0]}else this.highlightId=[1,1,1,1]},v.update=function(t){if("perspective"in(t=t||{})&&(this.useOrtho=!t.perspective),"orthographic"in t&&(this.useOrtho=!!t.orthographic),"lineWidth"in t&&(this.lineWidth=t.lineWidth),"project"in t)if(Array.isArray(t.project))this.axesProject=t.project;else{var e=!!t.project;this.axesProject=[e,e,e]}if("projectScale"in t)if(Array.isArray(t.projectScale))this.projectScale=t.projectScale.slice();else{var r=+t.projectScale;this.projectScale=[r,r,r]}if(this.projectHasAlpha=!1,"projectOpacity"in t){if(Array.isArray(t.projectOpacity))this.projectOpacity=t.projectOpacity.slice();else{r=+t.projectOpacity;this.projectOpacity=[r,r,r]}for(var n=0;n<3;++n)this.projectOpacity[n]=m(this.projectOpacity[n]),this.projectOpacity[n]<1&&(this.projectHasAlpha=!0)}this.hasAlpha=!1,"opacity"in t&&(this.opacity=m(t.opacity),this.opacity<1&&(this.hasAlpha=!0)),this.dirty=!0;var i,a,s=t.position,l=t.font||"normal",c=t.alignment||[0,0];if(2===c.length)i=c[0],a=c[1];else{i=[],a=[];for(n=0;n0){var 
O=0,z=x,D=[0,0,0,1],R=[0,0,0,1],F=Array.isArray(p)&&Array.isArray(p[0]),B=Array.isArray(v)&&Array.isArray(v[0]);t:for(n=0;n<_;++n){y+=1;for(w=s[n],T=0;T<3;++T){if(isNaN(w[T])||!isFinite(w[T]))continue t;f[T]=Math.max(f[T],w[T]),u[T]=Math.min(u[T],w[T])}k=(N=I(h,n,l,this.pixelRatio)).mesh,A=N.lines,M=N.bounds;var N,j=N.visible;if(j)if(Array.isArray(p)){if(3===(U=F?n0?1-M[0][0]:Y<0?1+M[1][0]:1,W*=W>0?1-M[0][1]:W<0?1+M[1][1]:1],Z=k.cells||[],J=k.positions||[];for(T=0;T0){var v=r*u;o.drawBox(f-v,h-v,p+v,h+v,a),o.drawBox(f-v,d-v,p+v,d+v,a),o.drawBox(f-v,h-v,f+v,d+v,a),o.drawBox(p-v,h-v,p+v,d+v,a)}}}},s.update=function(t){t=t||{},this.innerFill=!!t.innerFill,this.outerFill=!!t.outerFill,this.innerColor=(t.innerColor||[0,0,0,.5]).slice(),this.outerColor=(t.outerColor||[0,0,0,.5]).slice(),this.borderColor=(t.borderColor||[0,0,0,1]).slice(),this.borderWidth=t.borderWidth||0,this.selectBox=(t.selectBox||this.selectBox).slice()},s.dispose=function(){this.boxBuffer.dispose(),this.boxShader.dispose(),this.plot.removeOverlay(this)}},{"./lib/shaders":129,"gl-buffer":78,"gl-shader":132}],131:[function(t,e,r){"use strict";e.exports=function(t,e){var r=e[0],a=e[1],o=n(t,r,a,{}),s=i.mallocUint8(r*a*4);return new l(t,o,s)};var n=t("gl-fbo"),i=t("typedarray-pool"),a=t("ndarray"),o=t("bit-twiddle").nextPow2;function s(t,e,r,n,i){this.coord=[t,e],this.id=r,this.value=n,this.distance=i}function l(t,e,r){this.gl=t,this.fbo=e,this.buffer=r,this._readTimeout=null;var n=this;this._readCallback=function(){n.gl&&(e.bind(),t.readPixels(0,0,e.shape[0],e.shape[1],t.RGBA,t.UNSIGNED_BYTE,n.buffer),n._readTimeout=null)}}var c=l.prototype;Object.defineProperty(c,"shape",{get:function(){return this.gl?this.fbo.shape.slice():[0,0]},set:function(t){if(this.gl){this.fbo.shape=t;var e=this.fbo.shape[0],r=this.fbo.shape[1];if(r*e*4>this.buffer.length){i.free(this.buffer);for(var n=this.buffer=i.mallocUint8(o(r*e*4)),a=0;ar)for(t=r;te)for(t=e;t=0){for(var T=0|w.type.charAt(w.type.length-1),k=new 
Array(T),A=0;A=0;)M+=1;_[y]=M}var S=new Array(r.length);function E(){h.program=o.program(p,h._vref,h._fref,b,_);for(var t=0;t=0){if((d=h.charCodeAt(h.length-1)-48)<2||d>4)throw new n("","Invalid data type for attribute "+f+": "+h);s(t,e,p[0],i,d,a,f)}else{if(!(h.indexOf("mat")>=0))throw new n("","Unknown data type for attribute "+f+": "+h);var d;if((d=h.charCodeAt(h.length-1)-48)<2||d>4)throw new n("","Invalid data type for attribute "+f+": "+h);l(t,e,p,i,d,a,f)}}}return a};var n=t("./GLError");function i(t,e,r,n,i,a){this._gl=t,this._wrapper=e,this._index=r,this._locations=n,this._dimension=i,this._constFunc=a}var a=i.prototype;a.pointer=function(t,e,r,n){var i=this._gl,a=this._locations[this._index];i.vertexAttribPointer(a,this._dimension,t||i.FLOAT,!!e,r||0,n||0),i.enableVertexAttribArray(a)},a.set=function(t,e,r,n){return this._constFunc(this._locations[this._index],t,e,r,n)},Object.defineProperty(a,"location",{get:function(){return this._locations[this._index]},set:function(t){return t!==this._locations[this._index]&&(this._locations[this._index]=0|t,this._wrapper.program=null),0|t}});var o=[function(t,e,r){return void 0===r.length?t.vertexAttrib1f(e,r):t.vertexAttrib1fv(e,r)},function(t,e,r,n){return void 0===r.length?t.vertexAttrib2f(e,r,n):t.vertexAttrib2fv(e,r)},function(t,e,r,n,i){return void 0===r.length?t.vertexAttrib3f(e,r,n,i):t.vertexAttrib3fv(e,r)},function(t,e,r,n,i,a){return void 0===r.length?t.vertexAttrib4f(e,r,n,i,a):t.vertexAttrib4fv(e,r)}];function s(t,e,r,n,a,s,l){var c=o[a],u=new i(t,e,r,n,a,c);Object.defineProperty(s,l,{set:function(e){return t.disableVertexAttribArray(n[r]),c(t,n[r],e),e},get:function(){return u},enumerable:!0})}function l(t,e,r,n,i,a,o){for(var l=new Array(i),c=new Array(i),u=0;u4)throw new i("","Invalid uniform dimension type for matrix "+name+": "+v);t["uniformMatrix"+g+"fv"](s[u],!1,f);break}throw new i("","Unknown uniform data type for "+name+": "+v)}if((g=v.charCodeAt(v.length-1)-48)<2||g>4)throw new i("","Invalid 
data type");switch(v.charAt(0)){case"b":case"i":t["uniform"+g+"iv"](s[u],f);break;case"v":t["uniform"+g+"fv"](s[u],f);break;default:throw new i("","Unrecognized data type for vector "+name+": "+v)}}}}}}function c(t,e,n){if("object"==typeof n){var c=u(n);Object.defineProperty(t,e,{get:a(c),set:l(n),enumerable:!0,configurable:!1})}else s[n]?Object.defineProperty(t,e,{get:(f=n,function(t,e,r){return t.getUniform(e.program,r[f])}),set:l(n),enumerable:!0,configurable:!1}):t[e]=function(t){switch(t){case"bool":return!1;case"int":case"sampler2D":case"samplerCube":case"float":return 0;default:var e=t.indexOf("vec");if(0<=e&&e<=1&&t.length===4+e){if((r=t.charCodeAt(t.length-1)-48)<2||r>4)throw new i("","Invalid data type");return"b"===t.charAt(0)?o(r,!1):o(r,0)}if(0===t.indexOf("mat")&&4===t.length){var r;if((r=t.charCodeAt(t.length-1)-48)<2||r>4)throw new i("","Invalid uniform dimension type for matrix "+name+": "+t);return o(r*r,0)}throw new i("","Unknown uniform data type for "+name+": "+t)}}(r[n].type);var f}function u(t){var e;if(Array.isArray(t)){e=new Array(t.length);for(var r=0;r1){s[0]in a||(a[s[0]]=[]),a=a[s[0]];for(var l=1;l1)for(var l=0;l 0 U ||b|| > 0.\n // Assign z = 0, x = -b, y = a:\n // a*-b + b*a + c*0 = -ba + ba + 0 = 0\n if (v.x*v.x > v.z*v.z || v.y*v.y > v.z*v.z) {\n return normalize(vec3(-v.y, v.x, 0.0));\n } else {\n return normalize(vec3(0.0, v.z, -v.y));\n }\n}\n\n// Calculate the tube vertex and normal at the given index.\n//\n// The returned vertex is for a tube ring with its center at origin, radius of length(d), pointing in the direction of d.\n//\n// Each tube segment is made up of a ring of vertices.\n// These vertices are used to make up the triangles of the tube by connecting them together in the vertex array.\n// The indexes of tube segments run from 0 to 8.\n//\nvec3 getTubePosition(vec3 d, float index, out vec3 normal) {\n float segmentCount = 8.0;\n\n float angle = 2.0 * 3.14159 * (index / segmentCount);\n\n vec3 u = 
getOrthogonalVector(d);\n vec3 v = normalize(cross(u, d));\n\n vec3 x = u * cos(angle) * length(d);\n vec3 y = v * sin(angle) * length(d);\n vec3 v3 = x + y;\n\n normal = normalize(v3);\n\n return v3;\n}\n\nattribute vec4 vector;\nattribute vec4 color, position;\nattribute vec2 uv;\n\nuniform float vectorScale, tubeScale;\nuniform mat4 model, view, projection, inverseModel;\nuniform vec3 eyePosition, lightPosition;\n\nvarying vec3 f_normal, f_lightDirection, f_eyeDirection, f_data, f_position;\nvarying vec4 f_color;\nvarying vec2 f_uv;\n\nvoid main() {\n // Scale the vector magnitude to stay constant with\n // model & view changes.\n vec3 normal;\n vec3 XYZ = getTubePosition(mat3(model) * (tubeScale * vector.w * normalize(vector.xyz)), position.w, normal);\n vec4 tubePosition = model * vec4(position.xyz, 1.0) + vec4(XYZ, 0.0);\n\n //Lighting geometry parameters\n vec4 cameraCoordinate = view * tubePosition;\n cameraCoordinate.xyz /= cameraCoordinate.w;\n f_lightDirection = lightPosition - cameraCoordinate.xyz;\n f_eyeDirection = eyePosition - cameraCoordinate.xyz;\n f_normal = normalize((vec4(normal, 0.0) * inverseModel).xyz);\n\n // vec4 m_position = model * vec4(tubePosition, 1.0);\n vec4 t_position = view * tubePosition;\n gl_Position = projection * t_position;\n\n f_color = color;\n f_data = tubePosition.xyz;\n f_position = position.xyz;\n f_uv = uv;\n}\n"]),a=n(["#extension GL_OES_standard_derivatives : enable\n\nprecision highp float;\n#define GLSLIFY 1\n\nfloat beckmannDistribution(float x, float roughness) {\n float NdotH = max(x, 0.0001);\n float cos2Alpha = NdotH * NdotH;\n float tan2Alpha = (cos2Alpha - 1.0) / cos2Alpha;\n float roughness2 = roughness * roughness;\n float denom = 3.141592653589793 * roughness2 * cos2Alpha * cos2Alpha;\n return exp(tan2Alpha / roughness2) / denom;\n}\n\nfloat cookTorranceSpecular(\n vec3 lightDirection,\n vec3 viewDirection,\n vec3 surfaceNormal,\n float roughness,\n float fresnel) {\n\n float VdotN = 
max(dot(viewDirection, surfaceNormal), 0.0);\n float LdotN = max(dot(lightDirection, surfaceNormal), 0.0);\n\n //Half angle vector\n vec3 H = normalize(lightDirection + viewDirection);\n\n //Geometric term\n float NdotH = max(dot(surfaceNormal, H), 0.0);\n float VdotH = max(dot(viewDirection, H), 0.000001);\n float LdotH = max(dot(lightDirection, H), 0.000001);\n float G1 = (2.0 * NdotH * VdotN) / VdotH;\n float G2 = (2.0 * NdotH * LdotN) / LdotH;\n float G = min(1.0, min(G1, G2));\n \n //Distribution term\n float D = beckmannDistribution(NdotH, roughness);\n\n //Fresnel term\n float F = pow(1.0 - VdotN, fresnel);\n\n //Multiply terms and done\n return G * F * D / max(3.14159265 * VdotN, 0.000001);\n}\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nuniform vec3 clipBounds[2];\nuniform float roughness, fresnel, kambient, kdiffuse, kspecular, opacity;\nuniform sampler2D texture;\n\nvarying vec3 f_normal, f_lightDirection, f_eyeDirection, f_data, f_position;\nvarying vec4 f_color;\nvarying vec2 f_uv;\n\nvoid main() {\n if (outOfRange(clipBounds[0], clipBounds[1], f_position)) discard;\n vec3 N = normalize(f_normal);\n vec3 L = normalize(f_lightDirection);\n vec3 V = normalize(f_eyeDirection);\n\n if(gl_FrontFacing) {\n N = -N;\n }\n\n float specular = min(1.0, max(0.0, cookTorranceSpecular(L, V, N, roughness, fresnel)));\n float diffuse = min(kambient + kdiffuse * max(dot(N, L), 0.0), 1.0);\n\n vec4 surfaceColor = f_color * texture2D(texture, f_uv);\n vec4 litColor = surfaceColor.a * vec4(diffuse * surfaceColor.rgb + kspecular * vec3(1,1,1) * specular, 1.0);\n\n 
gl_FragColor = litColor * opacity;\n}\n"]),o=n(["precision highp float;\n\nprecision highp float;\n#define GLSLIFY 1\n\nvec3 getOrthogonalVector(vec3 v) {\n // Return up-vector for only-z vector.\n // Return ax + by + cz = 0, a point that lies on the plane that has v as a normal and that isn't (0,0,0).\n // From the above if-statement we have ||a|| > 0 U ||b|| > 0.\n // Assign z = 0, x = -b, y = a:\n // a*-b + b*a + c*0 = -ba + ba + 0 = 0\n if (v.x*v.x > v.z*v.z || v.y*v.y > v.z*v.z) {\n return normalize(vec3(-v.y, v.x, 0.0));\n } else {\n return normalize(vec3(0.0, v.z, -v.y));\n }\n}\n\n// Calculate the tube vertex and normal at the given index.\n//\n// The returned vertex is for a tube ring with its center at origin, radius of length(d), pointing in the direction of d.\n//\n// Each tube segment is made up of a ring of vertices.\n// These vertices are used to make up the triangles of the tube by connecting them together in the vertex array.\n// The indexes of tube segments run from 0 to 8.\n//\nvec3 getTubePosition(vec3 d, float index, out vec3 normal) {\n float segmentCount = 8.0;\n\n float angle = 2.0 * 3.14159 * (index / segmentCount);\n\n vec3 u = getOrthogonalVector(d);\n vec3 v = normalize(cross(u, d));\n\n vec3 x = u * cos(angle) * length(d);\n vec3 y = v * sin(angle) * length(d);\n vec3 v3 = x + y;\n\n normal = normalize(v3);\n\n return v3;\n}\n\nattribute vec4 vector;\nattribute vec4 position;\nattribute vec4 id;\n\nuniform mat4 model, view, projection;\nuniform float tubeScale;\n\nvarying vec3 f_position;\nvarying vec4 f_id;\n\nvoid main() {\n vec3 normal;\n vec3 XYZ = getTubePosition(mat3(model) * (tubeScale * vector.w * normalize(vector.xyz)), position.w, normal);\n vec4 tubePosition = model * vec4(position.xyz, 1.0) + vec4(XYZ, 0.0);\n\n gl_Position = projection * view * tubePosition;\n f_id = id;\n f_position = position.xyz;\n}\n"]),s=n(["precision highp float;\n#define GLSLIFY 1\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, 
b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nuniform vec3 clipBounds[2];\nuniform float pickId;\n\nvarying vec3 f_position;\nvarying vec4 f_id;\n\nvoid main() {\n if (outOfRange(clipBounds[0], clipBounds[1], f_position)) discard;\n\n gl_FragColor = vec4(pickId, f_id.xyz);\n}"]);r.meshShader={vertex:i,fragment:a,attributes:[{name:"position",type:"vec4"},{name:"color",type:"vec4"},{name:"uv",type:"vec2"},{name:"vector",type:"vec4"}]},r.pickShader={vertex:o,fragment:s,attributes:[{name:"position",type:"vec4"},{name:"id",type:"vec4"},{name:"vector",type:"vec4"}]}},{glslify:231}],143:[function(t,e,r){"use strict";var n=t("gl-vec3"),i=t("gl-vec4"),a=["xyz","xzy","yxz","yzx","zxy","zyx"],o=function(t,e,r,a){for(var o=0,s=0;s0)for(T=0;T<8;T++){var k=(T+1)%8;c.push(h[T],p[T],p[k],p[k],h[k],h[T]),f.push(y,v,v,v,y,y),d.push(m,g,g,g,m,m);var A=c.length;u.push([A-6,A-5,A-4],[A-3,A-2,A-1])}var M=h;h=p,p=M;var S=y;y=v,v=S;var E=m;m=g,g=E}return{positions:c,cells:u,vectors:f,vertexIntensity:d}}(t,r,a,o)})),f=[],h=[],p=[],d=[];for(s=0;se)return r-1}return r},l=function(t,e,r){return tr?r:t},c=function(t){var e=1/0;t.sort((function(t,e){return t-e}));for(var r=t.length,n=1;nf-1||y>h-1||x>p-1)return n.create();var b,_,w,T,k,A,M=a[0][d],S=a[0][v],E=a[1][m],L=a[1][y],C=a[2][g],P=(o-M)/(S-M),I=(c-E)/(L-E),O=(u-C)/(a[2][x]-C);switch(isFinite(P)||(P=.5),isFinite(I)||(I=.5),isFinite(O)||(O=.5),r.reversedX&&(d=f-1-d,v=f-1-v),r.reversedY&&(m=h-1-m,y=h-1-y),r.reversedZ&&(g=p-1-g,x=p-1-x),r.filled){case 5:k=g,A=x,w=m*p,T=y*p,b=d*p*h,_=v*p*h;break;case 4:k=g,A=x,b=d*p,_=v*p,w=m*p*f,T=y*p*f;break;case 3:w=m,T=y,k=g*h,A=x*h,b=d*h*p,_=v*h*p;break;case 
2:w=m,T=y,b=d*h,_=v*h,k=g*h*f,A=x*h*f;break;case 1:b=d,_=v,k=g*f,A=x*f,w=m*f*p,T=y*f*p;break;default:b=d,_=v,w=m*f,T=y*f,k=g*f*h,A=x*f*h}var z=i[b+w+k],D=i[b+w+A],R=i[b+T+k],F=i[b+T+A],B=i[_+w+k],N=i[_+w+A],j=i[_+T+k],U=i[_+T+A],V=n.create(),H=n.create(),q=n.create(),G=n.create();n.lerp(V,z,B,P),n.lerp(H,D,N,P),n.lerp(q,R,j,P),n.lerp(G,F,U,P);var Y=n.create(),W=n.create();n.lerp(Y,V,q,I),n.lerp(W,H,G,I);var X=n.create();return n.lerp(X,Y,W,O),X}(e,t,p)},m=t.getDivergence||function(t,e){var r=n.create(),i=1e-4;n.add(r,t,[i,0,0]);var a=d(r);n.subtract(a,a,e),n.scale(a,a,1/i),n.add(r,t,[0,i,0]);var o=d(r);n.subtract(o,o,e),n.scale(o,o,1/i),n.add(r,t,[0,0,i]);var s=d(r);return n.subtract(s,s,e),n.scale(s,s,1/i),n.add(r,a,o),n.add(r,r,s),r},g=[],v=e[0][0],y=e[0][1],x=e[0][2],b=e[1][0],_=e[1][1],w=e[1][2],T=function(t){var e=t[0],r=t[1],n=t[2];return!(eb||r_||nw)},k=10*n.distance(e[0],e[1])/i,A=k*k,M=1,S=0,E=r.length;E>1&&(M=function(t){for(var e=[],r=[],n=[],i={},a={},o={},s=t.length,l=0;lS&&(S=F),D.push(F),g.push({points:P,velocities:I,divergences:D});for(var B=0;B<100*i&&P.lengthA&&n.scale(N,N,k/Math.sqrt(j)),n.add(N,N,C),O=d(N),n.squaredDistance(z,N)-A>-1e-4*A){P.push(N),z=N,I.push(O);R=m(N,O),F=n.length(R);isFinite(F)&&F>S&&(S=F),D.push(F)}C=N}}var U=o(g,t.colormap,S,M);return f?U.tubeScale=f:(0===S&&(S=1),U.tubeScale=.5*u*M/S),U};var u=t("./lib/shaders"),f=t("gl-cone3d").createMesh;e.exports.createTubeMesh=function(t,e){return f(t,e,{shaders:u,traceType:"streamtube"})}},{"./lib/shaders":142,"gl-cone3d":79,"gl-vec3":169,"gl-vec4":205}],144:[function(t,e,r){var n=t("gl-shader"),i=t("glslify"),a=i(["precision highp float;\n#define GLSLIFY 1\n\nattribute vec4 uv;\nattribute vec3 f;\nattribute vec3 normal;\n\nuniform vec3 objectOffset;\nuniform mat4 model, view, projection, inverseModel;\nuniform vec3 lightPosition, eyePosition;\nuniform sampler2D colormap;\n\nvarying float value, kill;\nvarying vec3 worldCoordinate;\nvarying vec2 planeCoordinate;\nvarying vec3 
lightDirection, eyeDirection, surfaceNormal;\nvarying vec4 vColor;\n\nvoid main() {\n vec3 localCoordinate = vec3(uv.zw, f.x);\n worldCoordinate = objectOffset + localCoordinate;\n vec4 worldPosition = model * vec4(worldCoordinate, 1.0);\n vec4 clipPosition = projection * view * worldPosition;\n gl_Position = clipPosition;\n kill = f.y;\n value = f.z;\n planeCoordinate = uv.xy;\n\n vColor = texture2D(colormap, vec2(value, value));\n\n //Lighting geometry parameters\n vec4 cameraCoordinate = view * worldPosition;\n cameraCoordinate.xyz /= cameraCoordinate.w;\n lightDirection = lightPosition - cameraCoordinate.xyz;\n eyeDirection = eyePosition - cameraCoordinate.xyz;\n surfaceNormal = normalize((vec4(normal,0) * inverseModel).xyz);\n}\n"]),o=i(["precision highp float;\n#define GLSLIFY 1\n\nfloat beckmannDistribution(float x, float roughness) {\n float NdotH = max(x, 0.0001);\n float cos2Alpha = NdotH * NdotH;\n float tan2Alpha = (cos2Alpha - 1.0) / cos2Alpha;\n float roughness2 = roughness * roughness;\n float denom = 3.141592653589793 * roughness2 * cos2Alpha * cos2Alpha;\n return exp(tan2Alpha / roughness2) / denom;\n}\n\nfloat beckmannSpecular(\n vec3 lightDirection,\n vec3 viewDirection,\n vec3 surfaceNormal,\n float roughness) {\n return beckmannDistribution(dot(surfaceNormal, normalize(lightDirection + viewDirection)), roughness);\n}\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nuniform vec3 lowerBound, upperBound;\nuniform float contourTint;\nuniform vec4 contourColor;\nuniform sampler2D colormap;\nuniform vec3 clipBounds[2];\nuniform float roughness, 
fresnel, kambient, kdiffuse, kspecular, opacity;\nuniform float vertexColor;\n\nvarying float value, kill;\nvarying vec3 worldCoordinate;\nvarying vec3 lightDirection, eyeDirection, surfaceNormal;\nvarying vec4 vColor;\n\nvoid main() {\n if (\n kill > 0.0 ||\n vColor.a == 0.0 ||\n outOfRange(clipBounds[0], clipBounds[1], worldCoordinate)\n ) discard;\n\n vec3 N = normalize(surfaceNormal);\n vec3 V = normalize(eyeDirection);\n vec3 L = normalize(lightDirection);\n\n if(gl_FrontFacing) {\n N = -N;\n }\n\n float specular = max(beckmannSpecular(L, V, N, roughness), 0.);\n float diffuse = min(kambient + kdiffuse * max(dot(N, L), 0.0), 1.0);\n\n //decide how to interpolate color \u2014 in vertex or in fragment\n vec4 surfaceColor =\n step(vertexColor, .5) * texture2D(colormap, vec2(value, value)) +\n step(.5, vertexColor) * vColor;\n\n vec4 litColor = surfaceColor.a * vec4(diffuse * surfaceColor.rgb + kspecular * vec3(1,1,1) * specular, 1.0);\n\n gl_FragColor = mix(litColor, contourColor, contourTint) * opacity;\n}\n"]),s=i(["precision highp float;\n#define GLSLIFY 1\n\nattribute vec4 uv;\nattribute float f;\n\nuniform vec3 objectOffset;\nuniform mat3 permutation;\nuniform mat4 model, view, projection;\nuniform float height, zOffset;\nuniform sampler2D colormap;\n\nvarying float value, kill;\nvarying vec3 worldCoordinate;\nvarying vec2 planeCoordinate;\nvarying vec3 lightDirection, eyeDirection, surfaceNormal;\nvarying vec4 vColor;\n\nvoid main() {\n vec3 dataCoordinate = permutation * vec3(uv.xy, height);\n worldCoordinate = objectOffset + dataCoordinate;\n vec4 worldPosition = model * vec4(worldCoordinate, 1.0);\n\n vec4 clipPosition = projection * view * worldPosition;\n clipPosition.z += zOffset;\n\n gl_Position = clipPosition;\n value = f + objectOffset.z;\n kill = -1.0;\n planeCoordinate = uv.zw;\n\n vColor = texture2D(colormap, vec2(value, value));\n\n //Don't do lighting for contours\n surfaceNormal = vec3(1,0,0);\n eyeDirection = vec3(0,1,0);\n lightDirection = 
vec3(0,0,1);\n}\n"]),l=i(["precision highp float;\n#define GLSLIFY 1\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nuniform vec2 shape;\nuniform vec3 clipBounds[2];\nuniform float pickId;\n\nvarying float value, kill;\nvarying vec3 worldCoordinate;\nvarying vec2 planeCoordinate;\nvarying vec3 surfaceNormal;\n\nvec2 splitFloat(float v) {\n float vh = 255.0 * v;\n float upper = floor(vh);\n float lower = fract(vh);\n return vec2(upper / 255.0, floor(lower * 16.0) / 16.0);\n}\n\nvoid main() {\n if ((kill > 0.0) ||\n (outOfRange(clipBounds[0], clipBounds[1], worldCoordinate))) discard;\n\n vec2 ux = splitFloat(planeCoordinate.x / shape.x);\n vec2 uy = splitFloat(planeCoordinate.y / shape.y);\n gl_FragColor = vec4(pickId, ux.x, uy.x, ux.y + (uy.y/16.0));\n}\n"]);r.createShader=function(t){var e=n(t,a,o,null,[{name:"uv",type:"vec4"},{name:"f",type:"vec3"},{name:"normal",type:"vec3"}]);return e.attributes.uv.location=0,e.attributes.f.location=1,e.attributes.normal.location=2,e},r.createPickShader=function(t){var e=n(t,a,l,null,[{name:"uv",type:"vec4"},{name:"f",type:"vec3"},{name:"normal",type:"vec3"}]);return e.attributes.uv.location=0,e.attributes.f.location=1,e.attributes.normal.location=2,e},r.createContourShader=function(t){var e=n(t,s,o,null,[{name:"uv",type:"vec4"},{name:"f",type:"float"}]);return e.attributes.uv.location=0,e.attributes.f.location=1,e},r.createPickContourShader=function(t){var e=n(t,s,l,null,[{name:"uv",type:"vec4"},{name:"f",type:"float"}]);return 
e.attributes.uv.location=0,e.attributes.f.location=1,e}},{"gl-shader":132,glslify:231}],145:[function(t,e,r){"use strict";e.exports=function(t){var e=t.gl,r=y(e),n=b(e),s=x(e),l=_(e),c=i(e),u=a(e,[{buffer:c,size:4,stride:40,offset:0},{buffer:c,size:3,stride:40,offset:16},{buffer:c,size:3,stride:40,offset:28}]),f=i(e),h=a(e,[{buffer:f,size:4,stride:20,offset:0},{buffer:f,size:1,stride:20,offset:16}]),p=i(e),d=a(e,[{buffer:p,size:2,type:e.FLOAT}]),m=o(e,1,256,e.RGBA,e.UNSIGNED_BYTE);m.minFilter=e.LINEAR,m.magFilter=e.LINEAR;var g=new M(e,[0,0],[[0,0,0],[0,0,0]],r,n,c,u,m,s,l,f,h,p,d,[0,0,0]),v={levels:[[],[],[]]};for(var w in t)v[w]=t[w];return v.colormap=v.colormap||"jet",g.update(v),g};var n=t("bit-twiddle"),i=t("gl-buffer"),a=t("gl-vao"),o=t("gl-texture2d"),s=t("typedarray-pool"),l=t("colormap"),c=t("ndarray-ops"),u=t("ndarray-pack"),f=t("ndarray"),h=t("surface-nets"),p=t("gl-mat4/multiply"),d=t("gl-mat4/invert"),m=t("binary-search-bounds"),g=t("ndarray-gradient"),v=t("./lib/shaders"),y=v.createShader,x=v.createContourShader,b=v.createPickShader,_=v.createPickContourShader,w=[1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1],T=[[0,0],[0,1],[1,0],[1,1],[1,0],[0,1]],k=[[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0]];function A(t,e,r,n,i){this.position=t,this.index=e,this.uv=r,this.level=n,this.dataCoordinate=i}!function(){for(var t=0;t<3;++t){var e=k[t],r=(t+2)%3;e[(t+1)%3+0]=1,e[r+3]=1,e[t+6]=1}}();function M(t,e,r,n,i,a,o,l,c,u,h,p,d,m,g){this.gl=t,this.shape=e,this.bounds=r,this.objectOffset=g,this.intensityBounds=[],this._shader=n,this._pickShader=i,this._coordinateBuffer=a,this._vao=o,this._colorMap=l,this._contourShader=c,this._contourPickShader=u,this._contourBuffer=h,this._contourVAO=p,this._contourOffsets=[[],[],[]],this._contourCounts=[[],[],[]],this._vertexCount=0,this._pickResult=new 
A([0,0,0],[0,0],[0,0],[0,0,0],[0,0,0]),this._dynamicBuffer=d,this._dynamicVAO=m,this._dynamicOffsets=[0,0,0],this._dynamicCounts=[0,0,0],this.contourWidth=[1,1,1],this.contourLevels=[[1],[1],[1]],this.contourTint=[0,0,0],this.contourColor=[[.5,.5,.5,1],[.5,.5,.5,1],[.5,.5,.5,1]],this.showContour=!0,this.showSurface=!0,this.enableHighlight=[!0,!0,!0],this.highlightColor=[[0,0,0,1],[0,0,0,1],[0,0,0,1]],this.highlightTint=[1,1,1],this.highlightLevel=[-1,-1,-1],this.enableDynamic=[!0,!0,!0],this.dynamicLevel=[NaN,NaN,NaN],this.dynamicColor=[[0,0,0,1],[0,0,0,1],[0,0,0,1]],this.dynamicTint=[1,1,1],this.dynamicWidth=[1,1,1],this.axesBounds=[[1/0,1/0,1/0],[-1/0,-1/0,-1/0]],this.surfaceProject=[!1,!1,!1],this.contourProject=[[!1,!1,!1],[!1,!1,!1],[!1,!1,!1]],this.colorBounds=[!1,!1],this._field=[f(s.mallocFloat(1024),[0,0]),f(s.mallocFloat(1024),[0,0]),f(s.mallocFloat(1024),[0,0])],this.pickId=1,this.clipBounds=[[-1/0,-1/0,-1/0],[1/0,1/0,1/0]],this.snapToData=!1,this.pixelRatio=1,this.opacity=1,this.lightPosition=[10,1e4,0],this.ambientLight=.8,this.diffuseLight=.8,this.specularLight=2,this.roughness=.5,this.fresnel=1.5,this.vertexColor=0,this.dirty=!0}var S=M.prototype;S.genColormap=function(t,e){var r=!1,n=u([l({colormap:t,nshades:256,format:"rgba"}).map((function(t,n){var i=e?function(t,e){if(!e)return 1;if(!e.length)return 1;for(var r=0;rt&&r>0){var n=(e[r][0]-t)/(e[r][0]-e[r-1][0]);return e[r][1]*(1-n)+n*e[r-1][1]}}return 1}(n/255,e):t[3];return i<1&&(r=!0),[t[0],t[1],t[2],255*i]}))]);return c.divseq(n,255),this.hasAlphaScale=r,n},S.isTransparent=function(){return this.opacity<1||this.hasAlphaScale},S.isOpaque=function(){return!this.isTransparent()},S.pickSlots=1,S.setPickBase=function(t){this.pickId=t};var E=[0,0,0],L={showSurface:!1,showContour:!1,projections:[w.slice(),w.slice(),w.slice()],clipBounds:[[[0,0,0],[0,0,0]],[[0,0,0],[0,0,0]],[[0,0,0],[0,0,0]]]};function C(t,e){var 
r,n,i,a=e.axes&&e.axes.lastCubeProps.axis||E,o=e.showSurface,s=e.showContour;for(r=0;r<3;++r)for(o=o||e.surfaceProject[r],n=0;n<3;++n)s=s||e.contourProject[r][n];for(r=0;r<3;++r){var l=L.projections[r];for(n=0;n<16;++n)l[n]=0;for(n=0;n<4;++n)l[5*n]=1;l[5*r]=0,l[12+r]=e.axesBounds[+(a[r]>0)][r],p(l,t.model,l);var c=L.clipBounds[r];for(i=0;i<2;++i)for(n=0;n<3;++n)c[i][n]=t.clipBounds[i][n];c[0][r]=-1e8,c[1][r]=1e8}return L.showSurface=o,L.showContour=s,L}var P={model:w,view:w,projection:w,inverseModel:w.slice(),lowerBound:[0,0,0],upperBound:[0,0,0],colorMap:0,clipBounds:[[0,0,0],[0,0,0]],height:0,contourTint:0,contourColor:[0,0,0,1],permutation:[1,0,0,0,1,0,0,0,1],zOffset:-1e-4,objectOffset:[0,0,0],kambient:1,kdiffuse:1,kspecular:1,lightPosition:[1e3,1e3,1e3],eyePosition:[0,0,0],roughness:1,fresnel:1,opacity:1,vertexColor:0},I=w.slice(),O=[1,0,0,0,1,0,0,0,1];function z(t,e){t=t||{};var r=this.gl;r.disable(r.CULL_FACE),this._colorMap.bind(0);var n=P;n.model=t.model||w,n.view=t.view||w,n.projection=t.projection||w,n.lowerBound=[this.bounds[0][0],this.bounds[0][1],this.colorBounds[0]||this.bounds[0][2]],n.upperBound=[this.bounds[1][0],this.bounds[1][1],this.colorBounds[1]||this.bounds[1][2]],n.objectOffset=this.objectOffset,n.contourColor=this.contourColor[0],n.inverseModel=d(n.inverseModel,n.model);for(var i=0;i<2;++i)for(var a=n.clipBounds[i],o=0;o<3;++o)a[o]=Math.min(Math.max(this.clipBounds[i][o],-1e8),1e8);n.kambient=this.ambientLight,n.kdiffuse=this.diffuseLight,n.kspecular=this.specularLight,n.roughness=this.roughness,n.fresnel=this.fresnel,n.opacity=this.opacity,n.height=0,n.permutation=O,n.vertexColor=this.vertexColor;var s=I;for(p(s,n.view,n.model),p(s,n.projection,s),d(s,s),i=0;i<3;++i)n.eyePosition[i]=s[12+i]/s[15];var l=s[15];for(i=0;i<3;++i)l+=this.lightPosition[i]*s[4*i+3];for(i=0;i<3;++i){var c=s[12+i];for(o=0;o<3;++o)c+=s[4*o+i]*this.lightPosition[o];n.lightPosition[i]=c/l}var 
u=C(n,this);if(u.showSurface){for(this._shader.bind(),this._shader.uniforms=n,this._vao.bind(),this.showSurface&&this._vertexCount&&this._vao.draw(r.TRIANGLES,this._vertexCount),i=0;i<3;++i)this.surfaceProject[i]&&this.vertexCount&&(this._shader.uniforms.model=u.projections[i],this._shader.uniforms.clipBounds=u.clipBounds[i],this._vao.draw(r.TRIANGLES,this._vertexCount));this._vao.unbind()}if(u.showContour){var f=this._contourShader;n.kambient=1,n.kdiffuse=0,n.kspecular=0,n.opacity=1,f.bind(),f.uniforms=n;var h=this._contourVAO;for(h.bind(),i=0;i<3;++i)for(f.uniforms.permutation=k[i],r.lineWidth(this.contourWidth[i]*this.pixelRatio),o=0;o>4)/16)/255,i=Math.floor(n),a=n-i,o=e[1]*(t.value[1]+(15&t.value[2])/16)/255,s=Math.floor(o),l=o-s;i+=1,s+=1;var c=r.position;c[0]=c[1]=c[2]=0;for(var u=0;u<2;++u)for(var f=u?a:1-a,h=0;h<2;++h)for(var p=i+u,d=s+h,g=f*(h?l:1-l),v=0;v<3;++v)c[v]+=this._field[v].get(p,d)*g;for(var y=this._pickResult.level,x=0;x<3;++x)if(y[x]=m.le(this.contourLevels[x],c[x]),y[x]<0)this.contourLevels[x].length>0&&(y[x]=0);else if(y[x]Math.abs(_-c[x])&&(y[x]+=1)}for(r.index[0]=a<.5?i:i+1,r.index[1]=l<.5?s:s+1,r.uv[0]=n/e[0],r.uv[1]=o/e[1],v=0;v<3;++v)r.dataCoordinate[v]=this._field[v].get(r.index[0],r.index[1]);return r},S.padField=function(t,e){var r=e.shape.slice(),n=t.shape.slice();c.assign(t.lo(1,1).hi(r[0],r[1]),e),c.assign(t.lo(1).hi(r[0],1),e.hi(r[0],1)),c.assign(t.lo(1,n[1]-1).hi(r[0],1),e.lo(0,r[1]-1).hi(r[0],1)),c.assign(t.lo(0,1).hi(1,r[1]),e.hi(1)),c.assign(t.lo(n[0]-1,1).hi(1,r[1]),e.lo(r[0]-1)),t.set(0,0,e.get(0,0)),t.set(0,n[1]-1,e.get(0,r[1]-1)),t.set(n[0]-1,0,e.get(r[0]-1,0)),t.set(n[0]-1,n[1]-1,e.get(r[0]-1,r[1]-1))},S.update=function(t){t=t||{},this.objectOffset=t.objectOffset||this.objectOffset,this.dirty=!0,"contourWidth"in t&&(this.contourWidth=R(t.contourWidth,Number)),"showContour"in t&&(this.showContour=R(t.showContour,Boolean)),"showSurface"in t&&(this.showSurface=!!t.showSurface),"contourTint"in 
t&&(this.contourTint=R(t.contourTint,Boolean)),"contourColor"in t&&(this.contourColor=B(t.contourColor)),"contourProject"in t&&(this.contourProject=R(t.contourProject,(function(t){return R(t,Boolean)}))),"surfaceProject"in t&&(this.surfaceProject=t.surfaceProject),"dynamicColor"in t&&(this.dynamicColor=B(t.dynamicColor)),"dynamicTint"in t&&(this.dynamicTint=R(t.dynamicTint,Number)),"dynamicWidth"in t&&(this.dynamicWidth=R(t.dynamicWidth,Number)),"opacity"in t&&(this.opacity=t.opacity),"opacityscale"in t&&(this.opacityscale=t.opacityscale),"colorBounds"in t&&(this.colorBounds=t.colorBounds),"vertexColor"in t&&(this.vertexColor=t.vertexColor?1:0),"colormap"in t&&this._colorMap.setPixels(this.genColormap(t.colormap,this.opacityscale));var e=t.field||t.coords&&t.coords[2]||null,r=!1;if(e||(e=this._field[2].shape[0]||this._field[2].shape[2]?this._field[2].lo(1,1).hi(this._field[2].shape[0]-2,this._field[2].shape[1]-2):this._field[2].hi(0,0)),"field"in t||"coords"in t){var i=(e.shape[0]+2)*(e.shape[1]+2);i>this._field[2].data.length&&(s.freeFloat(this._field[2].data),this._field[2].data=s.mallocFloat(n.nextPow2(i))),this._field[2]=f(this._field[2].data,[e.shape[0]+2,e.shape[1]+2]),this.padField(this._field[2],e),this.shape=e.shape.slice();for(var a=this.shape,o=0;o<2;++o)this._field[2].size>this._field[o].data.length&&(s.freeFloat(this._field[o].data),this._field[o].data=s.mallocFloat(this._field[2].size)),this._field[o]=f(this._field[o].data,[a[0]+2,a[1]+2]);if(t.coords){var l=t.coords;if(!Array.isArray(l)||3!==l.length)throw new Error("gl-surface: invalid coordinates for x/y");for(o=0;o<2;++o){var c=l[o];for(v=0;v<2;++v)if(c.shape[v]!==a[v])throw new Error("gl-surface: coords have incorrect shape");this.padField(this._field[o],c)}}else if(t.ticks){var u=t.ticks;if(!Array.isArray(u)||2!==u.length)throw new Error("gl-surface: invalid ticks");for(o=0;o<2;++o){var p=u[o];if((Array.isArray(p)||p.length)&&(p=f(p)),p.shape[0]!==a[o])throw new Error("gl-surface: invalid tick 
length");var d=f(p.data,a);d.stride[o]=p.stride[0],d.stride[1^o]=0,this.padField(this._field[o],d)}}else{for(o=0;o<2;++o){var m=[0,0];m[o]=1,this._field[o]=f(this._field[o].data,[a[0]+2,a[1]+2],m,0)}this._field[0].set(0,0,0);for(var v=0;v0){for(var xt=0;xt<5;++xt)Q.pop();U-=1}continue t}Q.push(nt[0],nt[1],ot[0],ot[1],nt[2]),U+=1}}rt.push(U)}this._contourOffsets[$]=et,this._contourCounts[$]=rt}var bt=s.mallocFloat(Q.length);for(o=0;oi||r<0||r>i)throw new Error("gl-texture2d: Invalid texture size");return t._shape=[e,r],t.bind(),n.texImage2D(n.TEXTURE_2D,0,t.format,e,r,0,t.format,t.type,null),t._mipLevels=[0],t}function p(t,e,r,n,i,a){this.gl=t,this.handle=e,this.format=i,this.type=a,this._shape=[r,n],this._mipLevels=[0],this._magFilter=t.NEAREST,this._minFilter=t.NEAREST,this._wrapS=t.CLAMP_TO_EDGE,this._wrapT=t.CLAMP_TO_EDGE,this._anisoSamples=1;var o=this,s=[this._wrapS,this._wrapT];Object.defineProperties(s,[{get:function(){return o._wrapS},set:function(t){return o.wrapS=t}},{get:function(){return o._wrapT},set:function(t){return o.wrapT=t}}]),this._wrapVector=s;var l=[this._shape[0],this._shape[1]];Object.defineProperties(l,[{get:function(){return o._shape[0]},set:function(t){return o.width=t}},{get:function(){return o._shape[1]},set:function(t){return o.height=t}}]),this._shapeVector=l}var d=p.prototype;function m(t,e){return 3===t.length?1===e[2]&&e[1]===t[0]*t[2]&&e[0]===t[2]:1===e[0]&&e[1]===t[0]}function g(t){var e=t.createTexture();return t.bindTexture(t.TEXTURE_2D,e),t.texParameteri(t.TEXTURE_2D,t.TEXTURE_MIN_FILTER,t.NEAREST),t.texParameteri(t.TEXTURE_2D,t.TEXTURE_MAG_FILTER,t.NEAREST),t.texParameteri(t.TEXTURE_2D,t.TEXTURE_WRAP_S,t.CLAMP_TO_EDGE),t.texParameteri(t.TEXTURE_2D,t.TEXTURE_WRAP_T,t.CLAMP_TO_EDGE),e}function v(t,e,r,n,i){var a=t.getParameter(t.MAX_TEXTURE_SIZE);if(e<0||e>a||r<0||r>a)throw new Error("gl-texture2d: Invalid texture shape");if(i===t.FLOAT&&!t.getExtension("OES_texture_float"))throw new Error("gl-texture2d: Floating point textures 
not supported on this platform");var o=g(t);return t.texImage2D(t.TEXTURE_2D,0,n,e,r,0,n,i,null),new p(t,o,e,r,n,i)}function y(t,e,r,n,i,a){var o=g(t);return t.texImage2D(t.TEXTURE_2D,0,i,i,a,e),new p(t,o,r,n,i,a)}function x(t,e){var r=e.dtype,o=e.shape.slice(),s=t.getParameter(t.MAX_TEXTURE_SIZE);if(o[0]<0||o[0]>s||o[1]<0||o[1]>s)throw new Error("gl-texture2d: Invalid texture size");var l=m(o,e.stride.slice()),c=0;"float32"===r?c=t.FLOAT:"float64"===r?(c=t.FLOAT,l=!1,r="float32"):"uint8"===r?c=t.UNSIGNED_BYTE:(c=t.UNSIGNED_BYTE,l=!1,r="uint8");var u,h,d=0;if(2===o.length)d=t.LUMINANCE,o=[o[0],o[1],1],e=n(e.data,o,[e.stride[0],e.stride[1],1],e.offset);else{if(3!==o.length)throw new Error("gl-texture2d: Invalid shape for texture");if(1===o[2])d=t.ALPHA;else if(2===o[2])d=t.LUMINANCE_ALPHA;else if(3===o[2])d=t.RGB;else{if(4!==o[2])throw new Error("gl-texture2d: Invalid shape for pixel coords");d=t.RGBA}}c!==t.FLOAT||t.getExtension("OES_texture_float")||(c=t.UNSIGNED_BYTE,l=!1);var v=e.size;if(l)u=0===e.offset&&e.data.length===v?e.data:e.data.subarray(e.offset,e.offset+v);else{var y=[o[2],o[2]*o[0],1];h=a.malloc(v,r);var x=n(h,o,y,0);"float32"!==r&&"float64"!==r||c!==t.UNSIGNED_BYTE?i.assign(x,e):f(x,e),u=h.subarray(0,v)}var b=g(t);return t.texImage2D(t.TEXTURE_2D,0,d,o[0],o[1],0,d,c,u),l||a.free(h),new p(t,b,o[0],o[1],d,c)}Object.defineProperties(d,{minFilter:{get:function(){return this._minFilter},set:function(t){this.bind();var e=this.gl;if(this.type===e.FLOAT&&o.indexOf(t)>=0&&(e.getExtension("OES_texture_float_linear")||(t=e.NEAREST)),s.indexOf(t)<0)throw new Error("gl-texture2d: Unknown filter mode "+t);return e.texParameteri(e.TEXTURE_2D,e.TEXTURE_MIN_FILTER,t),this._minFilter=t}},magFilter:{get:function(){return this._magFilter},set:function(t){this.bind();var e=this.gl;if(this.type===e.FLOAT&&o.indexOf(t)>=0&&(e.getExtension("OES_texture_float_linear")||(t=e.NEAREST)),s.indexOf(t)<0)throw new Error("gl-texture2d: Unknown filter mode "+t);return 
e.texParameteri(e.TEXTURE_2D,e.TEXTURE_MAG_FILTER,t),this._magFilter=t}},mipSamples:{get:function(){return this._anisoSamples},set:function(t){var e=this._anisoSamples;if(this._anisoSamples=0|Math.max(t,1),e!==this._anisoSamples){var r=this.gl.getExtension("EXT_texture_filter_anisotropic");r&&this.gl.texParameterf(this.gl.TEXTURE_2D,r.TEXTURE_MAX_ANISOTROPY_EXT,this._anisoSamples)}return this._anisoSamples}},wrapS:{get:function(){return this._wrapS},set:function(t){if(this.bind(),l.indexOf(t)<0)throw new Error("gl-texture2d: Unknown wrap mode "+t);return this.gl.texParameteri(this.gl.TEXTURE_2D,this.gl.TEXTURE_WRAP_S,t),this._wrapS=t}},wrapT:{get:function(){return this._wrapT},set:function(t){if(this.bind(),l.indexOf(t)<0)throw new Error("gl-texture2d: Unknown wrap mode "+t);return this.gl.texParameteri(this.gl.TEXTURE_2D,this.gl.TEXTURE_WRAP_T,t),this._wrapT=t}},wrap:{get:function(){return this._wrapVector},set:function(t){if(Array.isArray(t)||(t=[t,t]),2!==t.length)throw new Error("gl-texture2d: Must specify wrap mode for rows and columns");for(var e=0;e<2;++e)if(l.indexOf(t[e])<0)throw new Error("gl-texture2d: Unknown wrap mode "+t);this._wrapS=t[0],this._wrapT=t[1];var r=this.gl;return this.bind(),r.texParameteri(r.TEXTURE_2D,r.TEXTURE_WRAP_S,this._wrapS),r.texParameteri(r.TEXTURE_2D,r.TEXTURE_WRAP_T,this._wrapT),t}},shape:{get:function(){return this._shapeVector},set:function(t){if(Array.isArray(t)){if(2!==t.length)throw new Error("gl-texture2d: Invalid texture shape")}else t=[0|t,0|t];return h(this,0|t[0],0|t[1]),[0|t[0],0|t[1]]}},width:{get:function(){return this._shape[0]},set:function(t){return h(this,t|=0,this._shape[1]),t}},height:{get:function(){return this._shape[1]},set:function(t){return t|=0,h(this,this._shape[0],t),t}}}),d.bind=function(t){var e=this.gl;return void 0!==t&&e.activeTexture(e.TEXTURE0+(0|t)),e.bindTexture(e.TEXTURE_2D,this.handle),void 
0!==t?0|t:e.getParameter(e.ACTIVE_TEXTURE)-e.TEXTURE0},d.dispose=function(){this.gl.deleteTexture(this.handle)},d.generateMipmap=function(){this.bind(),this.gl.generateMipmap(this.gl.TEXTURE_2D);for(var t=Math.min(this._shape[0],this._shape[1]),e=0;t>0;++e,t>>>=1)this._mipLevels.indexOf(e)<0&&this._mipLevels.push(e)},d.setPixels=function(t,e,r,o){var s=this.gl;this.bind(),Array.isArray(e)?(o=r,r=0|e[1],e=0|e[0]):(e=e||0,r=r||0),o=o||0;var l=u(t)?t:t.raw;if(l){this._mipLevels.indexOf(o)<0?(s.texImage2D(s.TEXTURE_2D,0,this.format,this.format,this.type,l),this._mipLevels.push(o)):s.texSubImage2D(s.TEXTURE_2D,o,e,r,this.format,this.type,l)}else{if(!(t.shape&&t.stride&&t.data))throw new Error("gl-texture2d: Unsupported data type");if(t.shape.length<2||e+t.shape[1]>this._shape[1]>>>o||r+t.shape[0]>this._shape[0]>>>o||e<0||r<0)throw new Error("gl-texture2d: Texture dimensions are out of bounds");!function(t,e,r,o,s,l,c,u){var h=u.dtype,p=u.shape.slice();if(p.length<2||p.length>3)throw new Error("gl-texture2d: Invalid ndarray, must be 2d or 3d");var d=0,g=0,v=m(p,u.stride.slice());"float32"===h?d=t.FLOAT:"float64"===h?(d=t.FLOAT,v=!1,h="float32"):"uint8"===h?d=t.UNSIGNED_BYTE:(d=t.UNSIGNED_BYTE,v=!1,h="uint8");if(2===p.length)g=t.LUMINANCE,p=[p[0],p[1],1],u=n(u.data,p,[u.stride[0],u.stride[1],1],u.offset);else{if(3!==p.length)throw new Error("gl-texture2d: Invalid shape for texture");if(1===p[2])g=t.ALPHA;else if(2===p[2])g=t.LUMINANCE_ALPHA;else if(3===p[2])g=t.RGB;else{if(4!==p[2])throw new Error("gl-texture2d: Invalid shape for pixel coords");g=t.RGBA}p[2]}g!==t.LUMINANCE&&g!==t.ALPHA||s!==t.LUMINANCE&&s!==t.ALPHA||(g=s);if(g!==s)throw new Error("gl-texture2d: Incompatible texture format for setPixels");var 
y=u.size,x=c.indexOf(o)<0;x&&c.push(o);if(d===l&&v)0===u.offset&&u.data.length===y?x?t.texImage2D(t.TEXTURE_2D,o,s,p[0],p[1],0,s,l,u.data):t.texSubImage2D(t.TEXTURE_2D,o,e,r,p[0],p[1],s,l,u.data):x?t.texImage2D(t.TEXTURE_2D,o,s,p[0],p[1],0,s,l,u.data.subarray(u.offset,u.offset+y)):t.texSubImage2D(t.TEXTURE_2D,o,e,r,p[0],p[1],s,l,u.data.subarray(u.offset,u.offset+y));else{var b;b=l===t.FLOAT?a.mallocFloat32(y):a.mallocUint8(y);var _=n(b,p,[p[2],p[2]*p[0],1]);d===t.FLOAT&&l===t.UNSIGNED_BYTE?f(_,u):i.assign(_,u),x?t.texImage2D(t.TEXTURE_2D,o,s,p[0],p[1],0,s,l,b.subarray(0,y)):t.texSubImage2D(t.TEXTURE_2D,o,e,r,p[0],p[1],s,l,b.subarray(0,y)),l===t.FLOAT?a.freeFloat32(b):a.freeUint8(b)}}(s,e,r,o,this.format,this.type,this._mipLevels,t)}}},{ndarray:259,"ndarray-ops":254,"typedarray-pool":308}],147:[function(t,e,r){"use strict";e.exports=function(t,e,r){e?e.bind():t.bindBuffer(t.ELEMENT_ARRAY_BUFFER,null);var n=0|t.getParameter(t.MAX_VERTEX_ATTRIBS);if(r){if(r.length>n)throw new Error("gl-vao: Too many vertex attributes");for(var i=0;i1?0:Math.acos(s)};var n=t("./fromValues"),i=t("./normalize"),a=t("./dot")},{"./dot":162,"./fromValues":168,"./normalize":179}],153:[function(t,e,r){e.exports=function(t,e){return t[0]=Math.ceil(e[0]),t[1]=Math.ceil(e[1]),t[2]=Math.ceil(e[2]),t}},{}],154:[function(t,e,r){e.exports=function(t){var e=new Float32Array(3);return e[0]=t[0],e[1]=t[1],e[2]=t[2],e}},{}],155:[function(t,e,r){e.exports=function(t,e){return t[0]=e[0],t[1]=e[1],t[2]=e[2],t}},{}],156:[function(t,e,r){e.exports=function(){var t=new Float32Array(3);return t[0]=0,t[1]=0,t[2]=0,t}},{}],157:[function(t,e,r){e.exports=function(t,e,r){var n=e[0],i=e[1],a=e[2],o=r[0],s=r[1],l=r[2];return t[0]=i*l-a*s,t[1]=a*o-n*l,t[2]=n*s-i*o,t}},{}],158:[function(t,e,r){e.exports=t("./distance")},{"./distance":159}],159:[function(t,e,r){e.exports=function(t,e){var r=e[0]-t[0],n=e[1]-t[1],i=e[2]-t[2];return 
Math.sqrt(r*r+n*n+i*i)}},{}],160:[function(t,e,r){e.exports=t("./divide")},{"./divide":161}],161:[function(t,e,r){e.exports=function(t,e,r){return t[0]=e[0]/r[0],t[1]=e[1]/r[1],t[2]=e[2]/r[2],t}},{}],162:[function(t,e,r){e.exports=function(t,e){return t[0]*e[0]+t[1]*e[1]+t[2]*e[2]}},{}],163:[function(t,e,r){e.exports=1e-6},{}],164:[function(t,e,r){e.exports=function(t,e){var r=t[0],i=t[1],a=t[2],o=e[0],s=e[1],l=e[2];return Math.abs(r-o)<=n*Math.max(1,Math.abs(r),Math.abs(o))&&Math.abs(i-s)<=n*Math.max(1,Math.abs(i),Math.abs(s))&&Math.abs(a-l)<=n*Math.max(1,Math.abs(a),Math.abs(l))};var n=t("./epsilon")},{"./epsilon":163}],165:[function(t,e,r){e.exports=function(t,e){return t[0]===e[0]&&t[1]===e[1]&&t[2]===e[2]}},{}],166:[function(t,e,r){e.exports=function(t,e){return t[0]=Math.floor(e[0]),t[1]=Math.floor(e[1]),t[2]=Math.floor(e[2]),t}},{}],167:[function(t,e,r){e.exports=function(t,e,r,i,a,o){var s,l;e||(e=3);r||(r=0);l=i?Math.min(i*e+r,t.length):t.length;for(s=r;s0&&(a=1/Math.sqrt(a),t[0]=e[0]*a,t[1]=e[1]*a,t[2]=e[2]*a);return t}},{}],180:[function(t,e,r){e.exports=function(t,e){e=e||1;var r=2*Math.random()*Math.PI,n=2*Math.random()-1,i=Math.sqrt(1-n*n)*e;return t[0]=Math.cos(r)*i,t[1]=Math.sin(r)*i,t[2]=n*e,t}},{}],181:[function(t,e,r){e.exports=function(t,e,r,n){var i=r[1],a=r[2],o=e[1]-i,s=e[2]-a,l=Math.sin(n),c=Math.cos(n);return t[0]=e[0],t[1]=i+o*c-s*l,t[2]=a+o*l+s*c,t}},{}],182:[function(t,e,r){e.exports=function(t,e,r,n){var i=r[0],a=r[2],o=e[0]-i,s=e[2]-a,l=Math.sin(n),c=Math.cos(n);return t[0]=i+s*l+o*c,t[1]=e[1],t[2]=a+s*c-o*l,t}},{}],183:[function(t,e,r){e.exports=function(t,e,r,n){var i=r[0],a=r[1],o=e[0]-i,s=e[1]-a,l=Math.sin(n),c=Math.cos(n);return t[0]=i+o*c-s*l,t[1]=a+o*l+s*c,t[2]=e[2],t}},{}],184:[function(t,e,r){e.exports=function(t,e){return t[0]=Math.round(e[0]),t[1]=Math.round(e[1]),t[2]=Math.round(e[2]),t}},{}],185:[function(t,e,r){e.exports=function(t,e,r){return 
t[0]=e[0]*r,t[1]=e[1]*r,t[2]=e[2]*r,t}},{}],186:[function(t,e,r){e.exports=function(t,e,r,n){return t[0]=e[0]+r[0]*n,t[1]=e[1]+r[1]*n,t[2]=e[2]+r[2]*n,t}},{}],187:[function(t,e,r){e.exports=function(t,e,r,n){return t[0]=e,t[1]=r,t[2]=n,t}},{}],188:[function(t,e,r){e.exports=t("./squaredDistance")},{"./squaredDistance":190}],189:[function(t,e,r){e.exports=t("./squaredLength")},{"./squaredLength":191}],190:[function(t,e,r){e.exports=function(t,e){var r=e[0]-t[0],n=e[1]-t[1],i=e[2]-t[2];return r*r+n*n+i*i}},{}],191:[function(t,e,r){e.exports=function(t){var e=t[0],r=t[1],n=t[2];return e*e+r*r+n*n}},{}],192:[function(t,e,r){e.exports=t("./subtract")},{"./subtract":193}],193:[function(t,e,r){e.exports=function(t,e,r){return t[0]=e[0]-r[0],t[1]=e[1]-r[1],t[2]=e[2]-r[2],t}},{}],194:[function(t,e,r){e.exports=function(t,e,r){var n=e[0],i=e[1],a=e[2];return t[0]=n*r[0]+i*r[3]+a*r[6],t[1]=n*r[1]+i*r[4]+a*r[7],t[2]=n*r[2]+i*r[5]+a*r[8],t}},{}],195:[function(t,e,r){e.exports=function(t,e,r){var n=e[0],i=e[1],a=e[2],o=r[3]*n+r[7]*i+r[11]*a+r[15];return o=o||1,t[0]=(r[0]*n+r[4]*i+r[8]*a+r[12])/o,t[1]=(r[1]*n+r[5]*i+r[9]*a+r[13])/o,t[2]=(r[2]*n+r[6]*i+r[10]*a+r[14])/o,t}},{}],196:[function(t,e,r){e.exports=function(t,e,r){var n=e[0],i=e[1],a=e[2],o=r[0],s=r[1],l=r[2],c=r[3],u=c*n+s*a-l*i,f=c*i+l*n-o*a,h=c*a+o*i-s*n,p=-o*n-s*i-l*a;return t[0]=u*c+p*-o+f*-l-h*-s,t[1]=f*c+p*-s+h*-o-u*-l,t[2]=h*c+p*-l+u*-s-f*-o,t}},{}],197:[function(t,e,r){e.exports=function(t,e,r){return t[0]=e[0]+r[0],t[1]=e[1]+r[1],t[2]=e[2]+r[2],t[3]=e[3]+r[3],t}},{}],198:[function(t,e,r){e.exports=function(t){var e=new Float32Array(4);return e[0]=t[0],e[1]=t[1],e[2]=t[2],e[3]=t[3],e}},{}],199:[function(t,e,r){e.exports=function(t,e){return t[0]=e[0],t[1]=e[1],t[2]=e[2],t[3]=e[3],t}},{}],200:[function(t,e,r){e.exports=function(){var t=new Float32Array(4);return t[0]=0,t[1]=0,t[2]=0,t[3]=0,t}},{}],201:[function(t,e,r){e.exports=function(t,e){var r=e[0]-t[0],n=e[1]-t[1],i=e[2]-t[2],a=e[3]-t[3];return 
Math.sqrt(r*r+n*n+i*i+a*a)}},{}],202:[function(t,e,r){e.exports=function(t,e,r){return t[0]=e[0]/r[0],t[1]=e[1]/r[1],t[2]=e[2]/r[2],t[3]=e[3]/r[3],t}},{}],203:[function(t,e,r){e.exports=function(t,e){return t[0]*e[0]+t[1]*e[1]+t[2]*e[2]+t[3]*e[3]}},{}],204:[function(t,e,r){e.exports=function(t,e,r,n){var i=new Float32Array(4);return i[0]=t,i[1]=e,i[2]=r,i[3]=n,i}},{}],205:[function(t,e,r){e.exports={create:t("./create"),clone:t("./clone"),fromValues:t("./fromValues"),copy:t("./copy"),set:t("./set"),add:t("./add"),subtract:t("./subtract"),multiply:t("./multiply"),divide:t("./divide"),min:t("./min"),max:t("./max"),scale:t("./scale"),scaleAndAdd:t("./scaleAndAdd"),distance:t("./distance"),squaredDistance:t("./squaredDistance"),length:t("./length"),squaredLength:t("./squaredLength"),negate:t("./negate"),inverse:t("./inverse"),normalize:t("./normalize"),dot:t("./dot"),lerp:t("./lerp"),random:t("./random"),transformMat4:t("./transformMat4"),transformQuat:t("./transformQuat")}},{"./add":197,"./clone":198,"./copy":199,"./create":200,"./distance":201,"./divide":202,"./dot":203,"./fromValues":204,"./inverse":206,"./length":207,"./lerp":208,"./max":209,"./min":210,"./multiply":211,"./negate":212,"./normalize":213,"./random":214,"./scale":215,"./scaleAndAdd":216,"./set":217,"./squaredDistance":218,"./squaredLength":219,"./subtract":220,"./transformMat4":221,"./transformQuat":222}],206:[function(t,e,r){e.exports=function(t,e){return t[0]=1/e[0],t[1]=1/e[1],t[2]=1/e[2],t[3]=1/e[3],t}},{}],207:[function(t,e,r){e.exports=function(t){var e=t[0],r=t[1],n=t[2],i=t[3];return Math.sqrt(e*e+r*r+n*n+i*i)}},{}],208:[function(t,e,r){e.exports=function(t,e,r,n){var i=e[0],a=e[1],o=e[2],s=e[3];return t[0]=i+n*(r[0]-i),t[1]=a+n*(r[1]-a),t[2]=o+n*(r[2]-o),t[3]=s+n*(r[3]-s),t}},{}],209:[function(t,e,r){e.exports=function(t,e,r){return t[0]=Math.max(e[0],r[0]),t[1]=Math.max(e[1],r[1]),t[2]=Math.max(e[2],r[2]),t[3]=Math.max(e[3],r[3]),t}},{}],210:[function(t,e,r){e.exports=function(t,e,r){return 
t[0]=Math.min(e[0],r[0]),t[1]=Math.min(e[1],r[1]),t[2]=Math.min(e[2],r[2]),t[3]=Math.min(e[3],r[3]),t}},{}],211:[function(t,e,r){e.exports=function(t,e,r){return t[0]=e[0]*r[0],t[1]=e[1]*r[1],t[2]=e[2]*r[2],t[3]=e[3]*r[3],t}},{}],212:[function(t,e,r){e.exports=function(t,e){return t[0]=-e[0],t[1]=-e[1],t[2]=-e[2],t[3]=-e[3],t}},{}],213:[function(t,e,r){e.exports=function(t,e){var r=e[0],n=e[1],i=e[2],a=e[3],o=r*r+n*n+i*i+a*a;o>0&&(o=1/Math.sqrt(o),t[0]=r*o,t[1]=n*o,t[2]=i*o,t[3]=a*o);return t}},{}],214:[function(t,e,r){var n=t("./normalize"),i=t("./scale");e.exports=function(t,e){return e=e||1,t[0]=Math.random(),t[1]=Math.random(),t[2]=Math.random(),t[3]=Math.random(),n(t,t),i(t,t,e),t}},{"./normalize":213,"./scale":215}],215:[function(t,e,r){e.exports=function(t,e,r){return t[0]=e[0]*r,t[1]=e[1]*r,t[2]=e[2]*r,t[3]=e[3]*r,t}},{}],216:[function(t,e,r){e.exports=function(t,e,r,n){return t[0]=e[0]+r[0]*n,t[1]=e[1]+r[1]*n,t[2]=e[2]+r[2]*n,t[3]=e[3]+r[3]*n,t}},{}],217:[function(t,e,r){e.exports=function(t,e,r,n,i){return t[0]=e,t[1]=r,t[2]=n,t[3]=i,t}},{}],218:[function(t,e,r){e.exports=function(t,e){var r=e[0]-t[0],n=e[1]-t[1],i=e[2]-t[2],a=e[3]-t[3];return r*r+n*n+i*i+a*a}},{}],219:[function(t,e,r){e.exports=function(t){var e=t[0],r=t[1],n=t[2],i=t[3];return e*e+r*r+n*n+i*i}},{}],220:[function(t,e,r){e.exports=function(t,e,r){return t[0]=e[0]-r[0],t[1]=e[1]-r[1],t[2]=e[2]-r[2],t[3]=e[3]-r[3],t}},{}],221:[function(t,e,r){e.exports=function(t,e,r){var n=e[0],i=e[1],a=e[2],o=e[3];return t[0]=r[0]*n+r[4]*i+r[8]*a+r[12]*o,t[1]=r[1]*n+r[5]*i+r[9]*a+r[13]*o,t[2]=r[2]*n+r[6]*i+r[10]*a+r[14]*o,t[3]=r[3]*n+r[7]*i+r[11]*a+r[15]*o,t}},{}],222:[function(t,e,r){e.exports=function(t,e,r){var n=e[0],i=e[1],a=e[2],o=r[0],s=r[1],l=r[2],c=r[3],u=c*n+s*a-l*i,f=c*i+l*n-o*a,h=c*a+o*i-s*n,p=-o*n-s*i-l*a;return t[0]=u*c+p*-o+f*-l-h*-s,t[1]=f*c+p*-s+h*-o-u*-l,t[2]=h*c+p*-l+u*-s-f*-o,t[3]=e[3],t}},{}],223:[function(t,e,r){var n=t("glsl-tokenizer"),i=t("atob-lite");e.exports=function(t){for(var 
e=Array.isArray(t)?t:n(t),r=0;r0)continue;r=t.slice(0,1).join("")}return A(r),v+=r.length,(p=p.slice(r.length)).length}}function I(){return/[^a-fA-F0-9]/.test(e)?(A(p.join("")),h=999,u):(p.push(e),r=e,u+1)}function O(){return"."===e||/[eE]/.test(e)?(p.push(e),h=5,r=e,u+1):"x"===e&&1===p.length&&"0"===p[0]?(h=11,p.push(e),r=e,u+1):/[^\d]/.test(e)?(A(p.join("")),h=999,u):(p.push(e),r=e,u+1)}function z(){return"f"===e&&(p.push(e),r=e,u+=1),/[eE]/.test(e)?(p.push(e),r=e,u+1):("-"!==e&&"+"!==e||!/[eE]/.test(r))&&/[^\d]/.test(e)?(A(p.join("")),h=999,u):(p.push(e),r=e,u+1)}function D(){if(/[^\d\w_]/.test(e)){var t=p.join("");return h=k[t]?8:T[t]?7:6,A(p.join("")),h=999,u}return p.push(e),r=e,u+1}};var n=t("./lib/literals"),i=t("./lib/operators"),a=t("./lib/builtins"),o=t("./lib/literals-300es"),s=t("./lib/builtins-300es"),l=["block-comment","line-comment","preprocessor","operator","integer","float","ident","builtin","keyword","whitespace","eof","integer"]},{"./lib/builtins":226,"./lib/builtins-300es":225,"./lib/literals":228,"./lib/literals-300es":227,"./lib/operators":229}],225:[function(t,e,r){var 
n=t("./builtins");n=n.slice().filter((function(t){return!/^(gl\_|texture)/.test(t)})),e.exports=n.concat(["gl_VertexID","gl_InstanceID","gl_Position","gl_PointSize","gl_FragCoord","gl_FrontFacing","gl_FragDepth","gl_PointCoord","gl_MaxVertexAttribs","gl_MaxVertexUniformVectors","gl_MaxVertexOutputVectors","gl_MaxFragmentInputVectors","gl_MaxVertexTextureImageUnits","gl_MaxCombinedTextureImageUnits","gl_MaxTextureImageUnits","gl_MaxFragmentUniformVectors","gl_MaxDrawBuffers","gl_MinProgramTexelOffset","gl_MaxProgramTexelOffset","gl_DepthRangeParameters","gl_DepthRange","trunc","round","roundEven","isnan","isinf","floatBitsToInt","floatBitsToUint","intBitsToFloat","uintBitsToFloat","packSnorm2x16","unpackSnorm2x16","packUnorm2x16","unpackUnorm2x16","packHalf2x16","unpackHalf2x16","outerProduct","transpose","determinant","inverse","texture","textureSize","textureProj","textureLod","textureOffset","texelFetch","texelFetchOffset","textureProjOffset","textureLodOffset","textureProjLod","textureProjLodOffset","textureGrad","textureGradOffset","textureProjGrad","textureProjGradOffset"])},{"./builtins":226}],226:[function(t,e,r){e.exports=["abs","acos","all","any","asin","atan","ceil","clamp","cos","cross","dFdx","dFdy","degrees","distance","dot","equal","exp","exp2","faceforward","floor","fract","gl_BackColor","gl_BackLightModelProduct","gl_BackLightProduct","gl_BackMaterial","gl_BackSecondaryColor","gl_ClipPlane","gl_ClipVertex","gl_Color","gl_DepthRange","gl_DepthRangeParameters","gl_EyePlaneQ","gl_EyePlaneR","gl_EyePlaneS","gl_EyePlaneT","gl_Fog","gl_FogCoord","gl_FogFragCoord","gl_FogParameters","gl_FragColor","gl_FragCoord","gl_FragData","gl_FragDepth","gl_FragDepthEXT","gl_FrontColor","gl_FrontFacing","gl_FrontLightModelProduct","gl_FrontLightProduct","gl_FrontMaterial","gl_FrontSecondaryColor","gl_LightModel","gl_LightModelParameters","gl_LightModelProducts","gl_LightProducts","gl_LightSource","gl_LightSourceParameters","gl_MaterialParameters","gl_MaxClipPlanes","gl_
MaxCombinedTextureImageUnits","gl_MaxDrawBuffers","gl_MaxFragmentUniformComponents","gl_MaxLights","gl_MaxTextureCoords","gl_MaxTextureImageUnits","gl_MaxTextureUnits","gl_MaxVaryingFloats","gl_MaxVertexAttribs","gl_MaxVertexTextureImageUnits","gl_MaxVertexUniformComponents","gl_ModelViewMatrix","gl_ModelViewMatrixInverse","gl_ModelViewMatrixInverseTranspose","gl_ModelViewMatrixTranspose","gl_ModelViewProjectionMatrix","gl_ModelViewProjectionMatrixInverse","gl_ModelViewProjectionMatrixInverseTranspose","gl_ModelViewProjectionMatrixTranspose","gl_MultiTexCoord0","gl_MultiTexCoord1","gl_MultiTexCoord2","gl_MultiTexCoord3","gl_MultiTexCoord4","gl_MultiTexCoord5","gl_MultiTexCoord6","gl_MultiTexCoord7","gl_Normal","gl_NormalMatrix","gl_NormalScale","gl_ObjectPlaneQ","gl_ObjectPlaneR","gl_ObjectPlaneS","gl_ObjectPlaneT","gl_Point","gl_PointCoord","gl_PointParameters","gl_PointSize","gl_Position","gl_ProjectionMatrix","gl_ProjectionMatrixInverse","gl_ProjectionMatrixInverseTranspose","gl_ProjectionMatrixTranspose","gl_SecondaryColor","gl_TexCoord","gl_TextureEnvColor","gl_TextureMatrix","gl_TextureMatrixInverse","gl_TextureMatrixInverseTranspose","gl_TextureMatrixTranspose","gl_Vertex","greaterThan","greaterThanEqual","inversesqrt","length","lessThan","lessThanEqual","log","log2","matrixCompMult","max","min","mix","mod","normalize","not","notEqual","pow","radians","reflect","refract","sign","sin","smoothstep","sqrt","step","tan","texture2D","texture2DLod","texture2DProj","texture2DProjLod","textureCube","textureCubeLod","texture2DLodEXT","texture2DProjLodEXT","textureCubeLodEXT","texture2DGradEXT","texture2DProjGradEXT","textureCubeGradEXT"]},{}],227:[function(t,e,r){var 
n=t("./literals");e.exports=n.slice().concat(["layout","centroid","smooth","case","mat2x2","mat2x3","mat2x4","mat3x2","mat3x3","mat3x4","mat4x2","mat4x3","mat4x4","uvec2","uvec3","uvec4","samplerCubeShadow","sampler2DArray","sampler2DArrayShadow","isampler2D","isampler3D","isamplerCube","isampler2DArray","usampler2D","usampler3D","usamplerCube","usampler2DArray","coherent","restrict","readonly","writeonly","resource","atomic_uint","noperspective","patch","sample","subroutine","common","partition","active","filter","image1D","image2D","image3D","imageCube","iimage1D","iimage2D","iimage3D","iimageCube","uimage1D","uimage2D","uimage3D","uimageCube","image1DArray","image2DArray","iimage1DArray","iimage2DArray","uimage1DArray","uimage2DArray","image1DShadow","image2DShadow","image1DArrayShadow","image2DArrayShadow","imageBuffer","iimageBuffer","uimageBuffer","sampler1DArray","sampler1DArrayShadow","isampler1D","isampler1DArray","usampler1D","usampler1DArray","isampler2DRect","usampler2DRect","samplerBuffer","isamplerBuffer","usamplerBuffer","sampler2DMS","isampler2DMS","usampler2DMS","sampler2DMSArray","isampler2DMSArray","usampler2DMSArray"])},{"./literals":228}],228:[function(t,e,r){e.exports=["precision","highp","mediump","lowp","attribute","const","uniform","varying","break","continue","do","for","while","if","else","in","out","inout","float","int","uint","void","bool","true","false","discard","return","mat2","mat3","mat4","vec2","vec3","vec4","ivec2","ivec3","ivec4","bvec2","bvec3","bvec4","sampler1D","sampler2D","sampler3D","samplerCube","sampler1DShadow","sampler2DShadow","struct","asm","class","union","enum","typedef","template","this","packed","goto","switch","default","inline","noinline","volatile","public","static","extern","external","interface","long","short","double","half","fixed","unsigned","input","output","hvec2","hvec3","hvec4","dvec2","dvec3","dvec4","fvec2","fvec3","fvec4","sampler2DRect","sampler3DRect","sampler2DRectShadow","sizeof","cast","namespa
ce","using"]},{}],229:[function(t,e,r){e.exports=["<<=",">>=","++","--","<<",">>","<=",">=","==","!=","&&","||","+=","-=","*=","/=","%=","&=","^^","^=","|=","(",")","[","]",".","!","~","*","/","%","+","-","<",">","&","^","|","?",":","=",",",";","{","}"]},{}],230:[function(t,e,r){var n=t("./index");e.exports=function(t,e){var r=n(e),i=[];return i=(i=i.concat(r(t))).concat(r(null))}},{"./index":224}],231:[function(t,e,r){e.exports=function(t){"string"==typeof t&&(t=[t]);for(var e=[].slice.call(arguments,1),r=[],n=0;n0;)for(var s=(t=o.pop()).adjacent,l=0;l<=r;++l){var c=s[l];if(c.boundary&&!(c.lastVisited<=-n)){for(var u=c.vertices,f=0;f<=r;++f){var h=u[f];i[f]=h<0?e:a[h]}var p=this.orient();if(p>0)return c;c.lastVisited=-n,0===p&&o.push(c)}}return null},u.walk=function(t,e){var r=this.vertices.length-1,n=this.dimension,i=this.vertices,a=this.tuple,o=e?this.interior.length*Math.random()|0:this.interior.length-1,s=this.interior[o];t:for(;!s.boundary;){for(var l=s.vertices,c=s.adjacent,u=0;u<=n;++u)a[u]=i[l[u]];s.lastVisited=r;for(u=0;u<=n;++u){var f=c[u];if(!(f.lastVisited>=r)){var h=a[u];a[u]=t;var p=this.orient();if(a[u]=h,p<0){s=f;continue t}f.boundary?f.lastVisited=-r:f.lastVisited=r}}return}return s},u.addPeaks=function(t,e){var r=this.vertices.length-1,n=this.dimension,i=this.vertices,l=this.tuple,c=this.interior,u=this.simplices,f=[e];e.lastVisited=r,e.vertices[e.vertices.indexOf(-1)]=r,e.boundary=!1,c.push(e);for(var h=[];f.length>0;){var p=(e=f.pop()).vertices,d=e.adjacent,m=p.indexOf(r);if(!(m<0))for(var g=0;g<=n;++g)if(g!==m){var v=d[g];if(v.boundary&&!(v.lastVisited>=r)){var y=v.vertices;if(v.lastVisited!==-r){for(var x=0,b=0;b<=n;++b)y[b]<0?(x=b,l[b]=t):l[b]=i[y[b]];if(this.orient()>0){y[x]=r,v.boundary=!1,c.push(v),f.push(v),v.lastVisited=r;continue}v.lastVisited=-r}var _=v.adjacent,w=p.slice(),T=d.slice(),k=new a(w,T,!0);u.push(k);var A=_.indexOf(e);if(!(A<0)){_[A]=k,T[m]=v,w[g]=-1,T[g]=e,d[g]=k,k.flip();for(b=0;b<=n;++b){var 
M=w[b];if(!(M<0||M===r)){for(var S=new Array(n-1),E=0,L=0;L<=n;++L){var C=w[L];C<0||L===b||(S[E++]=C)}h.push(new o(S,k,b))}}}}}}h.sort(s);for(g=0;g+1=0?o[l++]=s[u]:c=1&u;if(c===(1&t)){var f=o[0];o[0]=o[1],o[1]=f}e.push(o)}}return e}},{"robust-orientation":284,"simplicial-complex":293}],234:[function(t,e,r){"use strict";var n=t("binary-search-bounds");function i(t,e,r,n,i){this.mid=t,this.left=e,this.right=r,this.leftPoints=n,this.rightPoints=i,this.count=(e?e.count:0)+(r?r.count:0)+n.length}e.exports=function(t){if(!t||0===t.length)return new v(null);return new v(g(t))};var a=i.prototype;function o(t,e){t.mid=e.mid,t.left=e.left,t.right=e.right,t.leftPoints=e.leftPoints,t.rightPoints=e.rightPoints,t.count=e.count}function s(t,e){var r=g(e);t.mid=r.mid,t.left=r.left,t.right=r.right,t.leftPoints=r.leftPoints,t.rightPoints=r.rightPoints,t.count=r.count}function l(t,e){var r=t.intervals([]);r.push(e),s(t,r)}function c(t,e){var r=t.intervals([]),n=r.indexOf(e);return n<0?0:(r.splice(n,1),s(t,r),1)}function u(t,e,r){for(var n=0;n=0&&t[n][1]>=e;--n){var i=r(t[n]);if(i)return i}}function h(t,e){for(var r=0;r>1],a=[],o=[],s=[];for(r=0;r3*(e+1)?l(this,t):this.left.insert(t):this.left=g([t]);else if(t[0]>this.mid)this.right?4*(this.right.count+1)>3*(e+1)?l(this,t):this.right.insert(t):this.right=g([t]);else{var r=n.ge(this.leftPoints,t,d),i=n.ge(this.rightPoints,t,m);this.leftPoints.splice(r,0,t),this.rightPoints.splice(i,0,t)}},a.remove=function(t){var e=this.count-this.leftPoints;if(t[1]3*(e-1)?c(this,t):2===(s=this.left.remove(t))?(this.left=null,this.count-=1,1):(1===s&&(this.count-=1),s):0;if(t[0]>this.mid)return this.right?4*(this.left?this.left.count:0)>3*(e-1)?c(this,t):2===(s=this.right.remove(t))?(this.right=null,this.count-=1,1):(1===s&&(this.count-=1),s):0;if(1===this.count)return this.leftPoints[0]===t?2:0;if(1===this.leftPoints.length&&this.leftPoints[0]===t){if(this.left&&this.right){for(var 
r=this,i=this.left;i.right;)r=i,i=i.right;if(r===this)i.right=this.right;else{var a=this.left,s=this.right;r.count-=i.count,r.right=i.left,i.left=a,i.right=s}o(this,i),this.count=(this.left?this.left.count:0)+(this.right?this.right.count:0)+this.leftPoints.length}else this.left?o(this,this.left):o(this,this.right);return 1}for(a=n.ge(this.leftPoints,t,d);athis.mid){var r;if(this.right)if(r=this.right.queryPoint(t,e))return r;return f(this.rightPoints,t,e)}return h(this.leftPoints,e)},a.queryInterval=function(t,e,r){var n;if(tthis.mid&&this.right&&(n=this.right.queryInterval(t,e,r)))return n;return ethis.mid?f(this.rightPoints,t,r):h(this.leftPoints,r)};var y=v.prototype;y.insert=function(t){this.root?this.root.insert(t):this.root=new i(t[0],null,null,[t],[t])},y.remove=function(t){if(this.root){var e=this.root.remove(t);return 2===e&&(this.root=null),0!==e}return!1},y.queryPoint=function(t,e){if(this.root)return this.root.queryPoint(t,e)},y.queryInterval=function(t,e,r){if(t<=e&&this.root)return this.root.queryInterval(t,e,r)},Object.defineProperty(y,"count",{get:function(){return this.root?this.root.count:0}}),Object.defineProperty(y,"intervals",{get:function(){return this.root?this.root.intervals([]):[]}})},{"binary-search-bounds":31}],235:[function(t,e,r){"use strict";e.exports=function(t){for(var e=new Array(t),r=0;r - * @license MIT - */ -e.exports=function(t){return null!=t&&(n(t)||function(t){return"function"==typeof t.readFloatLE&&"function"==typeof t.slice&&n(t.slice(0,0))}(t)||!!t._isBuffer)}},{}],238:[function(t,e,r){"use strict";e.exports=a,e.exports.isMobile=a,e.exports.default=a;var n=/(android|bb\d+|meego).+mobile|avantgo|bada\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\/|plucker|pocket|psp|series[46]0|symbian|treo|up\.(browser|link)|vodafone|wap|windows 
(ce|phone)|xda|xiino/i,i=/(android|bb\d+|meego).+mobile|avantgo|bada\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\/|plucker|pocket|psp|series[46]0|symbian|treo|up\.(browser|link)|vodafone|wap|windows (ce|phone)|xda|xiino|android|ipad|playbook|silk/i;function a(t){t||(t={});var e=t.ua;if(e||"undefined"==typeof navigator||(e=navigator.userAgent),e&&e.headers&&"string"==typeof e.headers["user-agent"]&&(e=e.headers["user-agent"]),"string"!=typeof e)return!1;var r=t.tablet?i.test(e):n.test(e);return!r&&t.tablet&&t.featureDetect&&navigator&&navigator.maxTouchPoints>1&&-1!==e.indexOf("Macintosh")&&-1!==e.indexOf("Safari")&&(r=!0),r}},{}],239:[function(t,e,r){"use strict";e.exports=function(t){for(var e,r=t.length,n=0;n13)&&32!==e&&133!==e&&160!==e&&5760!==e&&6158!==e&&(e<8192||e>8205)&&8232!==e&&8233!==e&&8239!==e&&8287!==e&&8288!==e&&12288!==e&&65279!==e)return!1;return!0}},{}],240:[function(t,e,r){e.exports=function(t,e,r){return t*(1-r)+e*r}},{}],241:[function(t,e,r){var n=t("./normalize"),i=t("gl-mat4/create"),a=t("gl-mat4/clone"),o=t("gl-mat4/determinant"),s=t("gl-mat4/invert"),l=t("gl-mat4/transpose"),c={length:t("gl-vec3/length"),normalize:t("gl-vec3/normalize"),dot:t("gl-vec3/dot"),cross:t("gl-vec3/cross")},u=i(),f=i(),h=[0,0,0,0],p=[[0,0,0],[0,0,0],[0,0,0]],d=[0,0,0];function m(t,e,r,n,i){t[0]=e[0]*n+r[0]*i,t[1]=e[1]*n+r[1]*i,t[2]=e[2]*n+r[2]*i}e.exports=function(t,e,r,i,g,v){if(e||(e=[0,0,0]),r||(r=[0,0,0]),i||(i=[0,0,0]),g||(g=[0,0,0,1]),v||(v=[0,0,0,1]),!n(u,t))return!1;if(a(f,u),f[3]=0,f[7]=0,f[11]=0,f[15]=1,Math.abs(o(f)<1e-8))return!1;var 
y,x,b,_,w,T,k,A=u[3],M=u[7],S=u[11],E=u[12],L=u[13],C=u[14],P=u[15];if(0!==A||0!==M||0!==S){if(h[0]=A,h[1]=M,h[2]=S,h[3]=P,!s(f,f))return!1;l(f,f),y=g,b=f,_=(x=h)[0],w=x[1],T=x[2],k=x[3],y[0]=b[0]*_+b[4]*w+b[8]*T+b[12]*k,y[1]=b[1]*_+b[5]*w+b[9]*T+b[13]*k,y[2]=b[2]*_+b[6]*w+b[10]*T+b[14]*k,y[3]=b[3]*_+b[7]*w+b[11]*T+b[15]*k}else g[0]=g[1]=g[2]=0,g[3]=1;if(e[0]=E,e[1]=L,e[2]=C,function(t,e){t[0][0]=e[0],t[0][1]=e[1],t[0][2]=e[2],t[1][0]=e[4],t[1][1]=e[5],t[1][2]=e[6],t[2][0]=e[8],t[2][1]=e[9],t[2][2]=e[10]}(p,u),r[0]=c.length(p[0]),c.normalize(p[0],p[0]),i[0]=c.dot(p[0],p[1]),m(p[1],p[1],p[0],1,-i[0]),r[1]=c.length(p[1]),c.normalize(p[1],p[1]),i[0]/=r[1],i[1]=c.dot(p[0],p[2]),m(p[2],p[2],p[0],1,-i[1]),i[2]=c.dot(p[1],p[2]),m(p[2],p[2],p[1],1,-i[2]),r[2]=c.length(p[2]),c.normalize(p[2],p[2]),i[1]/=r[2],i[2]/=r[2],c.cross(d,p[1],p[2]),c.dot(p[0],d)<0)for(var I=0;I<3;I++)r[I]*=-1,p[I][0]*=-1,p[I][1]*=-1,p[I][2]*=-1;return v[0]=.5*Math.sqrt(Math.max(1+p[0][0]-p[1][1]-p[2][2],0)),v[1]=.5*Math.sqrt(Math.max(1-p[0][0]+p[1][1]-p[2][2],0)),v[2]=.5*Math.sqrt(Math.max(1-p[0][0]-p[1][1]+p[2][2],0)),v[3]=.5*Math.sqrt(Math.max(1+p[0][0]+p[1][1]+p[2][2],0)),p[2][1]>p[1][2]&&(v[0]=-v[0]),p[0][2]>p[2][0]&&(v[1]=-v[1]),p[1][0]>p[0][1]&&(v[2]=-v[2]),!0}},{"./normalize":242,"gl-mat4/clone":92,"gl-mat4/create":93,"gl-mat4/determinant":94,"gl-mat4/invert":98,"gl-mat4/transpose":109,"gl-vec3/cross":157,"gl-vec3/dot":162,"gl-vec3/length":172,"gl-vec3/normalize":179}],242:[function(t,e,r){e.exports=function(t,e){var r=e[15];if(0===r)return!1;for(var n=1/r,i=0;i<16;i++)t[i]=e[i]*n;return!0}},{}],243:[function(t,e,r){var n=t("gl-vec3/lerp"),i=t("mat4-recompose"),a=t("mat4-decompose"),o=t("gl-mat4/determinant"),s=t("quat-slerp"),l=f(),c=f(),u=f();function f(){return{translate:h(),scale:h(1),skew:h(),perspective:[0,0,0,1],quaternion:[0,0,0,1]}}function h(t){return[t||0,t||0,t||0]}e.exports=function(t,e,r,f){if(0===o(e)||0===o(r))return!1;var 
h=a(e,l.translate,l.scale,l.skew,l.perspective,l.quaternion),p=a(r,c.translate,c.scale,c.skew,c.perspective,c.quaternion);return!(!h||!p)&&(n(u.translate,l.translate,c.translate,f),n(u.skew,l.skew,c.skew,f),n(u.scale,l.scale,c.scale,f),n(u.perspective,l.perspective,c.perspective,f),s(u.quaternion,l.quaternion,c.quaternion,f),i(t,u.translate,u.scale,u.skew,u.perspective,u.quaternion),!0)}},{"gl-mat4/determinant":94,"gl-vec3/lerp":173,"mat4-decompose":241,"mat4-recompose":244,"quat-slerp":271}],244:[function(t,e,r){var n={identity:t("gl-mat4/identity"),translate:t("gl-mat4/translate"),multiply:t("gl-mat4/multiply"),create:t("gl-mat4/create"),scale:t("gl-mat4/scale"),fromRotationTranslation:t("gl-mat4/fromRotationTranslation")},i=(n.create(),n.create());e.exports=function(t,e,r,a,o,s){return n.identity(t),n.fromRotationTranslation(t,s,e),t[3]=o[0],t[7]=o[1],t[11]=o[2],t[15]=o[3],n.identity(i),0!==a[2]&&(i[9]=a[2],n.multiply(t,t,i)),0!==a[1]&&(i[9]=0,i[8]=a[1],n.multiply(t,t,i)),0!==a[0]&&(i[8]=0,i[4]=a[0],n.multiply(t,t,i)),n.scale(t,t,r),t}},{"gl-mat4/create":93,"gl-mat4/fromRotationTranslation":96,"gl-mat4/identity":97,"gl-mat4/multiply":100,"gl-mat4/scale":107,"gl-mat4/translate":108}],245:[function(t,e,r){"use strict";var n=t("binary-search-bounds"),i=t("mat4-interpolate"),a=t("gl-mat4/invert"),o=t("gl-mat4/rotateX"),s=t("gl-mat4/rotateY"),l=t("gl-mat4/rotateZ"),c=t("gl-mat4/lookAt"),u=t("gl-mat4/translate"),f=(t("gl-mat4/scale"),t("gl-vec3/normalize")),h=[0,0,0];function p(t){this._components=t.slice(),this._time=[0],this.prevMatrix=t.slice(),this.nextMatrix=t.slice(),this.computedMatrix=t.slice(),this.computedInverse=t.slice(),this.computedEye=[0,0,0],this.computedUp=[0,0,0],this.computedCenter=[0,0,0],this.computedRadius=[0],this._limits=[-1/0,1/0]}e.exports=function(t){return new p((t=t||{}).matrix||[1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1])};var d=p.prototype;d.recalcMatrix=function(t){var e=this._time,r=n.le(e,t),o=this.computedMatrix;if(!(r<0)){var 
s=this._components;if(r===e.length-1)for(var l=16*r,c=0;c<16;++c)o[c]=s[l++];else{var u=e[r+1]-e[r],h=(l=16*r,this.prevMatrix),p=!0;for(c=0;c<16;++c)h[c]=s[l++];var d=this.nextMatrix;for(c=0;c<16;++c)d[c]=s[l++],p=p&&h[c]===d[c];if(u<1e-6||p)for(c=0;c<16;++c)o[c]=h[c];else i(o,h,d,(t-e[r])/u)}var m=this.computedUp;m[0]=o[1],m[1]=o[5],m[2]=o[9],f(m,m);var g=this.computedInverse;a(g,o);var v=this.computedEye,y=g[15];v[0]=g[12]/y,v[1]=g[13]/y,v[2]=g[14]/y;var x=this.computedCenter,b=Math.exp(this.computedRadius[0]);for(c=0;c<3;++c)x[c]=v[c]-o[2+4*c]*b}},d.idle=function(t){if(!(t1&&n(t[o[u-2]],t[o[u-1]],c)<=0;)u-=1,o.pop();for(o.push(l),u=s.length;u>1&&n(t[s[u-2]],t[s[u-1]],c)>=0;)u-=1,s.pop();s.push(l)}r=new Array(s.length+o.length-2);for(var f=0,h=(i=0,o.length);i0;--p)r[f++]=s[p];return r};var n=t("robust-orientation")[3]},{"robust-orientation":284}],247:[function(t,e,r){"use strict";e.exports=function(t,e){e||(e=t,t=window);var r=0,i=0,a=0,o={shift:!1,alt:!1,control:!1,meta:!1},s=!1;function l(t){var e=!1;return"altKey"in t&&(e=e||t.altKey!==o.alt,o.alt=!!t.altKey),"shiftKey"in t&&(e=e||t.shiftKey!==o.shift,o.shift=!!t.shiftKey),"ctrlKey"in t&&(e=e||t.ctrlKey!==o.control,o.control=!!t.ctrlKey),"metaKey"in t&&(e=e||t.metaKey!==o.meta,o.meta=!!t.metaKey),e}function c(t,s){var c=n.x(s),u=n.y(s);"buttons"in s&&(t=0|s.buttons),(t!==r||c!==i||u!==a||l(s))&&(r=0|t,i=c||0,a=u||0,e&&e(r,i,a,o))}function u(t){c(0,t)}function f(){(r||i||a||o.shift||o.alt||o.meta||o.control)&&(i=a=0,r=0,o.shift=o.alt=o.control=o.meta=!1,e&&e(0,0,0,o))}function h(t){l(t)&&e&&e(r,i,a,o)}function p(t){0===n.buttons(t)?c(0,t):c(r,t)}function d(t){c(r|n.buttons(t),t)}function m(t){c(r&~n.buttons(t),t)}function 
g(){s||(s=!0,t.addEventListener("mousemove",p),t.addEventListener("mousedown",d),t.addEventListener("mouseup",m),t.addEventListener("mouseleave",u),t.addEventListener("mouseenter",u),t.addEventListener("mouseout",u),t.addEventListener("mouseover",u),t.addEventListener("blur",f),t.addEventListener("keyup",h),t.addEventListener("keydown",h),t.addEventListener("keypress",h),t!==window&&(window.addEventListener("blur",f),window.addEventListener("keyup",h),window.addEventListener("keydown",h),window.addEventListener("keypress",h)))}g();var v={element:t};return Object.defineProperties(v,{enabled:{get:function(){return s},set:function(e){e?g():function(){if(!s)return;s=!1,t.removeEventListener("mousemove",p),t.removeEventListener("mousedown",d),t.removeEventListener("mouseup",m),t.removeEventListener("mouseleave",u),t.removeEventListener("mouseenter",u),t.removeEventListener("mouseout",u),t.removeEventListener("mouseover",u),t.removeEventListener("blur",f),t.removeEventListener("keyup",h),t.removeEventListener("keydown",h),t.removeEventListener("keypress",h),t!==window&&(window.removeEventListener("blur",f),window.removeEventListener("keyup",h),window.removeEventListener("keydown",h),window.removeEventListener("keypress",h))}()},enumerable:!0},buttons:{get:function(){return r},enumerable:!0},x:{get:function(){return i},enumerable:!0},y:{get:function(){return a},enumerable:!0},mods:{get:function(){return o},enumerable:!0}}),v};var n=t("mouse-event")},{"mouse-event":249}],248:[function(t,e,r){var n={left:0,top:0};e.exports=function(t,e,r){e=e||t.currentTarget||t.srcElement,Array.isArray(r)||(r=[0,0]);var i=t.clientX||0,a=t.clientY||0,o=(s=e,s===window||s===document||s===document.body?n:s.getBoundingClientRect());var s;return r[0]=i-o.left,r[1]=a-o.top,r}},{}],249:[function(t,e,r){"use strict";function n(t){return t.target||t.srcElement||window}r.buttons=function(t){if("object"==typeof t){if("buttons"in t)return t.buttons;if("which"in t){if(2===(e=t.which))return 
4;if(3===e)return 2;if(e>0)return 1<=0)return 1< 0");"function"!=typeof t.vertex&&e("Must specify vertex creation function");"function"!=typeof t.cell&&e("Must specify cell creation function");"function"!=typeof t.phase&&e("Must specify phase function");for(var s=t.getters||[],l=new Array(a),c=0;c=0?l[c]=!0:l[c]=!1;return function(t,e,r,a,o,s){var l=[s,o].join(",");return(0,i[l])(t,e,r,n.mallocUint32,n.freeUint32)}(t.vertex,t.cell,t.phase,0,r,l)};var i={"false,0,1":function(t,e,r,n,i){return function(a,o,s,l){var c,u=0|a.shape[0],f=0|a.shape[1],h=a.data,p=0|a.offset,d=0|a.stride[0],m=0|a.stride[1],g=p,v=0|-d,y=0,x=0|-m,b=0,_=-d-m|0,w=0,T=0|d,k=m-d*u|0,A=0,M=0,S=0,E=2*u|0,L=n(E),C=n(E),P=0,I=0,O=-1,z=-1,D=0,R=0|-u,F=0|u,B=0,N=-u-1|0,j=u-1|0,U=0,V=0,H=0;for(A=0;A0){if(M=1,L[P++]=r(h[g],o,s,l),g+=T,u>0)for(A=1,c=h[g],I=L[P]=r(c,o,s,l),D=L[P+O],B=L[P+R],U=L[P+N],I===D&&I===B&&I===U||(y=h[g+v],b=h[g+x],w=h[g+_],t(A,M,c,y,b,w,I,D,B,U,o,s,l),V=C[P]=S++),P+=1,g+=T,A=2;A0)for(A=1,c=h[g],I=L[P]=r(c,o,s,l),D=L[P+O],B=L[P+R],U=L[P+N],I===D&&I===B&&I===U||(y=h[g+v],b=h[g+x],w=h[g+_],t(A,M,c,y,b,w,I,D,B,U,o,s,l),V=C[P]=S++,U!==B&&e(C[P+R],V,b,w,B,U,o,s,l)),P+=1,g+=T,A=2;A0){if(A=1,L[P++]=r(h[g],o,s,l),g+=T,f>0)for(M=1,c=h[g],I=L[P]=r(c,o,s,l),B=L[P+R],D=L[P+O],U=L[P+N],I===B&&I===D&&I===U||(y=h[g+v],b=h[g+x],w=h[g+_],t(A,M,c,y,b,w,I,B,D,U,o,s,l),V=C[P]=S++),P+=1,g+=T,M=2;M0)for(M=1,c=h[g],I=L[P]=r(c,o,s,l),B=L[P+R],D=L[P+O],U=L[P+N],I===B&&I===D&&I===U||(y=h[g+v],b=h[g+x],w=h[g+_],t(A,M,c,y,b,w,I,B,D,U,o,s,l),V=C[P]=S++,U!==B&&e(C[P+R],V,w,y,U,B,o,s,l)),P+=1,g+=T,M=2;M2&&a[1]>2&&n(i.pick(-1,-1).lo(1,1).hi(a[0]-2,a[1]-2),t.pick(-1,-1,0).lo(1,1).hi(a[0]-2,a[1]-2),t.pick(-1,-1,1).lo(1,1).hi(a[0]-2,a[1]-2)),a[1]>2&&(r(i.pick(0,-1).lo(1).hi(a[1]-2),t.pick(0,-1,1).lo(1).hi(a[1]-2)),e(t.pick(0,-1,0).lo(1).hi(a[1]-2))),a[1]>2&&(r(i.pick(a[0]-1,-1).lo(1).hi(a[1]-2),t.pick(a[0]-1,-1,1).lo(1).hi(a[1]-2)),e(t.pick(a[0]-1,-1,0).lo(1).hi(a[1]-2))),a[0]>2&&(r(i.pick(-1,0).lo(1).hi(a[0]-2),t.pic
k(-1,0,0).lo(1).hi(a[0]-2)),e(t.pick(-1,0,1).lo(1).hi(a[0]-2))),a[0]>2&&(r(i.pick(-1,a[1]-1).lo(1).hi(a[0]-2),t.pick(-1,a[1]-1,0).lo(1).hi(a[0]-2)),e(t.pick(-1,a[1]-1,1).lo(1).hi(a[0]-2))),t.set(0,0,0,0),t.set(0,0,1,0),t.set(a[0]-1,0,0,0),t.set(a[0]-1,0,1,0),t.set(0,a[1]-1,0,0),t.set(0,a[1]-1,1,0),t.set(a[0]-1,a[1]-1,0,0),t.set(a[0]-1,a[1]-1,1,0),t}}e.exports=function(t,e,r){return Array.isArray(r)||(r=n(e.dimension,"string"==typeof r?r:"clamp")),0===e.size?t:0===e.dimension?(t.set(0),t):function(t){var e=t.join();if(a=u[e])return a;for(var r=t.length,n=[f,h],i=1;i<=r;++i)n.push(p(i));var a=d.apply(void 0,n);return u[e]=a,a}(r)(t,e)}},{dup:65}],253:[function(t,e,r){"use strict";function n(t,e){var r=Math.floor(e),n=e-r,i=0<=r&&r0;){x<64?(l=x,x=0):(l=64,x-=64);for(var b=0|t[1];b>0;){b<64?(c=b,b=0):(c=64,b-=64),n=v+x*f+b*h,o=y+x*d+b*m;var _=0,w=0,T=0,k=p,A=f-u*p,M=h-l*f,S=g,E=d-u*g,L=m-l*d;for(T=0;T0;){m<64?(l=m,m=0):(l=64,m-=64);for(var g=0|t[0];g>0;){g<64?(s=g,g=0):(s=64,g-=64),n=p+m*u+g*c,o=d+m*h+g*f;var v=0,y=0,x=u,b=c-l*u,_=h,w=f-l*h;for(y=0;y0;){y<64?(c=y,y=0):(c=64,y-=64);for(var x=0|t[0];x>0;){x<64?(s=x,x=0):(s=64,x-=64);for(var b=0|t[1];b>0;){b<64?(l=b,b=0):(l=64,b-=64),n=g+y*h+x*u+b*f,o=v+y*m+x*p+b*d;var _=0,w=0,T=0,k=h,A=u-c*h,M=f-s*u,S=m,E=p-c*m,L=d-s*p;for(T=0;Tr;){v=0,y=m-o;e:for(g=0;gb)break e;y+=f,v+=h}for(v=m,y=m-o,g=0;g>1,q=H-j,G=H+j,Y=U,W=q,X=H,Z=G,J=V,K=i+1,Q=a-1,$=!0,tt=0,et=0,rt=0,nt=f,it=e(nt),at=e(nt);A=l*Y,M=l*W,N=s;t:for(k=0;k0){g=Y,Y=W,W=g;break t}if(rt<0)break t;N+=p}A=l*Z,M=l*J,N=s;t:for(k=0;k0){g=Z,Z=J,J=g;break t}if(rt<0)break t;N+=p}A=l*Y,M=l*X,N=s;t:for(k=0;k0){g=Y,Y=X,X=g;break t}if(rt<0)break t;N+=p}A=l*W,M=l*X,N=s;t:for(k=0;k0){g=W,W=X,X=g;break t}if(rt<0)break t;N+=p}A=l*Y,M=l*Z,N=s;t:for(k=0;k0){g=Y,Y=Z,Z=g;break t}if(rt<0)break t;N+=p}A=l*X,M=l*Z,N=s;t:for(k=0;k0){g=X,X=Z,Z=g;break t}if(rt<0)break t;N+=p}A=l*W,M=l*J,N=s;t:for(k=0;k0){g=W,W=J,J=g;break t}if(rt<0)break t;N+=p}A=l*W,M=l*X,N=s;t:for(k=0;k0){g=W,W=X,X=g;break 
t}if(rt<0)break t;N+=p}A=l*Z,M=l*J,N=s;t:for(k=0;k0){g=Z,Z=J,J=g;break t}if(rt<0)break t;N+=p}for(A=l*Y,M=l*W,S=l*X,E=l*Z,L=l*J,C=l*U,P=l*H,I=l*V,B=0,N=s,k=0;k0)){if(rt<0){for(A=l*b,M=l*K,S=l*Q,N=s,k=0;k0)for(;;){_=s+Q*l,B=0;t:for(k=0;k0)){_=s+Q*l,B=0;t:for(k=0;kV){t:for(;;){for(_=s+K*l,B=0,N=s,k=0;k1&&n?s(r,n[0],n[1]):s(r)}(t,e,l);return n(l,c)}},{"typedarray-pool":308}],258:[function(t,e,r){"use strict";var n=t("./lib/compile_sort.js"),i={};e.exports=function(t){var e=t.order,r=t.dtype,a=[e,r].join(":"),o=i[a];return o||(i[a]=o=n(e,r)),o(t),t}},{"./lib/compile_sort.js":257}],259:[function(t,e,r){var n=t("is-buffer"),i="undefined"!=typeof Float64Array;function a(t,e){return t[0]-e[0]}function o(){var t,e=this.stride,r=new Array(e.length);for(t=0;t=0&&(e+=a*(r=0|t),i-=r),new n(this.data,i,a,e)},i.step=function(t){var e=this.shape[0],r=this.stride[0],i=this.offset,a=0,o=Math.ceil;return"number"==typeof t&&((a=0|t)<0?(i+=r*(e-1),e=o(-e/a)):e=o(e/a),r*=a),new n(this.data,e,r,i)},i.transpose=function(t){t=void 0===t?0:0|t;var e=this.shape,r=this.stride;return new n(this.data,e[t],r[t],this.offset)},i.pick=function(t){var r=[],n=[],i=this.offset;return"number"==typeof t&&t>=0?i=i+this.stride[0]*t|0:(r.push(this.shape[0]),n.push(this.stride[0])),(0,e[r.length+1])(this.data,r,n,i)},function(t,e,r,i){return new n(t,e[0],r[0],i)}},2:function(t,e,r){function n(t,e,r,n,i,a){this.data=t,this.shape=[e,r],this.stride=[n,i],this.offset=0|a}var i=n.prototype;return i.dtype=t,i.dimension=2,Object.defineProperty(i,"size",{get:function(){return this.shape[0]*this.shape[1]}}),Object.defineProperty(i,"order",{get:function(){return 
Math.abs(this.stride[0])>Math.abs(this.stride[1])?[1,0]:[0,1]}}),i.set=function(e,r,n){return"generic"===t?this.data.set(this.offset+this.stride[0]*e+this.stride[1]*r,n):this.data[this.offset+this.stride[0]*e+this.stride[1]*r]=n},i.get=function(e,r){return"generic"===t?this.data.get(this.offset+this.stride[0]*e+this.stride[1]*r):this.data[this.offset+this.stride[0]*e+this.stride[1]*r]},i.index=function(t,e){return this.offset+this.stride[0]*t+this.stride[1]*e},i.hi=function(t,e){return new n(this.data,"number"!=typeof t||t<0?this.shape[0]:0|t,"number"!=typeof e||e<0?this.shape[1]:0|e,this.stride[0],this.stride[1],this.offset)},i.lo=function(t,e){var r=this.offset,i=0,a=this.shape[0],o=this.shape[1],s=this.stride[0],l=this.stride[1];return"number"==typeof t&&t>=0&&(r+=s*(i=0|t),a-=i),"number"==typeof e&&e>=0&&(r+=l*(i=0|e),o-=i),new n(this.data,a,o,s,l,r)},i.step=function(t,e){var r=this.shape[0],i=this.shape[1],a=this.stride[0],o=this.stride[1],s=this.offset,l=0,c=Math.ceil;return"number"==typeof t&&((l=0|t)<0?(s+=a*(r-1),r=c(-r/l)):r=c(r/l),a*=l),"number"==typeof e&&((l=0|e)<0?(s+=o*(i-1),i=c(-i/l)):i=c(i/l),o*=l),new n(this.data,r,i,a,o,s)},i.transpose=function(t,e){t=void 0===t?0:0|t,e=void 0===e?1:0|e;var r=this.shape,i=this.stride;return new n(this.data,r[t],r[e],i[t],i[e],this.offset)},i.pick=function(t,r){var n=[],i=[],a=this.offset;return"number"==typeof t&&t>=0?a=a+this.stride[0]*t|0:(n.push(this.shape[0]),i.push(this.stride[0])),"number"==typeof r&&r>=0?a=a+this.stride[1]*r|0:(n.push(this.shape[1]),i.push(this.stride[1])),(0,e[n.length+1])(this.data,n,i,a)},function(t,e,r,i){return new n(t,e[0],e[1],r[0],r[1],i)}},3:function(t,e,r){function n(t,e,r,n,i,a,o,s){this.data=t,this.shape=[e,r,n],this.stride=[i,a,o],this.offset=0|s}var i=n.prototype;return i.dtype=t,i.dimension=3,Object.defineProperty(i,"size",{get:function(){return this.shape[0]*this.shape[1]*this.shape[2]}}),Object.defineProperty(i,"order",{get:function(){var 
t=Math.abs(this.stride[0]),e=Math.abs(this.stride[1]),r=Math.abs(this.stride[2]);return t>e?e>r?[2,1,0]:t>r?[1,2,0]:[1,0,2]:t>r?[2,0,1]:r>e?[0,1,2]:[0,2,1]}}),i.set=function(e,r,n,i){return"generic"===t?this.data.set(this.offset+this.stride[0]*e+this.stride[1]*r+this.stride[2]*n,i):this.data[this.offset+this.stride[0]*e+this.stride[1]*r+this.stride[2]*n]=i},i.get=function(e,r,n){return"generic"===t?this.data.get(this.offset+this.stride[0]*e+this.stride[1]*r+this.stride[2]*n):this.data[this.offset+this.stride[0]*e+this.stride[1]*r+this.stride[2]*n]},i.index=function(t,e,r){return this.offset+this.stride[0]*t+this.stride[1]*e+this.stride[2]*r},i.hi=function(t,e,r){return new n(this.data,"number"!=typeof t||t<0?this.shape[0]:0|t,"number"!=typeof e||e<0?this.shape[1]:0|e,"number"!=typeof r||r<0?this.shape[2]:0|r,this.stride[0],this.stride[1],this.stride[2],this.offset)},i.lo=function(t,e,r){var i=this.offset,a=0,o=this.shape[0],s=this.shape[1],l=this.shape[2],c=this.stride[0],u=this.stride[1],f=this.stride[2];return"number"==typeof t&&t>=0&&(i+=c*(a=0|t),o-=a),"number"==typeof e&&e>=0&&(i+=u*(a=0|e),s-=a),"number"==typeof r&&r>=0&&(i+=f*(a=0|r),l-=a),new n(this.data,o,s,l,c,u,f,i)},i.step=function(t,e,r){var i=this.shape[0],a=this.shape[1],o=this.shape[2],s=this.stride[0],l=this.stride[1],c=this.stride[2],u=this.offset,f=0,h=Math.ceil;return"number"==typeof t&&((f=0|t)<0?(u+=s*(i-1),i=h(-i/f)):i=h(i/f),s*=f),"number"==typeof e&&((f=0|e)<0?(u+=l*(a-1),a=h(-a/f)):a=h(a/f),l*=f),"number"==typeof r&&((f=0|r)<0?(u+=c*(o-1),o=h(-o/f)):o=h(o/f),c*=f),new n(this.data,i,a,o,s,l,c,u)},i.transpose=function(t,e,r){t=void 0===t?0:0|t,e=void 0===e?1:0|e,r=void 0===r?2:0|r;var i=this.shape,a=this.stride;return new n(this.data,i[t],i[e],i[r],a[t],a[e],a[r],this.offset)},i.pick=function(t,r,n){var i=[],a=[],o=this.offset;return"number"==typeof t&&t>=0?o=o+this.stride[0]*t|0:(i.push(this.shape[0]),a.push(this.stride[0])),"number"==typeof 
r&&r>=0?o=o+this.stride[1]*r|0:(i.push(this.shape[1]),a.push(this.stride[1])),"number"==typeof n&&n>=0?o=o+this.stride[2]*n|0:(i.push(this.shape[2]),a.push(this.stride[2])),(0,e[i.length+1])(this.data,i,a,o)},function(t,e,r,i){return new n(t,e[0],e[1],e[2],r[0],r[1],r[2],i)}},4:function(t,e,r){function n(t,e,r,n,i,a,o,s,l,c){this.data=t,this.shape=[e,r,n,i],this.stride=[a,o,s,l],this.offset=0|c}var i=n.prototype;return i.dtype=t,i.dimension=4,Object.defineProperty(i,"size",{get:function(){return this.shape[0]*this.shape[1]*this.shape[2]*this.shape[3]}}),Object.defineProperty(i,"order",{get:r}),i.set=function(e,r,n,i,a){return"generic"===t?this.data.set(this.offset+this.stride[0]*e+this.stride[1]*r+this.stride[2]*n+this.stride[3]*i,a):this.data[this.offset+this.stride[0]*e+this.stride[1]*r+this.stride[2]*n+this.stride[3]*i]=a},i.get=function(e,r,n,i){return"generic"===t?this.data.get(this.offset+this.stride[0]*e+this.stride[1]*r+this.stride[2]*n+this.stride[3]*i):this.data[this.offset+this.stride[0]*e+this.stride[1]*r+this.stride[2]*n+this.stride[3]*i]},i.index=function(t,e,r,n){return this.offset+this.stride[0]*t+this.stride[1]*e+this.stride[2]*r+this.stride[3]*n},i.hi=function(t,e,r,i){return new n(this.data,"number"!=typeof t||t<0?this.shape[0]:0|t,"number"!=typeof e||e<0?this.shape[1]:0|e,"number"!=typeof r||r<0?this.shape[2]:0|r,"number"!=typeof i||i<0?this.shape[3]:0|i,this.stride[0],this.stride[1],this.stride[2],this.stride[3],this.offset)},i.lo=function(t,e,r,i){var a=this.offset,o=0,s=this.shape[0],l=this.shape[1],c=this.shape[2],u=this.shape[3],f=this.stride[0],h=this.stride[1],p=this.stride[2],d=this.stride[3];return"number"==typeof t&&t>=0&&(a+=f*(o=0|t),s-=o),"number"==typeof e&&e>=0&&(a+=h*(o=0|e),l-=o),"number"==typeof r&&r>=0&&(a+=p*(o=0|r),c-=o),"number"==typeof i&&i>=0&&(a+=d*(o=0|i),u-=o),new n(this.data,s,l,c,u,f,h,p,d,a)},i.step=function(t,e,r,i){var 
a=this.shape[0],o=this.shape[1],s=this.shape[2],l=this.shape[3],c=this.stride[0],u=this.stride[1],f=this.stride[2],h=this.stride[3],p=this.offset,d=0,m=Math.ceil;return"number"==typeof t&&((d=0|t)<0?(p+=c*(a-1),a=m(-a/d)):a=m(a/d),c*=d),"number"==typeof e&&((d=0|e)<0?(p+=u*(o-1),o=m(-o/d)):o=m(o/d),u*=d),"number"==typeof r&&((d=0|r)<0?(p+=f*(s-1),s=m(-s/d)):s=m(s/d),f*=d),"number"==typeof i&&((d=0|i)<0?(p+=h*(l-1),l=m(-l/d)):l=m(l/d),h*=d),new n(this.data,a,o,s,l,c,u,f,h,p)},i.transpose=function(t,e,r,i){t=void 0===t?0:0|t,e=void 0===e?1:0|e,r=void 0===r?2:0|r,i=void 0===i?3:0|i;var a=this.shape,o=this.stride;return new n(this.data,a[t],a[e],a[r],a[i],o[t],o[e],o[r],o[i],this.offset)},i.pick=function(t,r,n,i){var a=[],o=[],s=this.offset;return"number"==typeof t&&t>=0?s=s+this.stride[0]*t|0:(a.push(this.shape[0]),o.push(this.stride[0])),"number"==typeof r&&r>=0?s=s+this.stride[1]*r|0:(a.push(this.shape[1]),o.push(this.stride[1])),"number"==typeof n&&n>=0?s=s+this.stride[2]*n|0:(a.push(this.shape[2]),o.push(this.stride[2])),"number"==typeof i&&i>=0?s=s+this.stride[3]*i|0:(a.push(this.shape[3]),o.push(this.stride[3])),(0,e[a.length+1])(this.data,a,o,s)},function(t,e,r,i){return new n(t,e[0],e[1],e[2],e[3],r[0],r[1],r[2],r[3],i)}},5:function(t,e,r){function n(t,e,r,n,i,a,o,s,l,c,u,f){this.data=t,this.shape=[e,r,n,i,a],this.stride=[o,s,l,c,u],this.offset=0|f}var i=n.prototype;return i.dtype=t,i.dimension=5,Object.defineProperty(i,"size",{get:function(){return 
this.shape[0]*this.shape[1]*this.shape[2]*this.shape[3]*this.shape[4]}}),Object.defineProperty(i,"order",{get:r}),i.set=function(e,r,n,i,a,o){return"generic"===t?this.data.set(this.offset+this.stride[0]*e+this.stride[1]*r+this.stride[2]*n+this.stride[3]*i+this.stride[4]*a,o):this.data[this.offset+this.stride[0]*e+this.stride[1]*r+this.stride[2]*n+this.stride[3]*i+this.stride[4]*a]=o},i.get=function(e,r,n,i,a){return"generic"===t?this.data.get(this.offset+this.stride[0]*e+this.stride[1]*r+this.stride[2]*n+this.stride[3]*i+this.stride[4]*a):this.data[this.offset+this.stride[0]*e+this.stride[1]*r+this.stride[2]*n+this.stride[3]*i+this.stride[4]*a]},i.index=function(t,e,r,n,i){return this.offset+this.stride[0]*t+this.stride[1]*e+this.stride[2]*r+this.stride[3]*n+this.stride[4]*i},i.hi=function(t,e,r,i,a){return new n(this.data,"number"!=typeof t||t<0?this.shape[0]:0|t,"number"!=typeof e||e<0?this.shape[1]:0|e,"number"!=typeof r||r<0?this.shape[2]:0|r,"number"!=typeof i||i<0?this.shape[3]:0|i,"number"!=typeof a||a<0?this.shape[4]:0|a,this.stride[0],this.stride[1],this.stride[2],this.stride[3],this.stride[4],this.offset)},i.lo=function(t,e,r,i,a){var o=this.offset,s=0,l=this.shape[0],c=this.shape[1],u=this.shape[2],f=this.shape[3],h=this.shape[4],p=this.stride[0],d=this.stride[1],m=this.stride[2],g=this.stride[3],v=this.stride[4];return"number"==typeof t&&t>=0&&(o+=p*(s=0|t),l-=s),"number"==typeof e&&e>=0&&(o+=d*(s=0|e),c-=s),"number"==typeof r&&r>=0&&(o+=m*(s=0|r),u-=s),"number"==typeof i&&i>=0&&(o+=g*(s=0|i),f-=s),"number"==typeof a&&a>=0&&(o+=v*(s=0|a),h-=s),new n(this.data,l,c,u,f,h,p,d,m,g,v,o)},i.step=function(t,e,r,i,a){var o=this.shape[0],s=this.shape[1],l=this.shape[2],c=this.shape[3],u=this.shape[4],f=this.stride[0],h=this.stride[1],p=this.stride[2],d=this.stride[3],m=this.stride[4],g=this.offset,v=0,y=Math.ceil;return"number"==typeof t&&((v=0|t)<0?(g+=f*(o-1),o=y(-o/v)):o=y(o/v),f*=v),"number"==typeof 
e&&((v=0|e)<0?(g+=h*(s-1),s=y(-s/v)):s=y(s/v),h*=v),"number"==typeof r&&((v=0|r)<0?(g+=p*(l-1),l=y(-l/v)):l=y(l/v),p*=v),"number"==typeof i&&((v=0|i)<0?(g+=d*(c-1),c=y(-c/v)):c=y(c/v),d*=v),"number"==typeof a&&((v=0|a)<0?(g+=m*(u-1),u=y(-u/v)):u=y(u/v),m*=v),new n(this.data,o,s,l,c,u,f,h,p,d,m,g)},i.transpose=function(t,e,r,i,a){t=void 0===t?0:0|t,e=void 0===e?1:0|e,r=void 0===r?2:0|r,i=void 0===i?3:0|i,a=void 0===a?4:0|a;var o=this.shape,s=this.stride;return new n(this.data,o[t],o[e],o[r],o[i],o[a],s[t],s[e],s[r],s[i],s[a],this.offset)},i.pick=function(t,r,n,i,a){var o=[],s=[],l=this.offset;return"number"==typeof t&&t>=0?l=l+this.stride[0]*t|0:(o.push(this.shape[0]),s.push(this.stride[0])),"number"==typeof r&&r>=0?l=l+this.stride[1]*r|0:(o.push(this.shape[1]),s.push(this.stride[1])),"number"==typeof n&&n>=0?l=l+this.stride[2]*n|0:(o.push(this.shape[2]),s.push(this.stride[2])),"number"==typeof i&&i>=0?l=l+this.stride[3]*i|0:(o.push(this.shape[3]),s.push(this.stride[3])),"number"==typeof a&&a>=0?l=l+this.stride[4]*a|0:(o.push(this.shape[4]),s.push(this.stride[4])),(0,e[o.length+1])(this.data,o,s,l)},function(t,e,r,i){return new n(t,e[0],e[1],e[2],e[3],e[4],r[0],r[1],r[2],r[3],r[4],i)}}};function l(t,e){var r=-1===e?"T":String(e),n=s[r];return-1===e?n(t):0===e?n(t,c[t][0]):n(t,c[t],o)}var c={generic:[],buffer:[],array:[],float32:[],float64:[],int8:[],int16:[],int32:[],uint8_clamped:[],uint8:[],uint16:[],uint32:[],bigint64:[],biguint64:[]};e.exports=function(t,e,r,a){if(void 0===t)return(0,c.array[0])([]);"number"==typeof t&&(t=[t]),void 0===e&&(e=[t.length]);var o=e.length;if(void 0===r){r=new Array(o);for(var s=o-1,u=1;s>=0;--s)r[s]=u,u*=e[s]}if(void 0===a){a=0;for(s=0;st==t>0?a===-1>>>0?(r+=1,a=0):a+=1:0===a?(a=-1>>>0,r-=1):a-=1;return n.pack(a,r)}},{"double-bits":64}],261:[function(t,e,r){r.vertexNormals=function(t,e,r){for(var n=e.length,i=new Array(n),a=void 0===r?1e-6:r,o=0;oa){var b=i[c],_=1/Math.sqrt(g*y);for(x=0;x<3;++x){var 
w=(x+1)%3,T=(x+2)%3;b[x]+=_*(v[w]*m[T]-v[T]*m[w])}}}for(o=0;oa)for(_=1/Math.sqrt(k),x=0;x<3;++x)b[x]*=_;else for(x=0;x<3;++x)b[x]=0}return i},r.faceNormals=function(t,e,r){for(var n=t.length,i=new Array(n),a=void 0===r?1e-6:r,o=0;oa?1/Math.sqrt(p):0;for(c=0;c<3;++c)h[c]*=p;i[o]=h}return i}},{}],262:[function(t,e,r){"use strict";e.exports=function(t,e,r,n,i,a,o,s,l,c){var u=e+a+c;if(f>0){var f=Math.sqrt(u+1);t[0]=.5*(o-l)/f,t[1]=.5*(s-n)/f,t[2]=.5*(r-a)/f,t[3]=.5*f}else{var h=Math.max(e,a,c);f=Math.sqrt(2*h-u+1);e>=h?(t[0]=.5*f,t[1]=.5*(i+r)/f,t[2]=.5*(s+n)/f,t[3]=.5*(o-l)/f):a>=h?(t[0]=.5*(r+i)/f,t[1]=.5*f,t[2]=.5*(l+o)/f,t[3]=.5*(s-n)/f):(t[0]=.5*(n+s)/f,t[1]=.5*(o+l)/f,t[2]=.5*f,t[3]=.5*(r-i)/f)}return t}},{}],263:[function(t,e,r){"use strict";e.exports=function(t){var e=(t=t||{}).center||[0,0,0],r=t.rotation||[0,0,0,1],n=t.radius||1;e=[].slice.call(e,0,3),u(r=[].slice.call(r,0,4),r);var i=new f(r,e,Math.log(n));i.setDistanceLimits(t.zoomMin,t.zoomMax),("eye"in t||"up"in t)&&i.lookAt(0,t.eye,t.center,t.up);return i};var n=t("filtered-vector"),i=t("gl-mat4/lookAt"),a=t("gl-mat4/fromQuat"),o=t("gl-mat4/invert"),s=t("./lib/quatFromFrame");function l(t,e,r){return Math.sqrt(Math.pow(t,2)+Math.pow(e,2)+Math.pow(r,2))}function c(t,e,r,n){return Math.sqrt(Math.pow(t,2)+Math.pow(e,2)+Math.pow(r,2)+Math.pow(n,2))}function u(t,e){var r=e[0],n=e[1],i=e[2],a=e[3],o=c(r,n,i,a);o>1e-6?(t[0]=r/o,t[1]=n/o,t[2]=i/o,t[3]=a/o):(t[0]=t[1]=t[2]=0,t[3]=1)}function f(t,e,r){this.radius=n([r]),this.center=n(e),this.rotation=n(t),this.computedRadius=this.radius.curve(0),this.computedCenter=this.center.curve(0),this.computedRotation=this.rotation.curve(0),this.computedUp=[.1,0,0],this.computedEye=[.1,0,0],this.computedMatrix=[.1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],this.recalcMatrix(0)}var h=f.prototype;h.lastT=function(){return Math.max(this.radius.lastT(),this.center.lastT(),this.rotation.lastT())},h.recalcMatrix=function(t){this.radius.curve(t),this.center.curve(t),this.rotation.curve(t);var 
e=this.computedRotation;u(e,e);var r=this.computedMatrix;a(r,e);var n=this.computedCenter,i=this.computedEye,o=this.computedUp,s=Math.exp(this.computedRadius[0]);i[0]=n[0]+s*r[2],i[1]=n[1]+s*r[6],i[2]=n[2]+s*r[10],o[0]=r[1],o[1]=r[5],o[2]=r[9];for(var l=0;l<3;++l){for(var c=0,f=0;f<3;++f)c+=r[l+4*f]*i[f];r[12+l]=-c}},h.getMatrix=function(t,e){this.recalcMatrix(t);var r=this.computedMatrix;if(e){for(var n=0;n<16;++n)e[n]=r[n];return e}return r},h.idle=function(t){this.center.idle(t),this.radius.idle(t),this.rotation.idle(t)},h.flush=function(t){this.center.flush(t),this.radius.flush(t),this.rotation.flush(t)},h.pan=function(t,e,r,n){e=e||0,r=r||0,n=n||0,this.recalcMatrix(t);var i=this.computedMatrix,a=i[1],o=i[5],s=i[9],c=l(a,o,s);a/=c,o/=c,s/=c;var u=i[0],f=i[4],h=i[8],p=u*a+f*o+h*s,d=l(u-=a*p,f-=o*p,h-=s*p);u/=d,f/=d,h/=d;var m=i[2],g=i[6],v=i[10],y=m*a+g*o+v*s,x=m*u+g*f+v*h,b=l(m-=y*a+x*u,g-=y*o+x*f,v-=y*s+x*h);m/=b,g/=b,v/=b;var _=u*e+a*r,w=f*e+o*r,T=h*e+s*r;this.center.move(t,_,w,T);var k=Math.exp(this.computedRadius[0]);k=Math.max(1e-4,k+n),this.radius.set(t,Math.log(k))},h.rotate=function(t,e,r,n){this.recalcMatrix(t),e=e||0,r=r||0;var i=this.computedMatrix,a=i[0],o=i[4],s=i[8],u=i[1],f=i[5],h=i[9],p=i[2],d=i[6],m=i[10],g=e*a+r*u,v=e*o+r*f,y=e*s+r*h,x=-(d*y-m*v),b=-(m*g-p*y),_=-(p*v-d*g),w=Math.sqrt(Math.max(0,1-Math.pow(x,2)-Math.pow(b,2)-Math.pow(_,2))),T=c(x,b,_,w);T>1e-6?(x/=T,b/=T,_/=T,w/=T):(x=b=_=0,w=1);var k=this.computedRotation,A=k[0],M=k[1],S=k[2],E=k[3],L=A*w+E*x+M*_-S*b,C=M*w+E*b+S*x-A*_,P=S*w+E*_+A*b-M*x,I=E*w-A*x-M*b-S*_;if(n){x=p,b=d,_=m;var O=Math.sin(n)/l(x,b,_);x*=O,b*=O,_*=O,I=I*(w=Math.cos(e))-(L=L*w+I*x+C*_-P*b)*x-(C=C*w+I*b+P*x-L*_)*b-(P=P*w+I*_+L*b-C*x)*_}var z=c(L,C,P,I);z>1e-6?(L/=z,C/=z,P/=z,I/=z):(L=C=P=0,I=1),this.rotation.set(t,L,C,P,I)},h.lookAt=function(t,e,r,n){this.recalcMatrix(t),r=r||this.computedCenter,e=e||this.computedEye,n=n||this.computedUp;var a=this.computedMatrix;i(a,e,r,n);var 
o=this.computedRotation;s(o,a[0],a[1],a[2],a[4],a[5],a[6],a[8],a[9],a[10]),u(o,o),this.rotation.set(t,o[0],o[1],o[2],o[3]);for(var l=0,c=0;c<3;++c)l+=Math.pow(r[c]-e[c],2);this.radius.set(t,.5*Math.log(Math.max(l,1e-6))),this.center.set(t,r[0],r[1],r[2])},h.translate=function(t,e,r,n){this.center.move(t,e||0,r||0,n||0)},h.setMatrix=function(t,e){var r=this.computedRotation;s(r,e[0],e[1],e[2],e[4],e[5],e[6],e[8],e[9],e[10]),u(r,r),this.rotation.set(t,r[0],r[1],r[2],r[3]);var n=this.computedMatrix;o(n,e);var i=n[15];if(Math.abs(i)>1e-6){var a=n[12]/i,l=n[13]/i,c=n[14]/i;this.recalcMatrix(t);var f=Math.exp(this.computedRadius[0]);this.center.set(t,a-n[2]*f,l-n[6]*f,c-n[10]*f),this.radius.idle(t)}else this.center.idle(t),this.radius.idle(t)},h.setDistance=function(t,e){e>0&&this.radius.set(t,Math.log(e))},h.setDistanceLimits=function(t,e){t=t>0?Math.log(t):-1/0,e=e>0?Math.log(e):1/0,e=Math.max(e,t),this.radius.bounds[0][0]=t,this.radius.bounds[1][0]=e},h.getDistanceLimits=function(t){var e=this.radius.bounds;return t?(t[0]=Math.exp(e[0][0]),t[1]=Math.exp(e[1][0]),t):[Math.exp(e[0][0]),Math.exp(e[1][0])]},h.toJSON=function(){return this.recalcMatrix(this.lastT()),{center:this.computedCenter.slice(),rotation:this.computedRotation.slice(),distance:Math.log(this.computedRadius[0]),zoomMin:this.radius.bounds[0][0],zoomMax:this.radius.bounds[1][0]}},h.fromJSON=function(t){var e=this.lastT(),r=t.center;r&&this.center.set(e,r[0],r[1],r[2]);var n=t.rotation;n&&this.rotation.set(e,n[0],n[1],n[2],n[3]);var i=t.distance;i&&i>0&&this.radius.set(e,Math.log(i)),this.setDistanceLimits(t.zoomMin,t.zoomMax)}},{"./lib/quatFromFrame":262,"filtered-vector":68,"gl-mat4/fromQuat":95,"gl-mat4/invert":98,"gl-mat4/lookAt":99}],264:[function(t,e,r){ -/*! - * pad-left - * - * Copyright (c) 2014-2015, Jon Schlinkert. - * Licensed under the MIT license. 
- */ -"use strict";var n=t("repeat-string");e.exports=function(t,e,r){return n(r=void 0!==r?r+"":" ",e)+t}},{"repeat-string":277}],265:[function(t,e,r){e.exports=function(t,e){e||(e=[0,""]),t=String(t);var r=parseFloat(t,10);return e[0]=r,e[1]=t.match(/[\d.\-\+]*\s*(.*)/)[1]||"",e}},{}],266:[function(t,e,r){"use strict";e.exports=function(t,e){for(var r=0|e.length,i=t.length,a=[new Array(r),new Array(r)],o=0;o0){o=a[u][r][0],l=u;break}s=o[1^l];for(var f=0;f<2;++f)for(var h=a[f][r],p=0;p0&&(o=d,s=m,l=f)}return i||o&&c(o,l),s}function f(t,r){var i=a[r][t][0],o=[t];c(i,r);for(var s=i[1^r];;){for(;s!==t;)o.push(s),s=u(o[o.length-2],s,!1);if(a[0][t].length+a[1][t].length===0)break;var l=o[o.length-1],f=t,h=o[1],p=u(l,f,!0);if(n(e[l],e[f],e[h],e[p])<0)break;o.push(t),s=u(l,f)}return o}function h(t,e){return e[1]===e[e.length-1]}for(o=0;o0;){a[0][o].length;var m=f(o,p);h(0,m)?d.push.apply(d,m):(d.length>0&&l.push(d),d=m)}d.length>0&&l.push(d)}return l};var n=t("compare-angle")},{"compare-angle":54}],267:[function(t,e,r){"use strict";e.exports=function(t,e){for(var r=n(t,e.length),i=new Array(e.length),a=new Array(e.length),o=[],s=0;s0;){var c=o.pop();i[c]=!1;var u=r[c];for(s=0;s0}))).length,g=new Array(m),v=new Array(m);for(p=0;p0;){var B=R.pop(),N=E[B];l(N,(function(t,e){return t-e}));var j,U=N.length,V=F[B];if(0===V){var H=d[B];j=[H]}for(p=0;p=0))if(F[q]=1^V,R.push(q),0===V)D(H=d[q])||(H.reverse(),j.push(H))}0===V&&r.push(j)}return r};var n=t("edges-to-adjacency-list"),i=t("planar-dual"),a=t("point-in-big-polygon"),o=t("two-product"),s=t("robust-sum"),l=t("uniq"),c=t("./lib/trim-leaves");function u(t,e){for(var r=new Array(t),n=0;n0&&e[i]===r[0]))return 1;a=t[i-1]}for(var s=1;a;){var l=a.key,c=n(r,l[0],l[1]);if(l[0][0]0))return 0;s=-1,a=a.right}else if(c>0)a=a.left;else{if(!(c<0))return 0;s=1,a=a.right}}return s}}(v.slabs,v.coordinates);return 0===a.length?y:function(t,e){return function(r){return t(r[0],r[1])?0:e(r)}}(l(a),y)};var 
n=t("robust-orientation")[3],i=t("slab-decomposition"),a=t("interval-tree-1d"),o=t("binary-search-bounds");function s(){return!0}function l(t){for(var e={},r=0;r=c?(k=1,y=c+2*h+d):y=h*(k=-h/c)+d):(k=0,p>=0?(A=0,y=d):-p>=f?(A=1,y=f+2*p+d):y=p*(A=-p/f)+d);else if(A<0)A=0,h>=0?(k=0,y=d):-h>=c?(k=1,y=c+2*h+d):y=h*(k=-h/c)+d;else{var M=1/T;y=(k*=M)*(c*k+u*(A*=M)+2*h)+A*(u*k+f*A+2*p)+d}else k<0?(b=f+p)>(x=u+h)?(_=b-x)>=(w=c-2*u+f)?(k=1,A=0,y=c+2*h+d):y=(k=_/w)*(c*k+u*(A=1-k)+2*h)+A*(u*k+f*A+2*p)+d:(k=0,b<=0?(A=1,y=f+2*p+d):p>=0?(A=0,y=d):y=p*(A=-p/f)+d):A<0?(b=c+h)>(x=u+p)?(_=b-x)>=(w=c-2*u+f)?(A=1,k=0,y=f+2*p+d):y=(k=1-(A=_/w))*(c*k+u*A+2*h)+A*(u*k+f*A+2*p)+d:(A=0,b<=0?(k=1,y=c+2*h+d):h>=0?(k=0,y=d):y=h*(k=-h/c)+d):(_=f+p-u-h)<=0?(k=0,A=1,y=f+2*p+d):_>=(w=c-2*u+f)?(k=1,A=0,y=c+2*h+d):y=(k=_/w)*(c*k+u*(A=1-k)+2*h)+A*(u*k+f*A+2*p)+d;var S=1-k-A;for(l=0;l0){var c=t[r-1];if(0===n(s,c)&&a(c)!==l){r-=1;continue}}t[r++]=s}}return t.length=r,t}},{"cell-orientation":47,"compare-cell":56,"compare-oriented-cell":57}],277:[function(t,e,r){ -/*! - * repeat-string - * - * Copyright (c) 2014-2015, Jon Schlinkert. - * Licensed under the MIT License. 
- */ -"use strict";var n,i="";e.exports=function(t,e){if("string"!=typeof t)throw new TypeError("expected a string");if(1===e)return t;if(2===e)return t+t;var r=t.length*e;if(n!==t||void 0===n)n=t,i="";else if(i.length>=r)return i.substr(0,r);for(;r>i.length&&e>1;)1&e&&(i+=t),e>>=1,t+=t;return i=(i+=t).substr(0,r)}},{}],278:[function(t,e,r){(function(t){(function(){e.exports=t.performance&&t.performance.now?function(){return performance.now()}:Date.now||function(){return+new Date}}).call(this)}).call(this,void 0!==n?n:"undefined"!=typeof self?self:"undefined"!=typeof window?window:{})},{}],279:[function(t,e,r){"use strict";e.exports=function(t){for(var e=t.length,r=t[t.length-1],n=e,i=e-2;i>=0;--i){var a=r,o=t[i];(l=o-((r=a+o)-a))&&(t[--n]=r,r=l)}var s=0;for(i=n;i0){if(a<=0)return o;n=i+a}else{if(!(i<0))return o;if(a>=0)return o;n=-(i+a)}var s=33306690738754716e-32*n;return o>=s||o<=-s?o:f(t,e,r)},function(t,e,r,n){var i=t[0]-n[0],a=e[0]-n[0],o=r[0]-n[0],s=t[1]-n[1],l=e[1]-n[1],c=r[1]-n[1],u=t[2]-n[2],f=e[2]-n[2],p=r[2]-n[2],d=a*c,m=o*l,g=o*s,v=i*c,y=i*l,x=a*s,b=u*(d-m)+f*(g-v)+p*(y-x),_=7771561172376103e-31*((Math.abs(d)+Math.abs(m))*Math.abs(u)+(Math.abs(g)+Math.abs(v))*Math.abs(f)+(Math.abs(y)+Math.abs(x))*Math.abs(p));return b>_||-b>_?b:h(t,e,r,n)}];function d(t){var e=p[t.length];return e||(e=p[t.length]=u(t.length)),e.apply(void 0,t)}function m(t,e,r,n,i,a,o){return function(e,r,s,l,c){switch(arguments.length){case 0:case 1:return 0;case 2:return n(e,r);case 3:return i(e,r,s);case 4:return a(e,r,s,l);case 5:return o(e,r,s,l,c)}for(var u=new Array(arguments.length),f=0;f0&&o>0||a<0&&o<0)return!1;var s=n(r,t,e),l=n(i,t,e);if(s>0&&l>0||s<0&&l<0)return!1;if(0===a&&0===o&&0===s&&0===l)return function(t,e,r,n){for(var i=0;i<2;++i){var a=t[i],o=e[i],s=Math.min(a,o),l=Math.max(a,o),c=r[i],u=n[i],f=Math.min(c,u);if(Math.max(c,u)=n?(i=f,(l+=1)=n?(i=f,(l+=1)>1,c=e[2*l+1];if(c===a)return l;a>1,c=e[2*l+1];if(c===a)return l;a>1,c=e[2*l+1];if(c===a)return 
l;a>1,s=a(t[o],e);s<=0?(0===s&&(i=o),r=o+1):s>0&&(n=o-1)}return i}function u(t,e){for(var r=new Array(t.length),i=0,o=r.length;i=t.length||0!==a(t[g],s)););}return r}function f(t,e){if(e<0)return[];for(var r=[],i=(1<>>u&1&&c.push(i[u]);e.push(c)}return s(e)},r.skeleton=f,r.boundary=function(t){for(var e=[],r=0,n=t.length;r>1:(t>>1)-1}function x(t){for(var e=v(t);;){var r=e,n=2*t+1,i=2*(t+1),a=t;if(n0;){var r=y(t);if(r>=0)if(e0){var t=k[0];return g(0,M-1),M-=1,x(0),t}return-1}function w(t,e){var r=k[t];return c[r]===e?t:(c[r]=-1/0,b(t),_(),c[r]=e,b((M+=1)-1))}function T(t){if(!u[t]){u[t]=!0;var e=s[t],r=l[t];s[r]>=0&&(s[r]=e),l[e]>=0&&(l[e]=r),A[e]>=0&&w(A[e],m(e)),A[r]>=0&&w(A[r],m(r))}}var k=[],A=new Array(a);for(f=0;f>1;f>=0;--f)x(f);for(;;){var S=_();if(S<0||c[S]>r)break;T(S)}var E=[];for(f=0;f=0&&r>=0&&e!==r){var n=A[e],i=A[r];n!==i&&C.push([n,i])}})),i.unique(i.normalize(C)),{positions:E,edges:C}};var n=t("robust-orientation"),i=t("simplicial-complex")},{"robust-orientation":284,"simplicial-complex":295}],298:[function(t,e,r){"use strict";e.exports=function(t,e){var r,a,o,s;if(e[0][0]e[1][0]))return i(e,t);r=e[1],a=e[0]}if(t[0][0]t[1][0]))return-i(t,e);o=t[1],s=t[0]}var l=n(r,a,s),c=n(r,a,o);if(l<0){if(c<=0)return l}else if(l>0){if(c>=0)return l}else if(c)return c;if(l=n(s,o,a),c=n(s,o,r),l<0){if(c<=0)return l}else if(l>0){if(c>=0)return l}else if(c)return c;return a[0]-s[0]};var n=t("robust-orientation");function i(t,e){var r,i,a,o;if(e[0][0]e[1][0])){var s=Math.min(t[0][1],t[1][1]),l=Math.max(t[0][1],t[1][1]),c=Math.min(e[0][1],e[1][1]),u=Math.max(e[0][1],e[1][1]);return lu?s-u:l-u}r=e[1],i=e[0]}t[0][1]0)if(e[0]!==o[1][0])r=t,t=t.right;else{if(l=c(t.right,e))return l;t=t.left}else{if(e[0]!==o[1][0])return t;var l;if(l=c(t.right,e))return l;t=t.left}}return r}function u(t,e,r,n){this.y=t,this.index=e,this.start=r,this.closed=n}function f(t,e,r,n){this.x=t,this.segment=e,this.create=r,this.index=n}s.prototype.castUp=function(t){var 
e=n.le(this.coordinates,t[0]);if(e<0)return-1;this.slabs[e];var r=c(this.slabs[e],t),i=-1;if(r&&(i=r.value),this.coordinates[e]===t[0]){var s=null;if(r&&(s=r.key),e>0){var u=c(this.slabs[e-1],t);u&&(s?o(u.key,s)>0&&(s=u.key,i=u.value):(i=u.value,s=u.key))}var f=this.horizontal[e];if(f.length>0){var h=n.ge(f,t[1],l);if(h=f.length)return i;p=f[h]}}if(p.start)if(s){var d=a(s[0],s[1],[t[0],p.y]);s[0][0]>s[1][0]&&(d=-d),d>0&&(i=p.index)}else i=p.index;else p.y!==t[1]&&(i=p.index)}}}return i}},{"./lib/order-segments":298,"binary-search-bounds":31,"functional-red-black-tree":69,"robust-orientation":284}],300:[function(t,e,r){"use strict";var n=t("robust-dot-product"),i=t("robust-sum");function a(t,e){var r=i(n(t,e),[e[e.length-1]]);return r[r.length-1]}function o(t,e,r,n){var i=-e/(n-e);i<0?i=0:i>1&&(i=1);for(var a=1-i,o=t.length,s=new Array(o),l=0;l0||i>0&&u<0){var f=o(s,u,l,i);r.push(f),n.push(f.slice())}u<0?n.push(l.slice()):u>0?r.push(l.slice()):(r.push(l.slice()),n.push(l.slice())),i=u}return{positive:r,negative:n}},e.exports.positive=function(t,e){for(var r=[],n=a(t[t.length-1],e),i=t[t.length-1],s=t[0],l=0;l0||n>0&&c<0)&&r.push(o(i,c,s,n)),c>=0&&r.push(s.slice()),n=c}return r},e.exports.negative=function(t,e){for(var r=[],n=a(t[t.length-1],e),i=t[t.length-1],s=t[0],l=0;l0||n>0&&c<0)&&r.push(o(i,c,s,n)),c<=0&&r.push(s.slice()),n=c}return r}},{"robust-dot-product":281,"robust-sum":289}],301:[function(t,e,r){!function(){"use strict";var t={not_string:/[^s]/,not_bool:/[^t]/,not_type:/[^T]/,not_primitive:/[^v]/,number:/[diefg]/,numeric_arg:/[bcdiefguxX]/,json:/[j]/,not_json:/[^j]/,text:/^[^\x25]+/,modulo:/^\x25{2}/,placeholder:/^\x25(?:([1-9]\d*)\$|\(([^)]+)\))?(\+)?(0|'[^$])?(-)?(\d+)?(?:\.(\d+))?([b-gijostTuvxX])/,key:/^([a-z_][a-z_\d]*)/i,key_access:/^\.([a-z_][a-z_\d]*)/i,index_access:/^\[(\d+)\]/,sign:/^[+-]/};function e(t){return i(o(t),arguments)}function n(t,r){return e.apply(null,[t].concat(r||[]))}function i(r,n){var 
i,a,o,s,l,c,u,f,h,p=1,d=r.length,m="";for(a=0;a=0),s.type){case"b":i=parseInt(i,10).toString(2);break;case"c":i=String.fromCharCode(parseInt(i,10));break;case"d":case"i":i=parseInt(i,10);break;case"j":i=JSON.stringify(i,null,s.width?parseInt(s.width):0);break;case"e":i=s.precision?parseFloat(i).toExponential(s.precision):parseFloat(i).toExponential();break;case"f":i=s.precision?parseFloat(i).toFixed(s.precision):parseFloat(i);break;case"g":i=s.precision?String(Number(i.toPrecision(s.precision))):parseFloat(i);break;case"o":i=(parseInt(i,10)>>>0).toString(8);break;case"s":i=String(i),i=s.precision?i.substring(0,s.precision):i;break;case"t":i=String(!!i),i=s.precision?i.substring(0,s.precision):i;break;case"T":i=Object.prototype.toString.call(i).slice(8,-1).toLowerCase(),i=s.precision?i.substring(0,s.precision):i;break;case"u":i=parseInt(i,10)>>>0;break;case"v":i=i.valueOf(),i=s.precision?i.substring(0,s.precision):i;break;case"x":i=(parseInt(i,10)>>>0).toString(16);break;case"X":i=(parseInt(i,10)>>>0).toString(16).toUpperCase()}t.json.test(s.type)?m+=i:(!t.number.test(s.type)||f&&!s.sign?h="":(h=f?"+":"-",i=i.toString().replace(t.sign,"")),c=s.pad_char?"0"===s.pad_char?"0":s.pad_char.charAt(1):" ",u=s.width-(h+i).length,l=s.width&&u>0?c.repeat(u):"",m+=s.align?h+i+l:"0"===c?h+l+i:l+h+i)}return m}var a=Object.create(null);function o(e){if(a[e])return a[e];for(var r,n=e,i=[],o=0;n;){if(null!==(r=t.text.exec(n)))i.push(r[0]);else if(null!==(r=t.modulo.exec(n)))i.push("%");else{if(null===(r=t.placeholder.exec(n)))throw new SyntaxError("[sprintf] unexpected placeholder");if(r[2]){o|=1;var s=[],l=r[2],c=[];if(null===(c=t.key.exec(l)))throw new SyntaxError("[sprintf] failed to parse named argument key");for(s.push(c[1]);""!==(l=l.substring(c[0].length));)if(null!==(c=t.key_access.exec(l)))s.push(c[1]);else{if(null===(c=t.index_access.exec(l)))throw new SyntaxError("[sprintf] failed to parse named argument key");s.push(c[1])}r[2]=s}else o|=2;if(3===o)throw new 
Error("[sprintf] mixing positional and named placeholders is not (yet) supported");i.push({placeholder:r[0],param_no:r[1],keys:r[2],sign:r[3],pad_char:r[4],align:r[5],width:r[6],precision:r[7],type:r[8]})}n=n.substring(r[0].length)}return a[e]=i}void 0!==r&&(r.sprintf=e,r.vsprintf=n),"undefined"!=typeof window&&(window.sprintf=e,window.vsprintf=n)}()},{}],302:[function(t,e,r){"use strict";e.exports=function(t,e){if(t.dimension<=0)return{positions:[],cells:[]};if(1===t.dimension)return function(t,e){for(var r=i(t,e),n=r.length,a=new Array(n),o=new Array(n),s=0;sn|0},vertex:function(t,e,r,n,i,a,o,s,l,c,u,f,h){var p=(o<<0)+(s<<1)+(l<<2)+(c<<3)|0;if(0!==p&&15!==p)switch(p){case 0:u.push([t-.5,e-.5]);break;case 1:u.push([t-.25-.25*(n+r-2*h)/(r-n),e-.25-.25*(i+r-2*h)/(r-i)]);break;case 2:u.push([t-.75-.25*(-n-r+2*h)/(n-r),e-.25-.25*(a+n-2*h)/(n-a)]);break;case 3:u.push([t-.5,e-.5-.5*(i+r+a+n-4*h)/(r-i+n-a)]);break;case 4:u.push([t-.25-.25*(a+i-2*h)/(i-a),e-.75-.25*(-i-r+2*h)/(i-r)]);break;case 5:u.push([t-.5-.5*(n+r+a+i-4*h)/(r-n+i-a),e-.5]);break;case 6:u.push([t-.5-.25*(-n-r+a+i)/(n-r+i-a),e-.5-.25*(-i-r+a+n)/(i-r+n-a)]);break;case 7:u.push([t-.75-.25*(a+i-2*h)/(i-a),e-.75-.25*(a+n-2*h)/(n-a)]);break;case 8:u.push([t-.75-.25*(-a-i+2*h)/(a-i),e-.75-.25*(-a-n+2*h)/(a-n)]);break;case 9:u.push([t-.5-.25*(n+r+-a-i)/(r-n+a-i),e-.5-.25*(i+r+-a-n)/(r-i+a-n)]);break;case 10:u.push([t-.5-.5*(-n-r-a-i+4*h)/(n-r+a-i),e-.5]);break;case 11:u.push([t-.25-.25*(-a-i+2*h)/(a-i),e-.75-.25*(i+r-2*h)/(r-i)]);break;case 12:u.push([t-.5,e-.5-.5*(-i-r-a-n+4*h)/(i-r+a-n)]);break;case 13:u.push([t-.75-.25*(n+r-2*h)/(r-n),e-.25-.25*(-a-n+2*h)/(a-n)]);break;case 14:u.push([t-.25-.25*(-n-r+2*h)/(n-r),e-.25-.25*(-i-r+2*h)/(i-r)]);break;case 15:u.push([t-.5,e-.5])}},cell:function(t,e,r,n,i,a,o,s,l){i?s.push([t,e]):s.push([e,t])}});return function(t,e){var r=[],i=[];return n(t,r,i,e),{positions:r,cells:i}}}};var 
o={}},{"ndarray-extract-contour":251,"zero-crossings":318}],303:[function(t,e,r){(function(r){(function(){"use strict";e.exports=function t(e,r,i){i=i||{};var o=a[e];o||(o=a[e]={" ":{data:new Float32Array(0),shape:.2}});var s=o[r];if(!s)if(r.length<=1||!/\d/.test(r))s=o[r]=function(t){for(var e=t.cells,r=t.positions,n=new Float32Array(6*e.length),i=0,a=0,o=0;o0&&(f+=.02);var p=new Float32Array(u),d=0,m=-.5*f;for(h=0;hMath.max(r,n)?i[2]=1:r>Math.max(e,n)?i[0]=1:i[1]=1;for(var a=0,o=0,l=0;l<3;++l)a+=t[l]*t[l],o+=i[l]*t[l];for(l=0;l<3;++l)i[l]-=o/a*t[l];return s(i,i),i}function h(t,e,r,i,a,o,s,l){this.center=n(r),this.up=n(i),this.right=n(a),this.radius=n([o]),this.angle=n([s,l]),this.angle.bounds=[[-1/0,-Math.PI/2],[1/0,Math.PI/2]],this.setDistanceLimits(t,e),this.computedCenter=this.center.curve(0),this.computedUp=this.up.curve(0),this.computedRight=this.right.curve(0),this.computedRadius=this.radius.curve(0),this.computedAngle=this.angle.curve(0),this.computedToward=[0,0,0],this.computedEye=[0,0,0],this.computedMatrix=new Array(16);for(var c=0;c<16;++c)this.computedMatrix[c]=.5;this.recalcMatrix(0)}var p=h.prototype;p.setDistanceLimits=function(t,e){t=t>0?Math.log(t):-1/0,e=e>0?Math.log(e):1/0,e=Math.max(e,t),this.radius.bounds[0][0]=t,this.radius.bounds[1][0]=e},p.getDistanceLimits=function(t){var e=this.radius.bounds[0];return t?(t[0]=Math.exp(e[0][0]),t[1]=Math.exp(e[1][0]),t):[Math.exp(e[0][0]),Math.exp(e[1][0])]},p.recalcMatrix=function(t){this.center.curve(t),this.up.curve(t),this.right.curve(t),this.radius.curve(t),this.angle.curve(t);for(var e=this.computedUp,r=this.computedRight,n=0,i=0,a=0;a<3;++a)i+=e[a]*r[a],n+=e[a]*e[a];var l=Math.sqrt(n),u=0;for(a=0;a<3;++a)r[a]-=e[a]*i/n,u+=r[a]*r[a],e[a]/=l;var f=Math.sqrt(u);for(a=0;a<3;++a)r[a]/=f;var h=this.computedToward;o(h,e,r),s(h,h);var 
p=Math.exp(this.computedRadius[0]),d=this.computedAngle[0],m=this.computedAngle[1],g=Math.cos(d),v=Math.sin(d),y=Math.cos(m),x=Math.sin(m),b=this.computedCenter,_=g*y,w=v*y,T=x,k=-g*x,A=-v*x,M=y,S=this.computedEye,E=this.computedMatrix;for(a=0;a<3;++a){var L=_*r[a]+w*h[a]+T*e[a];E[4*a+1]=k*r[a]+A*h[a]+M*e[a],E[4*a+2]=L,E[4*a+3]=0}var C=E[1],P=E[5],I=E[9],O=E[2],z=E[6],D=E[10],R=P*D-I*z,F=I*O-C*D,B=C*z-P*O,N=c(R,F,B);R/=N,F/=N,B/=N,E[0]=R,E[4]=F,E[8]=B;for(a=0;a<3;++a)S[a]=b[a]+E[2+4*a]*p;for(a=0;a<3;++a){u=0;for(var j=0;j<3;++j)u+=E[a+4*j]*S[j];E[12+a]=-u}E[15]=1},p.getMatrix=function(t,e){this.recalcMatrix(t);var r=this.computedMatrix;if(e){for(var n=0;n<16;++n)e[n]=r[n];return e}return r};var d=[0,0,0];p.rotate=function(t,e,r,n){if(this.angle.move(t,e,r),n){this.recalcMatrix(t);var i=this.computedMatrix;d[0]=i[2],d[1]=i[6],d[2]=i[10];for(var o=this.computedUp,s=this.computedRight,l=this.computedToward,c=0;c<3;++c)i[4*c]=o[c],i[4*c+1]=s[c],i[4*c+2]=l[c];a(i,i,n,d);for(c=0;c<3;++c)o[c]=i[4*c],s[c]=i[4*c+1];this.up.set(t,o[0],o[1],o[2]),this.right.set(t,s[0],s[1],s[2])}},p.pan=function(t,e,r,n){e=e||0,r=r||0,n=n||0,this.recalcMatrix(t);var i=this.computedMatrix,a=(Math.exp(this.computedRadius[0]),i[1]),o=i[5],s=i[9],l=c(a,o,s);a/=l,o/=l,s/=l;var u=i[0],f=i[4],h=i[8],p=u*a+f*o+h*s,d=c(u-=a*p,f-=o*p,h-=s*p),m=(u/=d)*e+a*r,g=(f/=d)*e+o*r,v=(h/=d)*e+s*r;this.center.move(t,m,g,v);var y=Math.exp(this.computedRadius[0]);y=Math.max(1e-4,y+n),this.radius.set(t,Math.log(y))},p.translate=function(t,e,r,n){this.center.move(t,e||0,r||0,n||0)},p.setMatrix=function(t,e,r,n){var a=1;"number"==typeof r&&(a=0|r),(a<0||a>3)&&(a=1);var o=(a+2)%3;e||(this.recalcMatrix(t),e=this.computedMatrix);var s=e[a],l=e[a+4],f=e[a+8];if(n){var h=Math.abs(s),p=Math.abs(l),d=Math.abs(f),m=Math.max(h,p,d);h===m?(s=s<0?-1:1,l=f=0):d===m?(f=f<0?-1:1,s=l=0):(l=l<0?-1:1,s=f=0)}else{var g=c(s,l,f);s/=g,l/=g,f/=g}var 
v,y,x=e[o],b=e[o+4],_=e[o+8],w=x*s+b*l+_*f,T=c(x-=s*w,b-=l*w,_-=f*w),k=l*(_/=T)-f*(b/=T),A=f*(x/=T)-s*_,M=s*b-l*x,S=c(k,A,M);if(k/=S,A/=S,M/=S,this.center.jump(t,q,G,Y),this.radius.idle(t),this.up.jump(t,s,l,f),this.right.jump(t,x,b,_),2===a){var E=e[1],L=e[5],C=e[9],P=E*x+L*b+C*_,I=E*k+L*A+C*M;v=R<0?-Math.PI/2:Math.PI/2,y=Math.atan2(I,P)}else{var O=e[2],z=e[6],D=e[10],R=O*s+z*l+D*f,F=O*x+z*b+D*_,B=O*k+z*A+D*M;v=Math.asin(u(R)),y=Math.atan2(B,F)}this.angle.jump(t,y,v),this.recalcMatrix(t);var N=e[2],j=e[6],U=e[10],V=this.computedMatrix;i(V,e);var H=V[15],q=V[12]/H,G=V[13]/H,Y=V[14]/H,W=Math.exp(this.computedRadius[0]);this.center.jump(t,q-N*W,G-j*W,Y-U*W)},p.lastT=function(){return Math.max(this.center.lastT(),this.up.lastT(),this.right.lastT(),this.radius.lastT(),this.angle.lastT())},p.idle=function(t){this.center.idle(t),this.up.idle(t),this.right.idle(t),this.radius.idle(t),this.angle.idle(t)},p.flush=function(t){this.center.flush(t),this.up.flush(t),this.right.flush(t),this.radius.flush(t),this.angle.flush(t)},p.setDistance=function(t,e){e>0&&this.radius.set(t,Math.log(e))},p.lookAt=function(t,e,r,n){this.recalcMatrix(t),e=e||this.computedEye,r=r||this.computedCenter;var i=(n=n||this.computedUp)[0],a=n[1],o=n[2],s=c(i,a,o);if(!(s<1e-6)){i/=s,a/=s,o/=s;var l=e[0]-r[0],f=e[1]-r[1],h=e[2]-r[2],p=c(l,f,h);if(!(p<1e-6)){l/=p,f/=p,h/=p;var d=this.computedRight,m=d[0],g=d[1],v=d[2],y=i*m+a*g+o*v,x=c(m-=y*i,g-=y*a,v-=y*o);if(!(x<.01&&(x=c(m=a*h-o*f,g=o*l-i*h,v=i*f-a*l))<1e-6)){m/=x,g/=x,v/=x,this.up.set(t,i,a,o),this.right.set(t,m,g,v),this.center.set(t,r[0],r[1],r[2]),this.radius.set(t,Math.log(p));var b=a*v-o*g,_=o*m-i*v,w=i*g-a*m,T=c(b,_,w),k=i*l+a*f+o*h,A=m*l+g*f+v*h,M=(b/=T)*l+(_/=T)*f+(w/=T)*h,S=Math.asin(u(k)),E=Math.atan2(M,A),L=this.angle._state,C=L[L.length-1],P=L[L.length-2];C%=2*Math.PI;var I=Math.abs(C+2*Math.PI-E),O=Math.abs(C-E),z=Math.abs(C-2*Math.PI-E);I0?r.pop():new ArrayBuffer(t)}function d(t){return new Uint8Array(p(t),0,t)}function m(t){return new 
Uint16Array(p(2*t),0,t)}function g(t){return new Uint32Array(p(4*t),0,t)}function v(t){return new Int8Array(p(t),0,t)}function y(t){return new Int16Array(p(2*t),0,t)}function x(t){return new Int32Array(p(4*t),0,t)}function b(t){return new Float32Array(p(4*t),0,t)}function _(t){return new Float64Array(p(8*t),0,t)}function w(t){return o?new Uint8ClampedArray(p(t),0,t):d(t)}function T(t){return s?new BigUint64Array(p(8*t),0,t):null}function k(t){return l?new BigInt64Array(p(8*t),0,t):null}function A(t){return new DataView(p(t),0,t)}function M(t){t=n.nextPow2(t);var e=n.log2(t),r=f[e];return r.length>0?r.pop():new a(t)}r.free=function(t){if(a.isBuffer(t))f[n.log2(t.length)].push(t);else{if("[object ArrayBuffer]"!==Object.prototype.toString.call(t)&&(t=t.buffer),!t)return;var e=t.length||t.byteLength,r=0|n.log2(e);u[r].push(t)}},r.freeUint8=r.freeUint16=r.freeUint32=r.freeBigUint64=r.freeInt8=r.freeInt16=r.freeInt32=r.freeBigInt64=r.freeFloat32=r.freeFloat=r.freeFloat64=r.freeDouble=r.freeUint8Clamped=r.freeDataView=function(t){h(t.buffer)},r.freeArrayBuffer=h,r.freeBuffer=function(t){f[n.log2(t.length)].push(t)},r.malloc=function(t,e){if(void 0===e||"arraybuffer"===e)return p(t);switch(e){case"uint8":return d(t);case"uint16":return m(t);case"uint32":return g(t);case"int8":return v(t);case"int16":return y(t);case"int32":return x(t);case"float":case"float32":return b(t);case"double":case"float64":return _(t);case"uint8_clamped":return w(t);case"bigint64":return k(t);case"biguint64":return T(t);case"buffer":return M(t);case"data":case"dataview":return A(t);default:return null}return null},r.mallocArrayBuffer=p,r.mallocUint8=d,r.mallocUint16=m,r.mallocUint32=g,r.mallocInt8=v,r.mallocInt16=y,r.mallocInt32=x,r.mallocFloat32=r.mallocFloat=b,r.mallocFloat64=r.mallocDouble=_,r.mallocUint8Clamped=w,r.mallocBigUint64=T,r.mallocBigInt64=k,r.mallocDataView=A,r.mallocBuffer=M,r.clearCache=function(){for(var 
t=0;t<32;++t)c.UINT8[t].length=0,c.UINT16[t].length=0,c.UINT32[t].length=0,c.INT8[t].length=0,c.INT16[t].length=0,c.INT32[t].length=0,c.FLOAT[t].length=0,c.DOUBLE[t].length=0,c.BIGUINT64[t].length=0,c.BIGINT64[t].length=0,c.UINT8C[t].length=0,u[t].length=0,f[t].length=0}}).call(this)}).call(this,void 0!==n?n:"undefined"!=typeof self?self:"undefined"!=typeof window?window:{})},{"bit-twiddle":32,buffer:3,dup:65}],309:[function(t,e,r){"use strict";function n(t){this.roots=new Array(t),this.ranks=new Array(t);for(var e=0;e0&&(a=n.size),n.lineSpacing&&n.lineSpacing>0&&(o=n.lineSpacing),n.styletags&&n.styletags.breaklines&&(s.breaklines=!!n.styletags.breaklines),n.styletags&&n.styletags.bolds&&(s.bolds=!!n.styletags.bolds),n.styletags&&n.styletags.italics&&(s.italics=!!n.styletags.italics),n.styletags&&n.styletags.subscripts&&(s.subscripts=!!n.styletags.subscripts),n.styletags&&n.styletags.superscripts&&(s.superscripts=!!n.styletags.superscripts));return r.font=[n.fontStyle,n.fontVariant,n.fontWeight,a+"px",n.font].filter((function(t){return t})).join(" "),r.textAlign="start",r.textBaseline="alphabetic",r.direction="ltr",h(function(t,e,r,n,a,o){r=r.replace(/\n/g,""),r=!0===o.breaklines?r.replace(/\/g,"\n"):r.replace(/\/g," ");var s="",l=[];for(p=0;p-1?parseInt(t[1+i]):0,l=a>-1?parseInt(r[1+a]):0;s!==l&&(n=n.replace(S(),"?px "),g*=Math.pow(.75,l-s),n=n.replace("?px ",S())),m+=.25*x*(l-s)}if(!0===o.superscripts){var c=t.indexOf("+"),u=r.indexOf("+"),f=c>-1?parseInt(t[1+c]):0,h=u>-1?parseInt(r[1+u]):0;f!==h&&(n=n.replace(S(),"?px "),g*=Math.pow(.75,h-f),n=n.replace("?px ",S())),m-=.25*x*(h-f)}if(!0===o.bolds){var p=t.indexOf("b|")>-1,d=r.indexOf("b|")>-1;!p&&d&&(n=v?n.replace("italic ","italic bold "):"bold "+n),p&&!d&&(n=n.replace("bold ",""))}if(!0===o.italics){var v=t.indexOf("i|")>-1,y=r.indexOf("i|")>-1;!v&&y&&(n="italic "+n),v&&!y&&(n=n.replace("italic 
",""))}e.font=n}for(h=0;h",a="",o=i.length,s=a.length,l="+"===e[0]||"-"===e[0],c=0,u=-s;c>-1&&-1!==(c=r.indexOf(i,c))&&-1!==(u=r.indexOf(a,c+o))&&!(u<=c);){for(var f=c;f=u)n[f]=null,r=r.substr(0,f)+" "+r.substr(f+1);else if(null!==n[f]){var h=n[f].indexOf(e[0]);-1===h?n[f]+=e:l&&(n[f]=n[f].substr(0,h+1)+(1+parseInt(n[f][h+1]))+n[f].substr(h+2))}var p=c+o,d=r.substr(p,u-p).indexOf(i);c=-1!==d?d:u+s}return n}function u(t,e){var r=n(t,128);return e?a(r.cells,r.positions,.25):{edges:r.cells,positions:r.positions}}function f(t,e,r,n){var i=u(t,n),a=function(t,e,r){for(var n=e.textAlign||"start",i=e.textBaseline||"alphabetic",a=[1<<30,1<<30],o=[0,0],s=t.length,l=0;l=0?e[a]:i}))},has___:{value:y((function(e){var n=v(e);return n?r in n:t.indexOf(e)>=0}))},set___:{value:y((function(n,i){var a,o=v(n);return o?o[r]=i:(a=t.indexOf(n))>=0?e[a]=i:(a=t.length,e[a]=i,t[a]=n),this}))},delete___:{value:y((function(n){var i,a,o=v(n);return o?r in o&&delete o[r]:!((i=t.indexOf(n))<0)&&(a=t.length-1,t[i]=void 0,e[i]=e[a],t[i]=t[a],t.length=a,e.length=a,!0)}))}})};d.prototype=Object.create(Object.prototype,{get:{value:function(t,e){return this.get___(t,e)},writable:!0,configurable:!0},has:{value:function(t){return this.has___(t)},writable:!0,configurable:!0},set:{value:function(t,e){return this.set___(t,e)},writable:!0,configurable:!0},delete:{value:function(t){return this.delete___(t)},writable:!0,configurable:!0}}),"function"==typeof r?function(){function n(){this instanceof d||x();var e,n=new r,i=void 0,a=!1;return e=t?function(t,e){return n.set(t,e),n.has(t)||(i||(i=new d),i.set(t,e)),this}:function(t,e){if(a)try{n.set(t,e)}catch(r){i||(i=new d),i.set___(t,e)}else n.set(t,e);return this},Object.create(d.prototype,{get___:{value:y((function(t,e){return i?n.has(t)?n.get(t):i.get___(t,e):n.get(t,e)}))},has___:{value:y((function(t){return n.has(t)||!!i&&i.has___(t)}))},set___:{value:y(e)},delete___:{value:y((function(t){var e=!!n.delete(t);return 
i&&i.delete___(t)||e}))},permitHostObjects___:{value:y((function(t){if(t!==m)throw new Error("bogus call to permitHostObjects___");a=!0}))}})}t&&"undefined"!=typeof Proxy&&(Proxy=void 0),n.prototype=d.prototype,e.exports=n,Object.defineProperty(WeakMap.prototype,"constructor",{value:WeakMap,enumerable:!1,configurable:!0,writable:!0})}():("undefined"!=typeof Proxy&&(Proxy=void 0),e.exports=d)}function m(t){t.permitHostObjects___&&t.permitHostObjects___(m)}function g(t){return!("weakmap:"==t.substr(0,"weakmap:".length)&&"___"===t.substr(t.length-3))}function v(t){if(t!==Object(t))throw new TypeError("Not an object: "+t);var e=t[l];if(e&&e.key===t)return e;if(s(t)){e={key:t};try{return o(t,l,{value:e,writable:!1,enumerable:!1,configurable:!1}),e}catch(t){return}}}function y(t){return t.prototype=null,Object.freeze(t)}function x(){h||"undefined"==typeof console||(h=!0,console.warn("WeakMap should be invoked as new WeakMap(), not WeakMap(). This will be an error in the future."))}}()},{}],314:[function(t,e,r){var n=t("./hidden-store.js");e.exports=function(){var t={};return function(e){if(("object"!=typeof e||null===e)&&"function"!=typeof e)throw new Error("Weakmap-shim: Key must be object");var r=e.valueOf(t);return r&&r.identity===t?r:n(e,t)}}},{"./hidden-store.js":315}],315:[function(t,e,r){e.exports=function(t,e){var r={identity:e},n=t.valueOf;return Object.defineProperty(t,"valueOf",{value:function(t){return t!==e?n.apply(this,arguments):r},writable:!0}),r}},{}],316:[function(t,e,r){var n=t("./create-store.js");e.exports=function(){var t=n();return{get:function(e,r){var n=t(e);return n.hasOwnProperty("value")?n.value:r},set:function(e,r){return t(e).value=r,this},has:function(e){return"value"in t(e)},delete:function(e){return delete t(e).value}}}},{"./create-store.js":314}],317:[function(t,e,r){"use strict";var n,i=function(){return function(t,e,r,n,i,a){var o=t[0],s=r[0],l=[0],c=s;n|=0;var 
u=0,f=s;for(u=0;u=0!=p>=0&&i.push(l[0]+.5+.5*(h+p)/(h-p)),n+=f,++l[0]}}};e.exports=(n={funcName:{funcName:"zeroCrossings"}.funcName},function(t){var e={};return function(r,n,i){var a=r.dtype,o=r.order,s=[a,o.join()].join(),l=e[s];return l||(e[s]=l=t([a,o])),l(r.shape.slice(0),r.data,r.stride,0|r.offset,n,i)}}(i.bind(void 0,n)))},{}],318:[function(t,e,r){"use strict";e.exports=function(t,e){var r=[];return e=+e||0,n(t.hi(t.shape[0]-1),r,e),r};var n=t("./lib/zc-core")},{"./lib/zc-core":317}]},{},[6])(6)}))}).call(this)}).call(this,"undefined"!=typeof global?global:"undefined"!=typeof self?self:"undefined"!=typeof window?window:{})},{}]},{},[27])(27)})); \ No newline at end of file diff --git a/_freeze/site_libs/resize-iframes-0.0.1/resize-iframes.js b/_freeze/site_libs/resize-iframes-0.0.1/resize-iframes.js deleted file mode 100644 index be900221e..000000000 --- a/_freeze/site_libs/resize-iframes-0.0.1/resize-iframes.js +++ /dev/null @@ -1,26 +0,0 @@ -function adjustIframeHeight(iframe) { - const iframeDocument = iframe.contentDocument || iframe.contentWindow.document; - if (!iframeDocument) return; - - // Temporarily force horizontal scroll to measure its size - const hasHorizontalScroll = iframeDocument.documentElement.scrollWidth > iframeDocument.documentElement.clientWidth; - const scrollbarHeight = hasHorizontalScroll ? 
17 : 0; // Approx height of horizontal scrollbar - - // Adjust height to content's scrollHeight minus scrollbar if needed - iframe.style.height = (iframeDocument.documentElement.scrollHeight + scrollbarHeight) + 'px'; -} - -function adjustAllIframes() { - document.querySelectorAll('.resizable-iframe').forEach((iframe) => { - adjustIframeHeight(iframe); - }); -} - -// Adjust height when iframe is loaded -document.querySelectorAll('.resizable-iframe').forEach((iframe) => { - iframe.addEventListener('load', () => adjustIframeHeight(iframe)); -}); - -// Re-adjust on window resize -window.addEventListener('resize', adjustAllIframes); -window.addEventListener('load', adjustAllIframes); diff --git a/_freeze/site_libs/sas_widget-binding-0.1.0.9000/sas_widget.js b/_freeze/site_libs/sas_widget-binding-0.1.0.9000/sas_widget.js deleted file mode 100644 index 25382508a..000000000 --- a/_freeze/site_libs/sas_widget-binding-0.1.0.9000/sas_widget.js +++ /dev/null @@ -1,54 +0,0 @@ -HTMLWidgets.widget({ - - name: 'sas_widget', - - type: 'output', - - factory: function (el, width, height) { - - // TODO: define shared variables for this instance - - return { - - renderValue: function (x) { - let lst = x.lst; - let log = x.log; - let capture = x.capture; - - if (capture == "both") { - el.innerHTML = ` - - -
-
- -
-
${log}
-
- `; - } else if (capture == "listing") { - el.innerHTML = ` - - `; - } else if (capture == "log") { - el.innerHTML = ` -
${log}
- `; - } - - }, - - resize: function (width, height) { - - } - - }; - } -}); \ No newline at end of file diff --git a/_freeze/site_libs/sas_widget-binding-0.1.0.9003/sas_widget.js b/_freeze/site_libs/sas_widget-binding-0.1.0.9003/sas_widget.js deleted file mode 100644 index 25382508a..000000000 --- a/_freeze/site_libs/sas_widget-binding-0.1.0.9003/sas_widget.js +++ /dev/null @@ -1,54 +0,0 @@ -HTMLWidgets.widget({ - - name: 'sas_widget', - - type: 'output', - - factory: function (el, width, height) { - - // TODO: define shared variables for this instance - - return { - - renderValue: function (x) { - let lst = x.lst; - let log = x.log; - let capture = x.capture; - - if (capture == "both") { - el.innerHTML = ` - - -
-
- -
-
${log}
-
- `; - } else if (capture == "listing") { - el.innerHTML = ` - - `; - } else if (capture == "log") { - el.innerHTML = ` -
${log}
- `; - } - - }, - - resize: function (width, height) { - - } - - }; - } -}); \ No newline at end of file diff --git a/_freeze/templates/RvsSAS_template/execute-results/html.json b/_freeze/templates/RvsSAS_template/execute-results/html.json deleted file mode 100644 index 0d23f6338..000000000 --- a/_freeze/templates/RvsSAS_template/execute-results/html.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "hash": "3c42a2aad62183c0a92e80404388f0b8", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \"R vs SAS [Name of Method]\"\n---\n\n\n\n# R vs SAS \\[Name of Method\\]\n\n## Introduction\n\nProvide a brief overview of the analysis, its purpose, and its applications.\n\n**Example:** This section compares the implementation of Poisson Regression in R and SAS. Poisson regression is used to model count data and contingency tables. It's particularly useful for modeling the number of events occurring within a fixed period of time or space.\n\n## General Comparison Table\n\nThe following table provides an overview of the support and results comparability between R and SAS for the new analysis point.\n\n+-----------------------------------+-----------------------------------+-------------------------------------+--------------------+----------------------------------------------------------+\n| Analysis | Supported in R | Supported in SAS | Results Match | Notes |\n+===================================+===================================+=====================================+====================+==========================================================+\n| [Scenario 1: Basic Functionality] | Example: [Yes](../R/example_link) | Example: [Yes](../SAS/example_link) | Example 1: Yes | Specific settings or packages required for exact match |\n| | | | | |\n| | | | Example 2: No | |\n+-----------------------------------+-----------------------------------+-------------------------------------+--------------------+----------------------------------------------------------+\n| 
[Scenario 2: Advanced Feature] | Example: [Yes](../R/example_link) | Example: [Yes](../SAS/example_link) | Example 3: Partial | Special considerations for data structure or assumptions |\n+-----------------------------------+-----------------------------------+-------------------------------------+--------------------+----------------------------------------------------------+\n\n## Prerequisites: R Packages\n\nList the R packages required for this analysis. Include a brief description if necessary.\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Example R packages required\nlibrary() # Call what packages are needed\nlibrary()\n```\n:::\n\n\n\n## Example Data\n\nProvide an example of the dataset being used. For simulated / dummy datasets please provide some context on how you construct the dataset and its relevance to the methodology. For real data please provide the code for how these data can be accessed and a reference that describes the data source.\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Simulate an example dataset for Poisson Regression\nset.seed(123)\nexample_data <- data.frame(\n count = rpois(100, lambda = 2),\n predictor = rnorm(100)\n)\n\n# Display the first few rows of the dataset\nhead(example_data)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n count predictor\n1 1 0.25331851\n2 3 -0.02854676\n3 2 -0.04287046\n4 4 1.36860228\n5 4 -0.22577099\n6 0 1.51647060\n```\n\n\n:::\n:::\n\n\n\n``` sas\n/* Example dataset in SAS */\ndata example_data;\n do id = 1 to 100;\n count = ranpoi(123, 2); /* random Poisson variable */\n predictor = rannor(123); /* random normal variable */\n output;\n end;\nrun;\n\n/* Display the first few rows of the dataset */\nproc print data=example_data(obs=10);\nrun;\n```\n\n## Analysis Scenarios\n\n### Scenario 1: Basic Functionality\n\nProvide a detailed description of the scenario.\n\n#### SAS Code Example\n\n``` sas\n/* SAS code for basic Poisson Regression */\nproc genmod data=example_data;\n class predictor;\n model 
count = predictor / dist=poisson link=log;\nrun;\n```\n\nDescribe key options utilized in the code, along with a screenshot showcasing the output.\n\n#### R Code Example\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# R code for basic Poisson Regression\nexample_model <- glm(count ~ predictor, family = poisson(link = \"log\"), data = example_data)\n\n# Summary of the model\nsummary(example_model)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\nCall:\nglm(formula = count ~ predictor, family = poisson(link = \"log\"), \n data = example_data)\n\nCoefficients:\n Estimate Std. Error z value Pr(>|z|) \n(Intercept) 0.70395 0.07041 9.998 <2e-16 ***\npredictor 0.01897 0.07320 0.259 0.796 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n(Dispersion parameter for poisson family taken to be 1)\n\n Null deviance: 110.60 on 99 degrees of freedom\nResidual deviance: 110.53 on 98 degrees of freedom\nAIC: 343.61\n\nNumber of Fisher Scoring iterations: 5\n```\n\n\n:::\n:::\n\n\n\nDescribe key options utilized in the code, along with a screenshot showcasing the output.\n\n#### Results Comparison\n\nProvide a detailed comparison of the results obtained from both SAS and R. Highlight any differences and provide explanations if possible.\n\n| Statistic | R Result | SAS Result | Match | Notes |\n|------------------------------------|----------|------------|-------|-------|\n| Degrees of Freedom | 98 | 98 | Yes | |\n| Coefficient Estimate for Predictor | 0.1 | 0.1 | Yes | |\n| p-value | 0.05 | 0.05 | Yes | |\n\n### Scenario 2: Advanced Feature\n\nProvide a detailed description of the scenario. 
**Example:** Address specific advanced features or configurations that may be necessary for more complex analyses.\n\n#### SAS Code Example\n\n``` sas\n/* SAS code for handling overdispersion */\nproc genmod data=example_data;\n class predictor;\n model count = predictor / dist=poisson link=log scale=pearson;\nrun;\n```\n\nDescribe key options utilized in the code, along with a screenshot showcasing the output.\n\n#### R Code Example\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# R code for handling overdispersion\nalternative_model <- glm(count ~ predictor, family = quasipoisson(link = \"log\"), data = example_data)\n\n# Summary of the alternative model\nsummary(alternative_model)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\nCall:\nglm(formula = count ~ predictor, family = quasipoisson(link = \"log\"), \n data = example_data)\n\nCoefficients:\n Estimate Std. Error t value Pr(>|t|) \n(Intercept) 0.70395 0.06969 10.101 <2e-16 ***\npredictor 0.01897 0.07245 0.262 0.794 \n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n(Dispersion parameter for quasipoisson family taken to be 0.9797449)\n\n Null deviance: 110.60 on 99 degrees of freedom\nResidual deviance: 110.53 on 98 degrees of freedom\nAIC: NA\n\nNumber of Fisher Scoring iterations: 5\n```\n\n\n:::\n:::\n\n\n\nDescribe key options utilized in the code, along with a screenshot showcasing the output.\n\n#### Results Comparison\n\nProvide a detailed comparison of the results obtained from both SAS and R. Highlight any differences and provide explanations if possible.\n\n| Statistic | R Result | SAS Result | Match | Notes |\n|------------------------------------|----------|------------|-------|-------|\n| Degrees of Freedom | 98 | 98 | Yes | |\n| Coefficient Estimate for Predictor | 0.1 | 0.1 | Yes | |\n| p-value | 0.05 | 0.05 | Yes | |\n\n## Special Considerations\n\nAddress any additional features or settings that need to be considered. 
This might include specific configuration settings, handling of special cases, or performance considerations.\n\n**Example:** For handling overdispersion in Poisson Regression, SAS provides the `scale` option in PROC GENMOD, while in R, one may have to switch to a quasi-Poisson family or use negative binomial regression.\n\n## Summary and Recommendation\n\nSummarize the key findings from the analysis. Highlight any major differences observed and offer recommendations on which software/packages to use. Provide guidance on how to interpret the differences between the results obtained from various software of packages.\n\n## Troubleshooting and Edge Cases\n\nList potential issues that users may encounter and propose solutions or troubleshooting steps.\n\n**Example:**\n\n- **Issue:** Non-convergence in Poisson Regression.\n\n- **Solution:** Check for multicollinearity among predictors, scale the predictors, or switch to a more appropriate model family.\n\n## Additional References\n\nProvide references and additional reading materials for both R and SAS documentation related to the analysis.\n\n**R Documentation:**\n\n- `glm` function: \n\n**SAS Documentation:**\n\n- `PROC GENMOD`: \n\n## Appendix (Optional)\n\nInclude any additional information, extended examples, or advanced topics that may be helpful for expert users.\n\nThis template is designed to be comprehensive and flexible, able to accommodate multiple scenarios and diverse datasets. 
It ensures that each section clearly communicates the necessary information and comparisons, making it easier for users to understand and apply the analyses appropriately across both R and SAS platforms.\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.4.3 (2025-02-28)\n os Ubuntu 24.04.2 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate C.UTF-8\n ctype C.UTF-8\n tz Europe/London\n date 2025-03-13\n pandoc NA (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P cli 3.6.3 2024-06-21 [?] RSPM (R 4.4.0)\n P digest 0.6.37 2024-08-19 [?] RSPM (R 4.4.0)\n P evaluate 1.0.0 2024-09-17 [?] RSPM (R 4.4.0)\n P fastmap 1.2.0 2024-05-15 [?] RSPM (R 4.4.0)\n P htmltools 0.5.8.1 2024-04-04 [?] RSPM (R 4.4.0)\n P htmlwidgets 1.6.4 2023-12-06 [?] RSPM (R 4.4.0)\n P jsonlite 1.8.9 2024-09-20 [?] RSPM (R 4.4.0)\n P knitr 1.48 2024-07-07 [?] RSPM (R 4.4.0)\n renv 1.0.10 2024-10-05 [1] RSPM (R 4.4.3)\n P rlang 1.1.4 2024-06-04 [?] RSPM (R 4.4.0)\n P rmarkdown 2.28 2024-08-17 [?] RSPM (R 4.4.0)\n P sessioninfo 1.2.2 2021-12-06 [?] RSPM (R 4.4.0)\n P xfun 0.48 2024-10-03 [?] RSPM (R 4.4.0)\n P yaml 2.3.10 2024-07-26 [?] 
RSPM (R 4.4.0)\n\n [1] /home/michael/source/CAMIS/renv/library/linux-ubuntu-noble/R-4.4/x86_64-pc-linux-gnu\n [2] /opt/R/4.4.3/lib/R/library\n\n P ── Loaded and on-disk path mismatch.\n\n─ External software ──────────────────────────────────────────────────────────\n setting value\n SAS 9.04.01M7P080520\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n\n:::\n", - "supporting": [], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_freeze/templates/multi_language_template/execute-results/html.json b/_freeze/templates/multi_language_template/execute-results/html.json index 734f2cf7a..c98c3e51c 100644 --- a/_freeze/templates/multi_language_template/execute-results/html.json +++ b/_freeze/templates/multi_language_template/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "9ec6c5376e0eb70ced380a929e140142", + "hash": "a8351ce86f4758268517556c002a757d", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"(Language) vs (Language): (Method Name)\"\nexecute: \n eval: false\n---\n\n\n\n# Introduction\n\nThis first section should provide a brief background on the methodology with links to associated journal articles, or relevant sources. This should give the reader a high level overview of the method and its implementation. This will be helpful in setting the stage for the examples and discussion that follow.\n\n## Comparisons of Languages\n\nWhen comparing between languages, it is helpful to have a table with links to the pages with deeper dive of each language for a given method method. 
For example, this table shows summaries for ANCOVA between R and SAS:\n\n+-----------------------------------------------+-------------------------+---------------------------+---------------+------------------------------------------------------------------------------------------+\n| Analysis | Supported in R | Supported in SAS | Results Match | Notes |\n+===============================================+=========================+===========================+===============+==========================================================================================+\n| ANCOVA using general linear model and lsmeans | [Yes](../R/ancova.html) | [Yes](../SAS/ancova.html) | [Yes](#comp) | `GLM()` function from **sasLM** with `EMEANS=TRUE` is the easiest to use and matches SAS |\n+-----------------------------------------------+-------------------------+---------------------------+---------------+------------------------------------------------------------------------------------------+\n\nFurther, this table provides a summary of the examples that will be showcased further down in the document related to Poisson regression in R and SAS:\n\n+-----------------------------------+-----------------------------------+-------------------------------------+--------------------+----------------------------------------------------------+\n| Analysis | Supported in R | Supported in SAS | Results Match | Notes |\n+===================================+===================================+=====================================+====================+==========================================================+\n| [Scenario 1: Basic Functionality] | Example: [Yes](../R/example_link) | Example: [Yes](../SAS/example_link) | Example 1: Yes | Specific settings or packages required for exact match |\n| | | | | |\n| | | | Example 2: No | 
|\n+-----------------------------------+-----------------------------------+-------------------------------------+--------------------+----------------------------------------------------------+\n| [Scenario 2: Advanced Feature] | Example: [Yes](../R/example_link) | Example: [Yes](../SAS/example_link) | Example 3: Partial | Special considerations for data structure or assumptions |\n+-----------------------------------+-----------------------------------+-------------------------------------+--------------------+----------------------------------------------------------+\n\n## Libraries or Extensions Needed\n\nThis section should describe what libraries, packages, or additional materials needed for the analysis described.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(lme4)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nLoading required package: Matrix\n```\n\n\n:::\n:::\n\n\n# Data Sources for the Analysis\n\nFor real data, please provide a means of accessing these data and describe the data source used in the example, especially if it is a new dataset not already in the `CAMIS/data/` folder.\n\nFor simulated/dummy datasets, please provide reproducible code to generate these data. 
This includes setting a random seed for reproducible random number generation.\n\n::: panel-tabset\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Simulated dataset for Poisson Regression\nset.seed(123)\nexample_data <- data.frame(\n count = rpois(100, lambda = 2),\n predictor = rnorm(100)\n)\n```\n:::\n\n\n## SAS\n\n\n::: {.cell}\n\n```{.sas .cell-code}\n/* Set random seed for reproducibility */\ndata example_data;\n call streaminit(123);\n do id = 1 to 100;\n count = ranpoi(0, 2); /* Generate random Poisson variable */\n predictor = rannor(0);/* Generate random normal variable */\n output;\n end;\nrun;\n```\n:::\n\n:::\n\n*Again, it is important to give credit to the original source of the data used in your example*\n\n# Statistical Method\n\nProvide any further details or clarifying comments about the methods comparison between the languages here.\n\nNow you can start adding specific examples of how to use these packages or methods to conduct the analysis. It is helpful to include some notes and comments throughout.\n\n## Scenario 1: Basic Functionality\n\n**Example:** This section compares the implementation of Poisson Regression in R and SAS. Poisson regression is used to model count data and contingency tables. 
It's particularly useful for modeling the number of events occurring within a fixed period of time or space.\n\n::: panel-tabset\n### R\n\nFor R packages, it is helpful to prepend package names to functions so new readers can understand where specific functions originate, especially with new packages.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# R code for basic Poisson Regression\nexample_model <- stats::glm(\n count ~ predictor,\n family = stpoisson(link = \"log\"),\n data = example_data\n)\n\n# Summary of the model\nsummary(example_model)\n```\n:::\n\n\n### SAS\n\n\n::: {.cell}\n\n```{.sas .cell-code}\n/* SAS code for basic Poisson Regression */\nproc genmod data=example_data;\n class predictor;\n model count = predictor / dist=poisson link=log;\nrun;\n```\n:::\n\n:::\n\nDescribe key options utilized in the code, along with a screenshot showcasing the output.\n\n## Scenario 2: Advanced Feature\n\nProvide a detailed description of the scenario. **Example:** Address specific advanced features or configurations that may be necessary for more complex analyses.\n\n::: panel-tabset\n### R Code\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# R code for handling overdispersion\nalternative_model <- stats::glm(\n count ~ predictor,\n family = quasipoisson(link = \"log\"),\n data = example_data\n)\n\n# Summary of the alternative model\nsummary(alternative_model)\n```\n:::\n\n\n### SAS\n\n\n::: {.cell}\n\n```{.sas .cell-code}\n/* SAS code for handling overdispersion */\nproc genmod data=example_data;\n class predictor;\n model count = predictor / dist=poisson link=log scale=pearson;\nrun;\n```\n:::\n\n:::\n\nDescribe key options utilized in the code, along with a screenshot showcasing the output.\n\n## Comparison of Languages\n\nProvide a detailed comparison of the results obtained from the comparison of the langauges and methods. 
Highlight any differences/similarities and provide explanations if possible.\n\n+------------------------------------+----------+------------+----------+----------+\n| Statistic | R Result | SAS Result | Match | Notes |\n+====================================+==========+============+==========+==========+\n| Degrees of Freedom | 98 | 98 | Yes | |\n+------------------------------------+----------+------------+----------+----------+\n| Coefficient Estimate for Predictor | 0.1 | 0.1 | Yes | |\n+------------------------------------+----------+------------+----------+----------+\n| p-value | 0.05 | 0.05 | Yes | |\n+------------------------------------+----------+------------+----------+----------+\n\n## Special Considerations\n\nAddress any additional features or settings that need to be considered. This might include specific configuration settings, handling of special cases, or performance considerations.\n\n**Example:** For handling overdispersion in Poisson Regression, SAS provides the `scale` option in PROC GENMOD, while in R, one may have to switch to a quasi-Poisson family or use negative binomial regression.\n\n### Troubleshooting and Edge Cases\n\nList potential issues that users may encounter and propose solutions or troubleshooting steps.\n\n**Example:**\n\n- **Issue:** Non-convergence in Poisson Regression.\n\n- **Solution:** Check for multicollinearity among predictors, scale the predictors, or switch to a more appropriate model family.\n\n## Conclusion\n\nFinally, add a conclusion section to the page. This may take on different forms but should broadly summarize the findings in the comparison of languages, packages, or approaches. In summarizing, be sure to include the advantages/limitations of the packages and approaches so the reader can understand the capabilities of the approaches to the statstical methodology.\n\nThere may be instances where you recommend specific languages, packages, or functions. 
Be sure to provide your rationale for these recommendations.\n\n## References\n\nBe sure to include any references or sources used for the analysis here. These could be external links to pages or in-text citations. This will all help the reader find material needed for further evaluation.\n\n**R Documentation:**\n\n- [`glm` function](https://www.rdocumentation.org/packages/stats/versions/3.6.2/topics/glm)\n\n**SAS Documentation:**\n\n- [`PROC GENMOD`](https://documentation.sas.com/doc/en/statcdc/14.2/statug/statug_genmod_overview.htm)\n\nAlso, include this `Session Info` section. Manually add the packages used in your analysis in a vector, like shown below. This captures the environment used, which is important for reproducibility.\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n\n:::\n\n:::", + "markdown": "---\ntitle: \"(Language) vs (Language): (Method Name)\"\nexecute: \n eval: false\n---\n\n\n\n# Introduction\n\nThis first section should provide a brief background on the methodology with links to associated journal articles, or relevant sources. This should give the reader a high level overview of the method and its implementation. This will be helpful in setting the stage for the examples and discussion that follow.\n\n## Comparisons of Languages\n\nWhen comparing between languages, it is helpful to have a table with links to the pages with deeper dive of each language for a given method method. 
For example, this table shows summaries for ANCOVA between R and SAS:\n\n+-----------------------------------------------+-------------------------+---------------------------+---------------+------------------------------------------------------------------------------------------+\n| Analysis | Supported in R | Supported in SAS | Results Match | Notes |\n+===============================================+=========================+===========================+===============+==========================================================================================+\n| ANCOVA using general linear model and lsmeans | [Yes](../R/ancova.html) | [Yes](../SAS/ancova.html) | [Yes](#comp) | `GLM()` function from **sasLM** with `EMEANS=TRUE` is the easiest to use and matches SAS |\n+-----------------------------------------------+-------------------------+---------------------------+---------------+------------------------------------------------------------------------------------------+\n\nFurther, this table provides a summary of the examples that will be showcased further down in the document related to Poisson regression in R and SAS:\n\n+-----------------------------------+-----------------------------------+-------------------------------------+--------------------+----------------------------------------------------------+\n| Analysis | Supported in R | Supported in SAS | Results Match | Notes |\n+===================================+===================================+=====================================+====================+==========================================================+\n| [Scenario 1: Basic Functionality] | Example: [Yes](../R/example_link) | Example: [Yes](../SAS/example_link) | Example 1: Yes | Specific settings or packages required for exact match |\n| | | | | |\n| | | | Example 2: No | 
|\n+-----------------------------------+-----------------------------------+-------------------------------------+--------------------+----------------------------------------------------------+\n| [Scenario 2: Advanced Feature] | Example: [Yes](../R/example_link) | Example: [Yes](../SAS/example_link) | Example 3: Partial | Special considerations for data structure or assumptions |\n+-----------------------------------+-----------------------------------+-------------------------------------+--------------------+----------------------------------------------------------+\n\n## Libraries or Extensions Needed\n\nThis section should describe what libraries, packages, or additional materials needed for the analysis described.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(lme4)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nLoading required package: Matrix\n```\n\n\n:::\n:::\n\n\n# Data Sources for the Analysis\n\nFor real data, please provide a means of accessing these data and describe the data source used in the example, especially if it is a new dataset not already in the `CAMIS/data/` folder.\n\nFor simulated/dummy datasets, please provide reproducible code to generate these data. 
This includes setting a random seed for reproducible random number generation.\n\n::: panel-tabset\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# Simulated dataset for Poisson Regression\nset.seed(123)\nexample_data <- data.frame(\n count = rpois(100, lambda = 2),\n predictor = rnorm(100)\n)\n```\n:::\n\n\n## SAS\n\n```sas\n/* Set random seed for reproducibility */\ndata example_data;\n call streaminit(123);\n do id = 1 to 100;\n count = ranpoi(0, 2); /* Generate random Poisson variable */\n predictor = rannor(0);/* Generate random normal variable */\n output;\n end;\nrun;\n```\n:::\n\n*Again, it is important to give credit to the original source of the data used in your example*\n\n# Statistical Method\n\nProvide any further details or clarifying comments about the methods comparison between the languages here.\n\nNow you can start adding specific examples of how to use these packages or methods to conduct the analysis. It is helpful to include some notes and comments throughout.\n\n## Scenario 1: Basic Functionality\n\n**Example:** This section compares the implementation of Poisson Regression in R and SAS. Poisson regression is used to model count data and contingency tables. 
It's particularly useful for modeling the number of events occurring within a fixed period of time or space.\n\n::: panel-tabset\n### R\n\nFor R packages, it is helpful to prepend package names to functions so new readers can understand where specific functions originate, especially with new packages.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# R code for basic Poisson Regression\nexample_model <- stats::glm(\n count ~ predictor,\n family = stpoisson(link = \"log\"),\n data = example_data\n)\n\n# Summary of the model\nsummary(example_model)\n```\n:::\n\n\n### SAS\n\n```sas\n/* SAS code for basic Poisson Regression */\nproc genmod data=example_data;\n class predictor;\n model count = predictor / dist=poisson link=log;\nrun;\n```\n:::\n\nDescribe key options utilized in the code, along with a screenshot showcasing the output.\n\n## Scenario 2: Advanced Feature\n\nProvide a detailed description of the scenario. **Example:** Address specific advanced features or configurations that may be necessary for more complex analyses.\n\n::: panel-tabset\n### R Code\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# R code for handling overdispersion\nalternative_model <- stats::glm(\n count ~ predictor,\n family = quasipoisson(link = \"log\"),\n data = example_data\n)\n\n# Summary of the alternative model\nsummary(alternative_model)\n```\n:::\n\n\n### SAS\n\n```sas\n/* SAS code for handling overdispersion */\nproc genmod data=example_data;\n class predictor;\n model count = predictor / dist=poisson link=log scale=pearson;\nrun;\n```\n:::\n\nDescribe key options utilized in the code, along with a screenshot showcasing the output.\n\n## Comparison of Languages\n\nProvide a detailed comparison of the results obtained from the comparison of the langauges and methods. 
Highlight any differences/similarities and provide explanations if possible.\n\n+------------------------------------+----------+------------+----------+----------+\n| Statistic | R Result | SAS Result | Match | Notes |\n+====================================+==========+============+==========+==========+\n| Degrees of Freedom | 98 | 98 | Yes | |\n+------------------------------------+----------+------------+----------+----------+\n| Coefficient Estimate for Predictor | 0.1 | 0.1 | Yes | |\n+------------------------------------+----------+------------+----------+----------+\n| p-value | 0.05 | 0.05 | Yes | |\n+------------------------------------+----------+------------+----------+----------+\n\n## Special Considerations\n\nAddress any additional features or settings that need to be considered. This might include specific configuration settings, handling of special cases, or performance considerations.\n\n**Example:** For handling overdispersion in Poisson Regression, SAS provides the `scale` option in PROC GENMOD, while in R, one may have to switch to a quasi-Poisson family or use negative binomial regression.\n\n### Troubleshooting and Edge Cases\n\nList potential issues that users may encounter and propose solutions or troubleshooting steps.\n\n**Example:**\n\n- **Issue:** Non-convergence in Poisson Regression.\n\n- **Solution:** Check for multicollinearity among predictors, scale the predictors, or switch to a more appropriate model family.\n\n## Conclusion\n\nFinally, add a conclusion section to the page. This may take on different forms but should broadly summarize the findings in the comparison of languages, packages, or approaches. In summarizing, be sure to include the advantages/limitations of the packages and approaches so the reader can understand the capabilities of the approaches to the statstical methodology.\n\nThere may be instances where you recommend specific languages, packages, or functions. 
Be sure to provide your rationale for these recommendations.\n\n## References\n\nBe sure to include any references or sources used for the analysis here. These could be external links to pages or in-text citations. This will all help the reader find material needed for further evaluation.\n\n**R Documentation:**\n\n- [`glm` function](https://www.rdocumentation.org/packages/stats/versions/3.6.2/topics/glm)\n\n**SAS Documentation:**\n\n- [`PROC GENMOD`](https://documentation.sas.com/doc/en/statcdc/14.2/statug/statug_genmod_overview.htm)\n\nAlso, include this `Session Info` section. Manually add the packages used in your analysis in a vector, like shown below. This captures the environment used, which is important for reproducibility.\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n\n:::\n\n:::", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/templates/single_language_template/execute-results/html.json b/_freeze/templates/single_language_template/execute-results/html.json index 3891d722e..cf4a154ab 100644 --- a/_freeze/templates/single_language_template/execute-results/html.json +++ b/_freeze/templates/single_language_template/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "cc34ebf96a0285fb871bd3832c29999c", + "hash": "204e3b35f41b0e4112f695382dcfe4a5", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"(Language): (Method Name)\"\nexecute: \n eval: false\n---\n\n\n\n# Introduction\n\nThis first section should provide a brief background on the methodology with links to associated journal articles, or relevant sources. This should give the reader a high level overview of the method and its implementation. 
This will be helpful in setting the stage for the examples and discussion that follow.\n\n# Libraries or Extensions Needed\n\nThis section should describe what libraries, packages, or additional materials needed for the analysis described.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(readr)\n```\n:::\n\n\n# Data Sources for the Analysis\n\nFor real data, please provide a means of accessing these data and describe the data source used in the example, especially if it is a new dataset not already in the `CAMIS/data/` folder.\n\nFor simulated/dummy datasets, please provide reproducible code to generate these data. This includes setting a random seed for reproducible random number generation.\n\n::: panel-tabset\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\nexData <- tibble::tribble( \n ~Var1, ~Var2, \n \"val1\", 1, \n \"val2\", 2)\n```\n:::\n\n\n## SAS\n\n\n::: {.cell}\n\n```{.sas .cell-code}\ndata ExData; \n input Var1 $ Var2 ...; \n datalines; \n[Data Lines]; \nrun; \n```\n:::\n\n\n## Python\n\n\n::: {.cell}\n\n```{.python .cell-code}\nimport pandas as pd\n\ndata = {\"score\": [40, 47, 52, 26, 19, 25], \"count\": [2, 2, 2, 1, 2, 2]}\ndf = pd.DataFrame(data)\n```\n:::\n\n:::\n\n*Again, it is important to give credit to the original source of the data used in your example.*\n\n# Statistical Method\n\n## Example Code using \\< package name \\>\n\nNow you can start adding specific examples of how to use these packages or methods to conduct the analysis. 
It is helpful to include some notes and comments throughout.\n\nFor R packages, it is helpful to prepend package names to functions so new readers can understand where specific functions originate, especially with new packages.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# For R packages\nlme4::lmer()\n\nsurvival::Surv()\n```\n:::\n\n\n## Comparison of Packages\n\nIf you are comparing more than one package of a single language, consider adding in a table to illustrate some of the differences/similarities between the packages/methods.\n\n## Conclusion\n\nFinally, add a conclusion section to the page. This may take on different forms but should broadly summarize the findings in the comparison of languages, packages, or approaches. In summarizing, be sure to include the advantages/limitations of the packages and approaches so the reader can understand the capabilities of the approaches to the statstical methodology.\n\nThere may be instances where you recommend specific languages, packages, or functions. Be sure to provide your rationale for these recommendations.\n\n## References\n\nBe sure to include any references or sources used for the analysis here. These could be external links to pages or in-text citations. This will all help the reader find material needed for further evaluation.\n\nAlso, include this `Session Info` section. Manually add the packages used in your analysis in a vector, like shown below. 
This captures the environment used, which is important for reproducibility.\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os macOS Tahoe 26.3\n system aarch64, darwin20\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-02-24\n pandoc 3.6.3 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown)\n quarto 1.8.27 @ /Applications/Positron.app/Contents/Resources/app/quarto/bin/quarto\n\n─ Packages ───────────────────────────────────────────────────────────────────\n package * version date (UTC) lib source\n janitor 2.2.1 2024-12-22 [1] RSPM\n readr 2.1.6 2025-11-14 [1] RSPM\n survival 3.8-3 2024-12-17 [2] CRAN (R 4.5.2)\n\n [1] /Users/christinafillmore/Documents/GitHub/CAMIS/renv/library/macos/R-4.5/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/library\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n:::", + "markdown": "---\ntitle: \"(Language): (Method Name)\"\nexecute: \n eval: false\n---\n\n\n\n# Introduction\n\nThis first section should provide a brief background on the methodology with links to associated journal articles, or relevant sources. This should give the reader a high level overview of the method and its implementation. 
This will be helpful in setting the stage for the examples and discussion that follow.\n\n# Libraries or Extensions Needed\n\nThis section should describe what libraries, packages, or additional materials needed for the analysis described.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(readr)\n```\n:::\n\n\n# Data Sources for the Analysis\n\nFor real data, please provide a means of accessing these data and describe the data source used in the example, especially if it is a new dataset not already in the `CAMIS/data/` folder.\n\nFor simulated/dummy datasets, please provide reproducible code to generate these data. This includes setting a random seed for reproducible random number generation.\n\n::: panel-tabset\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\nexData <- tibble::tribble( \n ~Var1, ~Var2, \n \"val1\", 1, \n \"val2\", 2)\n```\n:::\n\n\n## SAS\n\n```sas\ndata ExData; \n input Var1 $ Var2 ...; \n datalines; \n[Data Lines]; \nrun; \n```\n\n## Python\n\n\n::: {.cell}\n\n```{.python .cell-code}\nimport pandas as pd\n\ndata = {\"score\": [40, 47, 52, 26, 19, 25], \"count\": [2, 2, 2, 1, 2, 2]}\ndf = pd.DataFrame(data)\n```\n:::\n\n:::\n\n*Again, it is important to give credit to the original source of the data used in your example.*\n\n# Statistical Method\n\n## Example Code using \\< package name \\>\n\nNow you can start adding specific examples of how to use these packages or methods to conduct the analysis. 
It is helpful to include some notes and comments throughout.\n\nFor R packages, it is helpful to prepend package names to functions so new readers can understand where specific functions originate, especially with new packages.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# For R packages\nlme4::lmer()\n\nsurvival::Surv()\n```\n:::\n\n\n## Comparison of Packages\n\nIf you are comparing more than one package of a single language, consider adding in a table to illustrate some of the differences/similarities between the packages/methods.\n\n## Conclusion\n\nFinally, add a conclusion section to the page. This may take on different forms but should broadly summarize the findings in the comparison of languages, packages, or approaches. In summarizing, be sure to include the advantages/limitations of the packages and approaches so the reader can understand the capabilities of the approaches to the statstical methodology.\n\nThere may be instances where you recommend specific languages, packages, or functions. Be sure to provide your rationale for these recommendations.\n\n## References\n\nBe sure to include any references or sources used for the analysis here. These could be external links to pages or in-text citations. This will all help the reader find material needed for further evaluation.\n\nAlso, include this `Session Info` section. Manually add the packages used in your analysis in a vector, like shown below. 
This captures the environment used, which is important for reproducibility.\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.5.2 (2025-10-31)\n os Ubuntu 24.04.3 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate en_US.UTF-8\n ctype en_US.UTF-8\n tz Europe/London\n date 2026-03-17\n pandoc 3.6.3 @ /home/michael/.positron-server/bin/f3aae65e0a1a11d39226cd884520f49301daef82/quarto/bin/tools/x86_64/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n package * version date (UTC) lib source\n janitor 2.2.1 2024-12-22 [1] RSPM (R 4.5.0)\n readr 2.1.6 2025-11-14 [1] RSPM (R 4.5.0)\n survival 3.8-3 2024-12-17 [2] CRAN (R 4.5.2)\n\n [1] /home/michael/source/personal/CAMIS/renv/library/linux-ubuntu-noble/R-4.5/x86_64-pc-linux-gnu\n [2] /opt/R/4.5.2/lib/R/library\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n:::", "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" diff --git a/_freeze/templates/template/execute-results/html.json b/_freeze/templates/template/execute-results/html.json deleted file mode 100644 index 594ec1157..000000000 --- a/_freeze/templates/template/execute-results/html.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "hash": "d73ee773aea23c7c23185d8ea11f268a", - "result": { - "engine": "knitr", - "markdown": "---\ntitle: \" \"\n---\n\n\n\n*Italic words are note for you.\\\nYour choice to use part or all to fit your topic.*\n\n## Data used\n\n\n\n::: {.cell}\n\n```{.eval .cell-code}\ndata ExData; \n input Var1 $ Var2 ...; \n datalines; \n[Data Lines]; \nrun; \n```\n:::\n\n::: {.cell}\n\n```{.r .cell-code}\nexData <- tibble::tribble( \n ~Var1, ~Var2, \n \"val1\", 1, \n \"val2\", 2) \n```\n:::\n\n\n\n*a sentence or two about the data. 
Consider using a package like {skimr} to help illustrate the data.*\n\n## Example Code using \n\n*Describe more details about the method.\\\nIf needed, add link to other web page/paper/SAS help methodology page, and list them in reference section.\\\nPrint key results as needed.\\\nAdd explanations about code and results as needed.*\n\n## Example Code using \n\n*Add if there is more than one package*\n\n## Comparison\n\n*If you are comparing more than one package or between different languages, consider adding in a table to illustrate some of the differences. When comparing between languages it can be nice to have a table with links to the pages with deeper dive of each language for a given method method like this table from the ANCOVA page:*\n\n| Analysis | Supported in R | Supported in SAS | Results Match | Notes |\n|---------------|---------------|---------------|---------------|---------------|\n| ANCOVA using general linear model and lsmeans | [Yes](../R/ancova.html) | [Yes](../SAS/ancova.html) | [Yes](#comp) | `GLM()` function from **sasLM** with `EMEANS=TRUE` is the easiest to use and matches SAS |\n\n*But, even if you are just comparing the difference in same language it can be helpful to have a table with values of interest across different packages. Using dynamic content you can add the values to a table without manually putting in each value.\\\nSAS result is very reliable in many cases.*\n\n## Conclusion\n\n*add a conclusion about which language(s) or package(s) are more recommended.\\\nThis section could have a feature like paper abstract, e.g. 
about the selection of package or SAS PROC.*\n\n## Reference\n\n*Cite all sources and references used in the analysis.*\n\n::: {.callout-note collapse=\"true\" title=\"Session Info\"}\n\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.4.3 (2025-02-28)\n os Ubuntu 24.04.2 LTS\n system x86_64, linux-gnu\n ui X11\n language (EN)\n collate C.UTF-8\n ctype C.UTF-8\n tz Europe/London\n date 2025-03-13\n pandoc NA (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P cli 3.6.3 2024-06-21 [?] RSPM (R 4.4.0)\n P digest 0.6.37 2024-08-19 [?] RSPM (R 4.4.0)\n P evaluate 1.0.0 2024-09-17 [?] RSPM (R 4.4.0)\n P fansi 1.0.6 2023-12-08 [?] RSPM (R 4.4.0)\n P fastmap 1.2.0 2024-05-15 [?] RSPM (R 4.4.0)\n P glue 1.8.0 2024-09-30 [?] RSPM (R 4.4.0)\n P htmltools 0.5.8.1 2024-04-04 [?] RSPM (R 4.4.0)\n P htmlwidgets 1.6.4 2023-12-06 [?] RSPM (R 4.4.0)\n P jsonlite 1.8.9 2024-09-20 [?] RSPM (R 4.4.0)\n P knitr 1.48 2024-07-07 [?] RSPM (R 4.4.0)\n P lifecycle 1.0.4 2023-11-07 [?] RSPM (R 4.4.0)\n P magrittr 2.0.3 2022-03-30 [?] RSPM (R 4.4.0)\n P pillar 1.9.0 2023-03-22 [?] RSPM (R 4.4.0)\n P pkgconfig 2.0.3 2019-09-22 [?] RSPM (R 4.4.0)\n renv 1.0.10 2024-10-05 [1] RSPM (R 4.4.3)\n P rlang 1.1.4 2024-06-04 [?] RSPM (R 4.4.0)\n P rmarkdown 2.28 2024-08-17 [?] RSPM (R 4.4.0)\n P sessioninfo 1.2.2 2021-12-06 [?] RSPM (R 4.4.0)\n P tibble 3.2.1 2023-03-20 [?] RSPM (R 4.4.0)\n P utf8 1.2.4 2023-10-22 [?] RSPM (R 4.4.0)\n P vctrs 0.6.5 2023-12-01 [?] RSPM (R 4.4.0)\n P xfun 0.48 2024-10-03 [?] RSPM (R 4.4.0)\n P yaml 2.3.10 2024-07-26 [?] 
RSPM (R 4.4.0)\n\n [1] /home/michael/source/CAMIS/renv/library/linux-ubuntu-noble/R-4.4/x86_64-pc-linux-gnu\n [2] /opt/R/4.4.3/lib/R/library\n\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n\n\n:::\n", - "supporting": [], - "filters": [ - "rmarkdown/pagebreak.lua" - ], - "includes": {}, - "engineDependencies": {}, - "preserve": {}, - "postProcess": true - } -} \ No newline at end of file diff --git a/_quarto.yml b/_quarto.yml index b67c471e2..c862bfdee 100644 --- a/_quarto.yml +++ b/_quarto.yml @@ -45,6 +45,9 @@ website: text: "Source Code" href: "https://github.com/PSIAIMS/CAMIS/" +syntax-definitions: + - sas_syntax_highlighting.xml + format: html: toc: true diff --git a/data/quarto_pkg_dependencies.csv b/data/quarto_pkg_dependencies.csv index 60b18cb84..dea2705d2 100644 --- a/data/quarto_pkg_dependencies.csv +++ b/data/quarto_pkg_dependencies.csv @@ -111,7 +111,7 @@ Comp/r-sas-python_survey-stats-summary.qmd,rpds-py,0.18.0 Comp/r-sas-python_survey-stats-summary.qmd,samplics,0.4.22 Comp/r-sas-python_survey-stats-summary.qmd,scikit-learn,1.5.1 Comp/r-sas-python_survey-stats-summary.qmd,scipy,1.14.0 -Comp/r-sas-python_survey-stats-summary.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 +Comp/r-sas-python_survey-stats-summary.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f Comp/r-sas-python_survey-stats-summary.qmd,six,1.16.0 Comp/r-sas-python_survey-stats-summary.qmd,sniffio,1.3.1 Comp/r-sas-python_survey-stats-summary.qmd,soupsieve,2.5 @@ -143,7 +143,7 @@ Comp/r-sas-wilcoxon-ranksum_hl.qmd,dplyr,71a469a5d5f9fdbf7de6c0dbeece0e22 Comp/r-sas-wilcoxon-ranksum_hl.qmd,forcats,f884a14605c6a9eb01db676eee793ba7 Comp/r-sas-wilcoxon-ranksum_hl.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca Comp/r-sas-wilcoxon-ranksum_hl.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 -Comp/r-sas-wilcoxon-ranksum_hl.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 
+Comp/r-sas-wilcoxon-ranksum_hl.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f Comp/r-sas-wilcoxonsr_HL.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca Comp/r-sas-wilcoxonsr_HL.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 Comp/r-sas_ancova.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca @@ -151,15 +151,18 @@ Comp/r-sas_ancova.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 Comp/r-sas_anova.qmd,emmeans,a080e7cd7ab0cede46c10b6fd5aaad7e Comp/r-sas_anova.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca Comp/r-sas_anova.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 +Comp/r-sas_binomial_test.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca +Comp/r-sas_binomial_test.qmd,procs,7b941743d6101b315e73c6652c3246db +Comp/r-sas_binomial_test.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 Comp/r-sas_chi-sq.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 Comp/r-sas_chi-sq.qmd,stats,4.5.2 -Comp/r-sas_ci_for_2indep_prop.qmd,ratesci,64079b87495e161be4ea77088fa36e16 +Comp/r-sas_ci_for_2indep_prop.qmd,ratesci,820fbf7297a0b05f6e941ea836e45b3a Comp/r-sas_ci_for_2indep_prop.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 Comp/r-sas_ci_for_paired_prop.qmd,cardx,f5c603963977f502b06f4931d9a763ee -Comp/r-sas_ci_for_paired_prop.qmd,ratesci,64079b87495e161be4ea77088fa36e16 +Comp/r-sas_ci_for_paired_prop.qmd,ratesci,820fbf7297a0b05f6e941ea836e45b3a Comp/r-sas_ci_for_paired_prop.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 Comp/r-sas_ci_for_prop.qmd,cardx,f5c603963977f502b06f4931d9a763ee -Comp/r-sas_ci_for_prop.qmd,ratesci,64079b87495e161be4ea77088fa36e16 +Comp/r-sas_ci_for_prop.qmd,ratesci,820fbf7297a0b05f6e941ea836e45b3a Comp/r-sas_ci_for_prop.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 Comp/r-sas_cmh.qmd,dplyr,71a469a5d5f9fdbf7de6c0dbeece0e22 Comp/r-sas_cmh.qmd,gt,7153ac3985ed54f1a318590b73950329 @@ -172,7 +175,7 @@ Comp/r-sas_cmh.qmd,vcdExtra,4ac959f853d8588bc2b841eeace108ae Comp/r-sas_friedman.qmd,ggpubr,3dc2857459c3c1205d25f476b849e66e Comp/r-sas_friedman.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 
Comp/r-sas_friedman.qmd,rstatix,e8b46cf278914385c7cb2bf4bce2f88a -Comp/r-sas_friedman.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 +Comp/r-sas_friedman.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f Comp/r-sas_friedman.qmd,tidyverse,c328568cd14ea89a83bd4ca7f54ae07e Comp/r-sas_gee.qmd,emmeans,a080e7cd7ab0cede46c10b6fd5aaad7e Comp/r-sas_gee.qmd,gee,6633962fa53cc9ff2c1aec45612ff9fc @@ -184,7 +187,6 @@ Comp/r-sas_gee.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 Comp/r-sas_glmm.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca Comp/r-sas_glmm.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 Comp/r-sas_kruskalwallis.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 -Comp/r-sas_kruskalwallis.qmd,stats,4.5.2 Comp/r-sas_logistic-regr.qmd,beeca,8b758f5d16e84d7a09ba340fea01128e Comp/r-sas_logistic-regr.qmd,logistf,7486923705f975746154d53f1bb29210 Comp/r-sas_logistic-regr.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 @@ -194,7 +196,7 @@ Comp/r-sas_logistic-regr.qmd,tidyverse,c328568cd14ea89a83bd4ca7f54ae07e Comp/r-sas_mcnemar.qmd,coin,4084b5070a40ad99dad581ed3b67bd55 Comp/r-sas_mcnemar.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca Comp/r-sas_mcnemar.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 -Comp/r-sas_mcnemar.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 +Comp/r-sas_mcnemar.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f Comp/r-sas_mcnemar.qmd,vcd,c371981692e15016fd45aa5cf8f9b42c Comp/r-sas_mmrm.qmd,details,c360a5a3f7628b073cae452f387d0dc9 Comp/r-sas_mmrm.qmd,dplyr,71a469a5d5f9fdbf7de6c0dbeece0e22 @@ -227,9 +229,9 @@ Comp/r-sas_survival.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca Comp/r-sas_survival.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 Comp/r-sas_survival.qmd,tibble,c55df870972551cac674b50cadb2d51f Comp/r-sas_survival_cif.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 -Comp/r-sas_survival_cif.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 +Comp/r-sas_survival_cif.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f 
Comp/r-sas_survival_csh.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 -Comp/r-sas_survival_csh.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 +Comp/r-sas_survival_csh.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f Comp/r-sas_tipping_point.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca Comp/r-sas_tipping_point.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 Comp/r-sas_tobit.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca @@ -264,7 +266,7 @@ R/Weighted-log-rank.qmd,haven,437bc7804f8ffdfcfed38d0aead6a9d7 R/Weighted-log-rank.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca R/Weighted-log-rank.qmd,nphRCT,0d5689fb965de0891df1852f5f89cd7f R/Weighted-log-rank.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 -R/Weighted-log-rank.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 +R/Weighted-log-rank.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f R/Weighted-log-rank.qmd,survival,fe42836742a4f065b3f3f5de81fccab9 R/ancova.qmd,broom,6cf2f6757591ea712c57c5d7bedc3c27 R/ancova.qmd,car,d8a562f28580588f2e8cfd2bb7c7966f @@ -275,7 +277,7 @@ R/ancova.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca R/ancova.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 R/ancova.qmd,rstatix,e8b46cf278914385c7cb2bf4bce2f88a R/ancova.qmd,sasLM,2f8e8e88b3267f0d61319e77876eaf90 -R/ancova.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 +R/ancova.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f R/ancova.qmd,stats,4.5.2 R/ancova.qmd,tibble,c55df870972551cac674b50cadb2d51f R/anova.qmd,broom,6cf2f6757591ea712c57c5d7bedc3c27 @@ -289,8 +291,9 @@ R/anova.qmd,tidyverse,c328568cd14ea89a83bd4ca7f54ae07e R/association.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 R/association.qmd,survival,fe42836742a4f065b3f3f5de81fccab9 R/association.qmd,tidyverse,c328568cd14ea89a83bd4ca7f54ae07e +R/binomial_test.qmd,DescTools,22a99741fe397d2102777b56b92891f4 +R/binomial_test.qmd,exactci,66fbdf9b83514e606a1c422c8dbb3090 R/binomial_test.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 -R/binomial_test.qmd,stats,4.5.2 
R/binomial_test.qmd,survival,fe42836742a4f065b3f3f5de81fccab9 R/causal_ps_matching.qmd,MatchIt,4605e6e151faa5275ee1eaae441c5f24 R/causal_ps_matching.qmd,dplyr,71a469a5d5f9fdbf7de6c0dbeece0e22 @@ -300,7 +303,7 @@ R/ci_for_2indep_prop.qmd,DescTools,22a99741fe397d2102777b56b92891f4 R/ci_for_2indep_prop.qmd,cardx,f5c603963977f502b06f4931d9a763ee R/ci_for_2indep_prop.qmd,contingencytables,8428e33b6a3b50c03f6122cf18755735 R/ci_for_2indep_prop.qmd,gsDesign,1219b1ca9e68786ee96232da7f82e9c3 -R/ci_for_2indep_prop.qmd,ratesci,64079b87495e161be4ea77088fa36e16 +R/ci_for_2indep_prop.qmd,ratesci,820fbf7297a0b05f6e941ea836e45b3a R/ci_for_2indep_prop.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 R/ci_for_2indep_prop.qmd,tidyverse,c328568cd14ea89a83bd4ca7f54ae07e R/ci_for_paired_prop.qmd,DescTools,22a99741fe397d2102777b56b92891f4 @@ -310,7 +313,7 @@ R/ci_for_paired_prop.qmd,tidyverse,c328568cd14ea89a83bd4ca7f54ae07e R/ci_for_prop.qmd,DescTools,22a99741fe397d2102777b56b92891f4 R/ci_for_prop.qmd,PropCIs,83c746e8590a3e64d791daca69f1bf27 R/ci_for_prop.qmd,cardx,f5c603963977f502b06f4931d9a763ee -R/ci_for_prop.qmd,ratesci,64079b87495e161be4ea77088fa36e16 +R/ci_for_prop.qmd,ratesci,820fbf7297a0b05f6e941ea836e45b3a R/ci_for_prop.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 R/ci_for_prop.qmd,tidyverse,c328568cd14ea89a83bd4ca7f54ae07e R/cmh.qmd,DescTools,22a99741fe397d2102777b56b92891f4 @@ -334,7 +337,7 @@ R/count_data_regression.qmd,tidyverse,c328568cd14ea89a83bd4ca7f54ae07e R/friedman_test.qmd,broom,6cf2f6757591ea712c57c5d7bedc3c27 R/friedman_test.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 R/friedman_test.qmd,rstatix,e8b46cf278914385c7cb2bf4bce2f88a -R/friedman_test.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 +R/friedman_test.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f R/friedman_test.qmd,stats,4.5.2 R/friedman_test.qmd,tidyverse,c328568cd14ea89a83bd4ca7f54ae07e R/gee.qmd,contrast,2c9b9d83e7932e1b1a37cb849e98b3eb @@ -365,7 +368,7 @@ 
R/jonckheere.qmd,ggplot2,24744d322a00a520db329287bf02ce42 R/jonckheere.qmd,readr,141ebcec1bf55707751c7956a34d93db R/jonckheere.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 R/kolmogorov-smirnov_test.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 -R/kolmogorov-smirnov_test.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 +R/kolmogorov-smirnov_test.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f R/kruskal_wallis.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 R/kruskal_wallis.qmd,stats,4.5.2 R/linear-regression.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca @@ -374,7 +377,7 @@ R/logistic_regr.qmd,dplyr,71a469a5d5f9fdbf7de6c0dbeece0e22 R/logistic_regr.qmd,emmeans,a080e7cd7ab0cede46c10b6fd5aaad7e R/logistic_regr.qmd,gmodels,44b137b88b606bc37f14f6570c9bb2b4 R/logistic_regr.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 -R/logistic_regr.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 +R/logistic_regr.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f R/logistic_regr.qmd,stats,4.5.2 R/manova.qmd,emmeans,a080e7cd7ab0cede46c10b6fd5aaad7e R/manova.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca @@ -383,7 +386,7 @@ R/manova.qmd,stats,4.5.2 R/manova.qmd,tidyverse,c328568cd14ea89a83bd4ca7f54ae07e R/marginal_homogeneity_tests.qmd,coin,4084b5070a40ad99dad581ed3b67bd55 R/marginal_homogeneity_tests.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 -R/marginal_homogeneity_tests.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 +R/marginal_homogeneity_tests.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f R/mcnemar.qmd,coin,4084b5070a40ad99dad581ed3b67bd55 R/mcnemar.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 R/mcnemar.qmd,stats,4.5.2 @@ -404,6 +407,8 @@ R/nonpara_wilcoxon_ranksum.qmd,tidyverse,c328568cd14ea89a83bd4ca7f54ae07e R/nparestimate.qmd,coin,4084b5070a40ad99dad581ed3b67bd55 R/nparestimate.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 R/nparestimate.qmd,stats,4.5.2 +R/random_effects_models.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca 
+R/random_effects_models.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 R/rbmi_continuous_joint.qmd,broom,6cf2f6757591ea712c57c5d7bedc3c27 R/rbmi_continuous_joint.qmd,dplyr,71a469a5d5f9fdbf7de6c0dbeece0e22 R/rbmi_continuous_joint.qmd,emmeans,a080e7cd7ab0cede46c10b6fd5aaad7e @@ -413,18 +418,18 @@ R/rbmi_continuous_joint.qmd,mice,e3ecc7124c31517d341f78457d393db7 R/rbmi_continuous_joint.qmd,mmrm,f16234e9c97b54463bb4c62314182046 R/rbmi_continuous_joint.qmd,rbmi,5ed5f819dbf63b81fcaa0593f554ea3f R/rbmi_continuous_joint.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 -R/rbmi_continuous_joint.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 +R/rbmi_continuous_joint.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f R/rbmi_continuous_joint.qmd,tidyr,a4fa2f5876396f04814cb9d8d9ab89e9 R/recurrent_events.qmd,dplyr,71a469a5d5f9fdbf7de6c0dbeece0e22 R/recurrent_events.qmd,gt,7153ac3985ed54f1a318590b73950329 R/recurrent_events.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca R/recurrent_events.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 -R/recurrent_events.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 +R/recurrent_events.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f R/recurrent_events.qmd,survival,fe42836742a4f065b3f3f5de81fccab9 R/rounding.qmd,dplyr,71a469a5d5f9fdbf7de6c0dbeece0e22 R/rounding.qmd,janitor,64f308bf1fbf5f856cdf4b4c7c0ce51b R/rounding.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 -R/rounding.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 +R/rounding.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f R/sample_s_equivalence.qmd,SampleSize4ClinicalTrials,19ef1548a5e491cb7b60d02764a92f92 R/sample_s_equivalence.qmd,TrialSize,702133aa787d503494668dd18deb18c9 R/sample_s_equivalence.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca @@ -447,26 +452,26 @@ R/sample_size_non-inferiority.qmd,epiR,8e7a32486485bcc4d6913936ad042421 R/sample_size_non-inferiority.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 R/sample_size_non-inferiority.qmd,rpact,3aea2099d950c2b75b8680273647bae0 
R/summary-stats.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 -R/summary-stats.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 +R/summary-stats.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f R/summary_skew_kurt.qmd,e1071,9d516dde384526d4784166f888cd2c6c R/summary_skew_kurt.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca R/summary_skew_kurt.qmd,moments,bb94fd8ee5f7f127eae2bddbff26864d R/summary_skew_kurt.qmd,procs,7b941743d6101b315e73c6652c3246db R/summary_skew_kurt.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 R/summary_skew_kurt.qmd,sasLM,2f8e8e88b3267f0d61319e77876eaf90 -R/summary_skew_kurt.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 +R/summary_skew_kurt.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f R/summary_skew_kurt.qmd,stringr,d47392652eedc68bf916657347ff2526 R/summary_skew_kurt.qmd,tibble,c55df870972551cac674b50cadb2d51f R/survey-stats-summary.qmd,gt,7153ac3985ed54f1a318590b73950329 R/survey-stats-summary.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 -R/survey-stats-summary.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 +R/survey-stats-summary.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f R/survey-stats-summary.qmd,survey,b30e7e82e4bebedc3e509976c7dd7403 R/survival.qmd,broom,6cf2f6757591ea712c57c5d7bedc3c27 R/survival.qmd,ggsurvfit,e53139457d2bb40b77edb67919f8bbce R/survival.qmd,haven,437bc7804f8ffdfcfed38d0aead6a9d7 R/survival.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca R/survival.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 -R/survival.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 +R/survival.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f R/survival.qmd,survival,fe42836742a4f065b3f3f5de81fccab9 R/survival.qmd,survminer,d0e3855987f93df5f194b1240aaa30a5 R/survival.qmd,tidyverse,c328568cd14ea89a83bd4ca7f54ae07e @@ -474,14 +479,14 @@ R/survival_cif.qmd,ggsurvfit,e53139457d2bb40b77edb67919f8bbce R/survival_cif.qmd,haven,437bc7804f8ffdfcfed38d0aead6a9d7 R/survival_cif.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca 
R/survival_cif.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 -R/survival_cif.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 +R/survival_cif.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f R/survival_cif.qmd,survival,fe42836742a4f065b3f3f5de81fccab9 R/survival_cif.qmd,tidycmprsk,0003cdeb29c3ce3bb94fb05a13bc34b2 R/survival_cif.qmd,tidyverse,c328568cd14ea89a83bd4ca7f54ae07e R/survival_csh.qmd,haven,437bc7804f8ffdfcfed38d0aead6a9d7 R/survival_csh.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca R/survival_csh.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 -R/survival_csh.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 +R/survival_csh.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f R/survival_csh.qmd,survival,fe42836742a4f065b3f3f5de81fccab9 R/survival_csh.qmd,tidyverse,c328568cd14ea89a83bd4ca7f54ae07e R/tipping_point.qmd,dplyr,71a469a5d5f9fdbf7de6c0dbeece0e22 @@ -500,7 +505,7 @@ R/tipping_point.qmd,purrr,0154ac5b9ef4df1a7be3a54602e781cf R/tipping_point.qmd,rbmi,5ed5f819dbf63b81fcaa0593f554ea3f R/tipping_point.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 R/tipping_point.qmd,rstan,5f47b80f0db40503697eef138a31a6ef -R/tipping_point.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 +R/tipping_point.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f R/tipping_point.qmd,tidyr,a4fa2f5876396f04814cb9d8d9ab89e9 R/tobit regression.qmd,VGAM,d0c561977ba6b718d3261aa60b417c2d R/tobit regression.qmd,broom,6cf2f6757591ea712c57c5d7bedc3c27 @@ -509,26 +514,26 @@ R/tobit regression.qmd,dplyr,71a469a5d5f9fdbf7de6c0dbeece0e22 R/tobit regression.qmd,emmeans,a080e7cd7ab0cede46c10b6fd5aaad7e R/tobit regression.qmd,gt,7153ac3985ed54f1a318590b73950329 R/tobit regression.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 -R/tobit regression.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 +R/tobit regression.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f R/tobit regression.qmd,stats,4.5.2 R/tobit regression.qmd,survival,fe42836742a4f065b3f3f5de81fccab9 
R/ttest_1Sample.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca R/ttest_1Sample.qmd,procs,7b941743d6101b315e73c6652c3246db R/ttest_1Sample.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 -R/ttest_1Sample.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 +R/ttest_1Sample.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f R/ttest_1Sample.qmd,stats,4.5.2 R/ttest_1Sample.qmd,tibble,c55df870972551cac674b50cadb2d51f R/ttest_2Sample.qmd,dplyr,71a469a5d5f9fdbf7de6c0dbeece0e22 R/ttest_2Sample.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca R/ttest_2Sample.qmd,procs,7b941743d6101b315e73c6652c3246db R/ttest_2Sample.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 -R/ttest_2Sample.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 +R/ttest_2Sample.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f R/ttest_2Sample.qmd,stats,4.5.2 R/ttest_2Sample.qmd,tibble,c55df870972551cac674b50cadb2d51f R/ttest_Paired.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca R/ttest_Paired.qmd,procs,7b941743d6101b315e73c6652c3246db R/ttest_Paired.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 -R/ttest_Paired.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 +R/ttest_Paired.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f R/ttest_Paired.qmd,stats,4.5.2 R/ttest_Paired.qmd,tibble,c55df870972551cac674b50cadb2d51f R/wilcoxonsr_hodges_lehman.qmd,DOS2,ef663074ef3fef2e68c4c77274372e51 @@ -543,7 +548,7 @@ R/xgboost.qmd,MASS,a41d0fc833ea756a1136b60a437efe26 R/xgboost.qmd,parsnip,01b65f5a5d1397b31f62defa71cce5b2 R/xgboost.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 R/xgboost.qmd,rsample,1b3ba002dc2521d38330ad72bf4c4431 -R/xgboost.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 +R/xgboost.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f R/xgboost.qmd,tidyverse,c328568cd14ea89a83bd4ca7f54ae07e R/xgboost.qmd,xgboost,6b1cd6c890fb985627c9d7529318ca83 SAS/ancova.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca @@ -576,6 +581,8 @@ SAS/mcnemar.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca SAS/mcnemar.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 
SAS/mi_mar_regression.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca SAS/mi_mar_regression.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 +SAS/random_effects_models.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca +SAS/random_effects_models.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 SAS/rbmi_continuous_joint_SAS.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca SAS/rbmi_continuous_joint_SAS.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 SAS/recurrent_events.qmd,dplyr,71a469a5d5f9fdbf7de6c0dbeece0e22 @@ -585,25 +592,24 @@ SAS/recurrent_events.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 SAS/recurrent_events.qmd,survival,fe42836742a4f065b3f3f5de81fccab9 SAS/rmst.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca SAS/rmst.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 -SAS/rmst.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 +SAS/rmst.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f SAS/rounding.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 -SAS/rounding.qmd,sasquatch,aee151cb374f0ac71a0087f5e70da893 SAS/sample_s_StatXact_test_of_trends.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca SAS/sample_s_StatXact_test_of_trends.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 SAS/sample_s_equivalence.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca SAS/sample_s_equivalence.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 -SAS/sample_s_equivalence.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 +SAS/sample_s_equivalence.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f SAS/sample_s_noninferiority.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca SAS/sample_s_noninferiority.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 -SAS/sample_s_noninferiority.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 +SAS/sample_s_noninferiority.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f SAS/sample_s_superiority.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca SAS/sample_s_superiority.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 -SAS/sample_s_superiority.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 
+SAS/sample_s_superiority.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f SAS/summary_skew_kurt.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca SAS/summary_skew_kurt.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 SAS/survey-stats-summary.qmd,gt,7153ac3985ed54f1a318590b73950329 SAS/survey-stats-summary.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 -SAS/survey-stats-summary.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 +SAS/survey-stats-summary.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f SAS/survey-stats-summary.qmd,survey,b30e7e82e4bebedc3e509976c7dd7403 SAS/survival.qmd,knitr,27682babb50f03b6eb7939ea69ec79ca SAS/survival.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 @@ -2576,7 +2582,7 @@ python/survey-stats-summary.qmd,rpds-py,0.18.0 python/survey-stats-summary.qmd,samplics,0.4.22 python/survey-stats-summary.qmd,scikit-learn,1.5.1 python/survey-stats-summary.qmd,scipy,1.14.0 -python/survey-stats-summary.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 +python/survey-stats-summary.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f python/survey-stats-summary.qmd,six,1.16.0 python/survey-stats-summary.qmd,sniffio,1.3.1 python/survey-stats-summary.qmd,soupsieve,2.5 @@ -2727,7 +2733,7 @@ python/two_samples_t_test.qmd,widgetsnbextension,4.0.10 python/two_samples_t_test.qmd,zipp,3.18.1 templates/multi_language_template.qmd,lme4,0a0abf28da4a31382cf6d523f2e1e3d1 templates/multi_language_template.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 -templates/multi_language_template.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 +templates/multi_language_template.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f templates/single_language_template.qmd,rmarkdown,efe19db0fde0fff13cea7eec6f695021 -templates/single_language_template.qmd,sessioninfo,bf169c6e52cdbded916e448dc1254913 +templates/single_language_template.qmd,sessioninfo,3f9796a8d0a0e8c6eb49a4b029359d1f templates/single_language_template.qmd,tibble,c55df870972551cac674b50cadb2d51f diff --git a/renv.lock b/renv.lock 
index 3c4cfd32a..a3cc696e3 100644 --- a/renv.lock +++ b/renv.lock @@ -9,7 +9,7 @@ ] }, "Python": { - "Version": "3.11.2", + "Version": "3.12.7", "Type": "virtualenv", "Name": "./renv/python/virtualenvs/renv-python-3.12" }, @@ -4495,26 +4495,6 @@ ], "Hash": "2f8e8e88b3267f0d61319e77876eaf90" }, - "sasquatch": { - "Package": "sasquatch", - "Version": "0.1.0.9003", - "Source": "Repository", - "RemoteType": "repository", - "RemoteUrl": "https://github.com/ropensci/sasquatch", - "RemoteRef": "main", - "RemoteSha": "e13f0a8b3c7a0a4cd15c37d9d0442b2b1200425e", - "Requirements": [ - "R", - "cli", - "evaluate", - "htmlwidgets", - "knitr", - "reticulate", - "rlang", - "rstudioapi" - ], - "Hash": "aee151cb374f0ac71a0087f5e70da893" - }, "sass": { "Package": "sass", "Version": "0.4.10", @@ -4617,16 +4597,16 @@ }, "sessioninfo": { "Package": "sessioninfo", - "Version": "1.2.3", + "Version": "1.2.2", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ "R", "cli", "tools", "utils" ], - "Hash": "bf169c6e52cdbded916e448dc1254913" + "Hash": "3f9796a8d0a0e8c6eb49a4b029359d1f" }, "sf": { "Package": "sf", diff --git a/sas_syntax_highlighting.xml b/sas_syntax_highlighting.xml new file mode 100644 index 000000000..a9fc296a5 --- /dev/null +++ b/sas_syntax_highlighting.xml @@ -0,0 +1,1373 @@ + + + + + + + + + + +]> + + + + + + + + BY + DO + ELSE + END + GOTO + IF + LINK + OTHERWISE + SELECT + THEN + TO + UNTIL + WHEN + WHILE + + + + AND + BETWEEN + CONTAINS + EQ + EQT + GE + GET + GT + IN + IS + LE + LET + LIKE + LT + NE + NET + NOT + OR + SAME + ALSO + + + + _ALL_ + _BLANKPAGE_ + _CHARACTER_ + _ERROR_ + _FILE_ + _INFILE_ + _IORC_ + _MRG_ + _NULL_ + _NUMERIC_ + _N_ + _PAGE_ + _TEMPORARY_ + + + + AXIS + CAS + CASLIB + CATNAME + DECLARE + ENDRSUBMIT + ENDSAS + FILENAME + FOOTNOTE + GOPTIONS + KILLTASK + LEGEND + LIBNAME + LISTTASK + LOCK + MISSING + ODS + PAGE + PATTERN + PROCEDURE + RESETLINE + RGET + RSPT + RSUBMIT + SASFILE + SIGNOFF + SIGNON + SKIP + 
SYMBOL + SYSECHO + SYSTASK + TITLE + TRANTAB + WAITFOR + + + + ABORT + ARRAY + ATTRIB + BY + CALL + CONTINUE + DELETE + DESCRIBE + DROP + ERROR + EXECUTE + FILE + FORMAT + INFILE + INFORMAT + INPUT + KEEP + LABEL + LEAVE + LENGTH + LIST + LOSTCARD + MERGE + MODIFY + OUTPUT + PUT + PUTLOG + REDIRECT + REMOVE + RENAME + REPLACE + RETAIN + RETURN + SET + STOP + UPDATE + WHERE + + + + %ABORT + %COMPSTOR + %COPY + %DISPLAY + %DQLOAD + %DQPUTLOC + %DQUNLOAD + %ELSE + %GLOBAL + %GOTO + %IF + %INC + %INCLUDE + %INPUT + %LABEL + %LET + %LIST + %LOCAL + %PUT + %RETURN + %RUN + %SYMDEL + %SYSMACDELETE + %SYSMACEXEC + %SYSEXEC + %THEN + %TO + %TPLOT + %UNTIL + %WHILE + %WINDOW + + + + ALTER + APPEND + BUFNO + BUFSIZE + CASLIB + CNTLLEV + COMPRESS + COPIES + DATALIMIT + DLDMGACTION + DROP + DUPLICATE + ENCODING + ENCRYPT + ENCRYPTKEY + EOC + EXTENDOBSCOUNTER + FILECLOSE + FIRSTOBS + GENMAX + GENNUM + IDXNAME + IDXWHERE + IN + INDEX + KEEP + LABEL + MEMTYPE + OBS + OBSBUF + ONDEMAND + ORDERBY + OUTREP + PARTITION + POINTOBS + PROMOTE + PW + PWREQ + READ + READTRANSFERSIZE + RENAME + REPEMPTY + REPLACE + REUSE + ROLE + RTS + SCRIPT + SGIO + SORTEDBY + SPILL + TAG + TEMPEXPRESS + TEMPNAMES + TOBSNO + TRANSCODE_FAIL + TRANTAB + TYPE + WHERE + WHEREUP + WRITE + WRITETRANSFERSIZE + + + + ABS + ADDMATRIX + ADDR + ADDRLONG + ADDROW + ADD_TABLE_ATTR + AIRY + ALLCOMB + ALLCOMBI + ALLPERM + ANYALNUM + ANYALPHA + ANYCNTRL + ANYDIGIT + ANYFIRST + ANYGRAPH + ANYLOWER + ANYNAME + ANYPRINT + ANYPUNCT + ANYSPACE + ANYUPPER + ANYXDIGIT + ARCOS + ARCOSH + ARMCONV + ARMEND + ARMGTID + ARMINIT + ARMJOIN + ARMPROC + ARMSTOP + ARMSTRT + ARMUPDT + ARSIN + ARSINH + ARTANH + ASCEBC + ATAN + ATAN2 + ATTRC + ATTRN + BAND + BETA + BETAINV + BLACKCLPRC + BLACKPTPRC + BLKSHCLPRC + BLKSHPTPRC + BLSHIFT + BNOT + BOR + BQUOTE + BRSHIFT + BXOR + BYTE + CAT + CATQ + CATS + CATT + CATX + CDF + CEIL + CEILZ + CEXIST + CHAR + CHOL + CHOOSEC + CHOOSEN + CINV + CLIBEXIST + CLOSE + CMISS + CMPRES + CNONCT + COALESCE + 
COALESCEC + COLLATE + COMB + COMPANION_NEXT + COMPARE + COMPBL + COMPCOST + COMPFUZZ + COMPGED + COMPLEV + COMPOUND + COMPRESS + COMPSTOR + CONSTANT + CONVX + CONVXP + COS + COSH + COT + COUNT + COUNTC + COUNTW + CSC + CSS + CUMIPMT + CUMPRINC + CUROBS + CV + DACCDB + DACCDBSL + DACCSL + DACCSYD + DACCTAB + DAIRY + DATATYP + DATDIF + DATE + DATEJUL + DATEPART + DATETIME + DAY + DCLOSE + DCREATE + DEFINE + DEPDB + DEPDBSL + DEPSL + DEPSYD + DEPTAB + DEQUOTE + DET + DEVIANCE + DHMS + DICTIONARY + DIF + DIGAMMA + DIM + DINFO + DISCARD + DIVIDE + DNUM + DOPEN + DOPTNAME + DOPTNUM + DOSUBL + DQCASE + DQGENDER + DQGENDERINFOGET + DQGENDERPARSED + DQIDENTIFY + DQLOCALEGUESS + DQLOCALEINFOGET + DQLOCALEINFOLIST + DQMATCH + DQMATCHINFOGET + DQMATCHPARSED + DQPARSE + DQPARSEINFOGET + DQPARSETOKENGET + DQPARSETOKENPUT + DQPATTERN + DQSCHEMEAPPLY + DQSRVARCHJOB + DQSRVCOPYLOG + DQSRVDELETELOG + DQSRVJOBSTATUS + DQSRVKILLJOB + DQSRVPROFJOBFILE + DQSRVPROFJOBREP + DQSRVUSER + DQSTANDARDIZE + DQTOKEN + DREAD + DROPNOTE + DSNAME + DSNCATLGD + DUR + DURP + DYNAMIC_ARRAY + EBCASC + EFFRATE + ELEMMULT + ENTRY_FIRST + ENTRY_NEXT + ENVLEN + ERF + ERFC + EUCLID + EVAL + EXECUTE + EXIST + EXISTS + EXP + EXPMATRIX + FACT + FAPPEND + FCLOSE + FCOL + FCOPY + FDELETE + FETCH + FETCHOBS + FEXIST + FGET + FILEATTR + FILEEXIST + FILENAME + FILEREF + FILLMATRIX + FINANCE + FIND + FINDC + FINDFILE + FINDTABLE + FINDW + FINFO + FINV + FIPNAME + FIPNAMEL + FIPSTATE + FIRST + FLOOR + FLOORZ + FMTINFO + FNONCT + FNOTE + FOPEN + FOPTNAME + FOPTNUM + FPOINT + FPOS + FPUT + FREAD + FREWIND + FRLEN + FSEP + FUZZ + FWRITE + GAMINV + GAMMA + GARKHCLPRC + GARKHPTPRC + GCD + GEODIST + GEOMEAN + GEOMEANZ + GETCASURL + GETDVI + GETJPI + GETLCASLIB + GETLOG + GETLSESSREF + GETLTAG + GETMSG + GETOPTION + GETQUOTA + GETSESSOPT + GETSYM + GETTERM + GETVARC + GETVARN + GITFN_CLONE + GITFN_COMMIT + GITFN_COMMITFREE + GITFN_COMMIT_GET + GITFN_COMMIT_LOG + GITFN_CO_BRANCH + GITFN_DEL_REPO + GITFN_DIFF + 
GITFN_DIFF_FREE + GITFN_DIFF_GET + GITFN_DIFF_IDX_F + GITFN_IDX_ADD + GITFN_IDX_REMOVE + GITFN_MRG_BRANCH + GITFN_NEW_BRANCH + GITFN_PULL + GITFN_PUSH + GITFN_RESET + GITFN_RESET_FILE + GITFN_STATUS + GITFN_STATUSFREE + GITFN_STATUS_GET + GITFN_VERSION + GRAYCODE + GRDSVC_ENABLE + GRDSVC_GETADDR + GRDSVC_GETINFO + GRDSVC_GETNAME + GRDSVC_NNODES + HARMEAN + HARMEANZ + HASHING + HASHING_FILE + HASHING_HMAC + HASHING_HMAC_FILE + HASHING_HMAC_INIT + HASHING_INIT + HASHING_PART + HASHING_TERM + HBOUND + HMS + HOLIDAY + HOLIDAYCK + HOLIDAYCOUNT + HOLIDAYNAME + HOLIDAYNX + HOLIDAYNY + HOLIDAYTEST + HOUR + HTMLDECODE + HTMLENCODE + IBESSEL + IDENTITY + IFC + IFN + INDEX + INDEXC + INDEXW + INPUT + INPUTC + INPUTN + INSERT_CATALOG + INSERT_DATASET + INSERT_FDB + INSERT_FILE + INSERT_HTML + INSERT_MDDB + INSERT_PACKAGE + INSERT_REF + INSERT_SQLVIEW + INSERT_VIEWER + INT + INTCINDEX + INTCK + INTCYCLE + INTFIT + INTFMT + INTGET + INTINDEX + INTNEST + INTNX + INTRR + INTSEAS + INTSHIFT + INTTEST + INTZ + INV + INVCDF + INVERSE + IORCMSG + IPMT + IQR + IRR + IS8601_CONVERT + ISARRAY + ISBLOB + ISDICTIONARY + ISDOUBLE + ISINTEGER + ISLIST + ISNULL + ISSTRING + ISTABLE + ISTYPE + JBESSEL + JSONPP + JULDATE + JULDATE7 + KCOMPARE + KCOMPRESS + KCOUNT + KINDEX + KINDEXC + KLEFT + KLENGTH + KLOWCASE + KREVERSE + KRIGHT + KSCAN + KSUBSTR + KSUBSTRB + KTRANSLATE + KTRIM + KTRUNCATE + KUPCASE + KUPDATE + KUPDATEB + KURTOSIS + KVERIFY + LABEL + LAG + LARGEST + LBOUND + LCM + LCOMB + LEFT + LENGTH + LENGTHC + LENGTHM + LENGTHN + LEXCOMB + LEXCOMBI + LEXPERK + LEXPERM + LFACT + LGAMMA + LIBNAME + LIBREF + LIMMOMENT + LOG + LOG10 + LOG1PX + LOG2 + LOGBETA + LOGCDF + LOGISTIC + LOGPDF + LOGSDF + LOWCASE + LPERM + LPNORM + MAD + MARGRCLPRC + MARGRPTPRC + MAX + MD5 + MDY + MEAN + MEDIAN + MIN + MINUTE + MISSING + MOD + MODEXIST + MODULE + MODULEC + MODULEN + MODZ + MONTH + MOPEN + MORT + MSPLINT + MULT + MVALID + N + NETPV + NEWTABLE + NLITERAL + NMISS + NODENAME + NOMRATE + NORMAL + NOTALNUM 
+ NOTALPHA + NOTCNTRL + NOTDIGIT + NOTE + NOTFIRST + NOTGRAPH + NOTLOWER + NOTNAME + NOTPRINT + NOTPUNCT + NOTSPACE + NOTUPPER + NOTXDIGIT + NPV + NRBQUOTE + NRQUOTE + NRSTR + NVALID + NWKDOM + OPEN + ORDINAL + PACKAGE_BEGIN + PACKAGE_DESTROY + PACKAGE_END + PACKAGE_FIRST + PACKAGE_NEXT + PACKAGE_PUBLISH + PACKAGE_TERM + PATHNAME + PCTL + PDF + PEEK + PEEKC + PEEKCLONG + PEEKLONG + PERM + PMF + PMT + POINT + POISSON + POKE + POKELONG + POWER + PPMT + PRINTTABLE + PROBBETA + PROBBNML + PROBBNRM + PROBCHI + PROBF + PROBGAM + PROBHYPR + PROBIT + PROBMC + PROBMED + PROBNEGB + PROBNORM + PROBT + PROPCASE + PRXCHANGE + PRXDEBUG + PRXFREE + PRXMATCH + PRXNEXT + PRXPAREN + PRXPARSE + PRXPOSN + PRXSUBSTR + PTRLONGADD + PUT + PUTC + PUTLOG + PUTN + PUTSYM + PVP + QCMPRES + QLEFT + QLOWCASE + QSCAN + QSUBSTR + QSYSFUNC + QTR + QTRIM + QUANTILE + QUOTE + QUPCASE + RANBIN + RANCAU + RANCOMB + RAND + RANEXP + RANGAM + RANGE + RANK + RANNOR + RANPERK + RANPERM + RANPOI + RANTBL + RANTRI + RANUNI + READPATH + READ_ARRAY + RENAME + REPEAT + RESOLVE + RETRIEVE_CATALOG + RETRIEVE_DATASET + RETRIEVE_FDB + RETRIEVE_FILE + RETRIEVE_HTML + RETRIEVE_MDDB + RETRIEVE_NESTED + RETRIEVE_PACKAGE + RETRIEVE_REF + RETRIEVE_SQLVIEW + RETRIEVE_VIEWER + REVERSE + REWIND + RIGHT + RMS + ROUND + ROUNDE + ROUNDZ + RUN_MACRO + RUN_SASFILE + RXCHANGE + RXFREE + RXSUBSTR + SAVING + SAVINGS + SCAN + SCANQ + SDF + SEC + SECOND + SESSFOUND + SESSIONS + SET + SETNULL + SETTERM + SHA256 + SHA256HEX + SHA256HMACHEX + SIGN + SIN + SINH + SKEWNESS + SLEEP + SMALLEST + SOAPWEB + SOAPWEBMETA + SOAPWIPSERVICE + SOAPWIPSRS + SOAPWS + SOAPWSMETA + SOFTMAX + SOLVE + SORT + SORTC + SORTN + SORT_REV + SOUNDEX + SPEDIS + SQRT + SQUANTILE + STD + STDERR + STDIZE + STFIPS + STNAME + STNAMEL + STR + STREAMINIT + STRIP + STRUCTINDEX + SUBPAD + SUBSTR + SUBSTRN + SUBTRACTMATRIX + SUM + SUMABS + SUPERQ + SYMEXIST + SYMGET + SYMGLOBL + SYMLOCAL + SYMPUT + SYMPUTX + SYSEVALF + SYSEXIST + SYSFUNC + SYSGET + SYSMSG + SYSPARM + 
SYSPROCESSID + SYSPROCESSNAME + SYSPROD + SYSRC + SYSTEM + TABCOLUMNS + TABTYPES + TAN + TANH + TERMIN + TERMOUT + TIME + TIMEPART + TIMEVALUE + TINV + TNONCT + TODAY + TRACEBACK + TRANSLATE + TRANSPOSE + TRANSTRN + TRANWRD + TRIGAMMA + TRIM + TRIMN + TRUNC + TSO + TTCLOSE + TTCONTRL + TTOPEN + TTREAD + TTWRITE + TYPEOF + TZID + TZONEID + TZONENAME + TZONEOFF + TZONES2U + TZONEU2S + UNIFORM + UNQUOTE + UPCASE + URLDECODE + URLENCODE + USS + UUIDGEN + VAR + VARFMT + VARINFMT + VARLABEL + VARLEN + VARNAME + VARNUM + VARRAY + VARRAYX + VARTYPE + VERIFY + VFORMAT + VFORMATD + VFORMATDX + VFORMATN + VFORMATNX + VFORMATW + VFORMATWX + VFORMATX + VINARRAY + VINARRAYX + VINFORMAT + VINFORMATD + VINFORMATDX + VINFORMATN + VINFORMATNX + VINFORMATW + VINFORMATWX + VINFORMATX + VLABEL + VLABELX + VLENGTH + VLENGTHX + VMS + VNAME + VNAMEX + VNEXT + VTYPE + VTYPEX + VVALUE + VVALUEX + WEEK + WEEKDAY + WHICHC + WHICHN + WRITE_ARRAY + WTO + YEAR + YIELDP + YRDIF + YYQ + ZEROMATRIX + ZIPCITY + ZIPCITYDISTANCE + ZIPFIPS + ZIPNAME + ZIPNAMEL + ZIPSTATE + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/templates/multi_language_template.qmd b/templates/multi_language_template.qmd index a72a9ccc7..67b0b1f3a 100644 --- a/templates/multi_language_template.qmd +++ b/templates/multi_language_template.qmd @@ -64,7 +64,7 @@ example_data <- data.frame( ## SAS -```{sas} +```sas /* Set random seed for reproducibility */ data example_data; call streaminit(123); @@ -110,7 +110,7 @@ summary(example_model) ### SAS -```{sas} 
+```sas /* SAS code for basic Poisson Regression */ proc genmod data=example_data; class predictor; @@ -144,7 +144,7 @@ summary(alternative_model) ### SAS -```{sas} +```sas /* SAS code for handling overdispersion */ proc genmod data=example_data; class predictor; diff --git a/templates/single_language_template.qmd b/templates/single_language_template.qmd index 282ed4d6c..a136831aa 100644 --- a/templates/single_language_template.qmd +++ b/templates/single_language_template.qmd @@ -39,7 +39,7 @@ exData <- tibble::tribble( ## SAS -```{sas} +```sas data ExData; input Var1 $ Var2 ...; datalines;