diff --git a/.Rbuildignore b/.Rbuildignore index 79d8a94..1d3c450 100644 --- a/.Rbuildignore +++ b/.Rbuildignore @@ -8,3 +8,5 @@ ^docs$ ^pkgdown$ ^Makefile +^doc$ +^Meta$ diff --git a/.gitignore b/.gitignore index 8c357c9..7cde852 100644 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,5 @@ .DS_Store inst/doc docs +/doc/ +/Meta/ diff --git a/DESCRIPTION b/DESCRIPTION index 127d5e5..f85ed81 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,44 +1,37 @@ Package: tractable Title: Analyzing diffusion MRI tract profiles -Version: 0.2.0 +Version: 0.2.1 Authors@R: c(person(given = "Adam", family = "Richie-Halford", email = "richiehalford@gmail.com", - role = c("aut", "cre"), + role = "aut", comment = c(ORCID = "0000-0001-9276-9084")), person(given = "Kelly", family = "Chang", email = "kchang4@uw.edu", - role = c("aut"), + role = c("aut", "cre"), comment = c(ORCID = "0000-0001-5385-2782")), person(given = "Teresa", family = "Gomez", email = "tmgomez@uw.edu", - role = c("aut"), - comment = c(ORCID = "XXX")), + role = "aut"), person(given = "McKenzie", family = "Hagen", email = "mphagen@uw.edu", - role = c("aut"), + role = "aut", comment = c(ORCID = "0000-0002-7454-8189")), person(given = "Ethan", family = "Roy", email = "ethanroy395@gmail.com", - role = c("aut"), + role = "aut", comment = c(ORCID = "0000-0002-4956-2237")), person(given = "Ariel", family = "Rokem", email = "arokem@gmail.com", - role = c("aut"), + role = "aut", comment = c(ORCID = "0000-0003-0679-1985"))) -Author: Adam Richie-Halford [aut, cre], - Kelly Chang (aut), - Teresa Gomez (aut), - McKenzie Hagen (aut), - Ethan Roy (aut), - Ariel Rokem (aut) -Maintainer: Adam Richie-Halford +Maintainer: Kelly Chang Description: A toolbox for analyzing diffusion MRI tract profiles. License: MIT + file LICENSE Encoding: UTF-8 diff --git a/man/tractable-package.Rd b/man/tractable-package.Rd index 8975c48..064ab06 100644 --- a/man/tractable-package.Rd +++ b/man/tractable-package.Rd @@ -17,12 +17,12 @@ Useful links: } \author{ -\strong{Maintainer}: Adam Richie-Halford \email{richiehalford@gmail.com} (\href{https://orcid.org/0000-0001-9276-9084}{ORCID}) +\strong{Maintainer}: Kelly Chang \email{kchang4@uw.edu} (\href{https://orcid.org/0000-0001-5385-2782}{ORCID}) Authors: \itemize{ - \item Kelly Chang \email{kchang4@uw.edu} (\href{https://orcid.org/0000-0001-5385-2782}{ORCID}) - \item Teresa Gomez \email{tmgomez@uw.edu} (\href{https://orcid.org/XXX}{ORCID}) + \item Adam Richie-Halford \email{richiehalford@gmail.com} (\href{https://orcid.org/0000-0001-9276-9084}{ORCID}) + \item Teresa Gomez \email{tmgomez@uw.edu} \item McKenzie Hagen \email{mphagen@uw.edu} (\href{https://orcid.org/0000-0002-7454-8189}{ORCID}) \item Ethan Roy \email{ethanroy395@gmail.com} (\href{https://orcid.org/0000-0002-4956-2237}{ORCID}) \item Ariel Rokem \email{arokem@gmail.com} (\href{https://orcid.org/0000-0003-0679-1985}{ORCID}) diff --git a/vignettes/changing-k.Rmd b/vignettes/changing-k.Rmd index 48d0cd6..3fe4630 100644 --- a/vignettes/changing-k.Rmd +++ b/vignettes/changing-k.Rmd @@ -13,6 +13,7 @@ knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) +options(scipen = 1, digits = 4) # set to four decimal ``` This vignette demonstrates how a GAM model changes with increased flexibility. @@ -33,14 +34,14 @@ Importantly, both the group ("ALS" or "CTRL") and the subject identifier ("subjectID") need to be factors for subsequent analysis to work properly. 
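If your AFQ data frame comes from somewhere other than `read_afq_sarica()` (used in the next chunk), you can make the conversion explicit. A minimal sketch, assuming the same column names (`group`, `subjectID`) and a data frame called `df_sarica`:

```{r factor_conversion_sketch, eval = FALSE}
# Sketch only: ensure grouping variables are factors before modeling.
# read_afq_sarica() may already return appropriately typed columns; this is
# mainly relevant for data frames assembled by other means.
df_sarica$group     <- factor(df_sarica$group)
df_sarica$subjectID <- factor(df_sarica$subjectID)
```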
-```{r} +```{r load_data} df_sarica <- read_afq_sarica(na_omit = TRUE) df_sarica ``` We fit models with different levels of flexibility, encoded in different values of `k`: -```{r} +```{r k_models} k_values <- c(4, 8, 16, 32) models <- list() @@ -58,7 +59,7 @@ for (i in 1:length(k_values)){ And we plot the smooths, divided by group for each one of these levels: -```{r} +```{r plot_k, fig.align = "center", fig.width = 7.25, fig.height = 4} plots <- list() for (i in 1:length(k_values)){ plots[[i]] <- models[[i]] %>% @@ -71,9 +72,10 @@ for (i in 1:length(k_values)){ geom_ribbon(color = NA, alpha = 0.35) + geom_line(linewidth = 1) + scale_y_continuous(name = "FA") + - ggtitle(sprintf("k = %d", k_values[1])) + + ggtitle(sprintf("k = %d", k_values[i])) + theme_bw() } +names(plots) <- sprintf("k = %d", k_values) plots ``` diff --git a/vignettes/references.bib b/vignettes/references.bib index 3c1165e..9612761 100644 --- a/vignettes/references.bib +++ b/vignettes/references.bib @@ -121,37 +121,45 @@ @ARTICLE{Yeatman2018AFQB } @ARTICLE{VanRij2019, - title = {Analyzing the {Time} {Course} of {Pupillometric} {Data}}, + title = {Analyzing the Time Course of Pupillometric data}, author = {Van Rij, Jacolien and Hendriks, Petra and Van Rijn, Hedderik and Baayen, R. Harald and Wood, Simon N.}, - abstract = {This article provides a tutorial for analyzing pupillometric - data. Pupildilation has become increasingly popular in - psychological and psycholinguistic research as a measure to - trace language processing. However, there is no general consensus - about procedures to analyze the data, with most studies analyzing - extracted features from the pupil dilation data instead of analyzing - the pupil dilation trajectories directly. Recent studies have started - to apply nonlinear regression and other methods to analyze the pupil - dilation trajectories directly, utilizing all available information - in the continuously measured signal. This article applies a nonlinear - regression analysis, generalized additive mixed modeling, and - illustrates how to analyze the full-time course of the pupil dilation - signal. The regression analysis is particularly suited for analyzing - pupil dilation in the fields of psychological and psycholinguistic - research because generalized additive mixed models can include complex - nonlinear interactions for investigating the effects of properties of - stimuli (e.g., formant frequency) or participants (e.g., working memory - score) on the pupil dilation signal. To account for the variation due to - participants and items, nonlinear random effects can be included. However, - one of the challenges for analyzing time series data is dealing with the - autocorrelation in the residuals, which is rather extreme for the pupillary - signal. On the basis of simulations, we explain potential causes of this - extreme autocorrelation, and on the basis of the experimental data, we - show how to reduce their adverse effects, allowing a much more coherent - interpretation of pupillary data than possible with feature-based techniques.}, journal = "Trends in Hearing", volume = 23, pages = 1-22, month = jan, year = 2019, } + + +@ARTICLE{Olszowy2019-ge, + title = "Accurate autocorrelation modeling substantially improves {fMRI} + reliability", + author = "Olszowy, Wiktor and Aston, John and Rua, Catarina and Williams, + Guy B", + journal = "Nat. 
Commun.", + publisher = "Springer Science and Business Media LLC", + volume = 10, + number = 1, + pages = 1220, + abstract = "Given the recent controversies in some neuroimaging statistical + methods, we compare the most frequently used functional Magnetic + Resonance Imaging (fMRI) analysis packages: AFNI, FSL and SPM, + with regard to temporal autocorrelation modeling. This process, + sometimes known as pre-whitening, is conducted in virtually all + task fMRI studies. Here, we employ eleven datasets containing 980 + scans corresponding to different fMRI protocols and subject + populations. We found that autocorrelation modeling in AFNI, + although imperfect, performed much better than the + autocorrelation modeling of FSL and SPM. The presence of residual + autocorrelated noise in FSL and SPM leads to heavily confounded + first level results, particularly for low-frequency experimental + designs. SPM's alternative pre-whitening method, FAST, performed + better than SPM's default. The reliability of task fMRI studies + could be improved with more accurate autocorrelation modeling. We + recommend that fMRI analysis packages provide diagnostic plots to + make users aware of any pre-whitening problems.", + month = dec, + year = 2019, + language = "en" +} diff --git a/vignettes/tractable-autocorrelations.Rmd b/vignettes/tractable-autocorrelations.Rmd index c425a97..481d51c 100644 --- a/vignettes/tractable-autocorrelations.Rmd +++ b/vignettes/tractable-autocorrelations.Rmd @@ -13,17 +13,18 @@ knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) +options(scipen = 1, digits = 4) # set to four decimal ``` -This vignette demonstrates the use of an AR1 model within a GAM to account for -autocorrelation of the error terms of the model. We will use data from the -Sarica dataset [@Sarica2017] to demonstrate the how to include an autocorrelation term and its impact on the model statistics. +This vignette demonstrates the use of an AR1 model within a GAM to account for +the spatial autocorrelation of the errors of the model. The ideas that we use closely follow the work of Van Rij and colleagues [@VanRij2019]. While their work used data that is different in many respects from our data (time-series of eye pupil size in psycholinguistic experiments), there are also some similarities to the tract profile data that we are analyzing in `tractable`. For example, the signals tend to change slowly over time in their analysis, and tend to change rather slowly in space in our analysis. In both cases, this means that the models fail to capture some characteristics of the data, and that some inferences from the GAM models tend to be anti-conservative, unless a mitigation strategy and careful model checking are implemented. In particular, in both cases, the residuals of the model may exhibit substantial auto-correlation. They may also end up not being centered on zero. It is always worth checking the underlying assumption of normally-distributed residuals by plotting a so-called QQ plot. Finally, we can use formal model comparison methods to adjudicate between alternative models. -We start by loading the `tractable` library: +As an example of this approach, we will use data from the Sarica dataset [@Sarica2017] to demonstrate how to include an autocorrelation term in the GAM, and its impact on model statistics.
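The checks described above require only a few lines of base R once a model has been fit. A minimal, hypothetical sketch (where `fit` stands in for any fitted `mgcv` GAM object; the functions used later in this vignette wrap these diagnostics more conveniently):

```{r residual_checks_sketch, eval = FALSE}
# Hypothetical sketch of the residual diagnostics discussed above.
res <- residuals(fit)          # `fit` is a placeholder for a fitted mgcv GAM
qqnorm(res); qqline(res)       # QQ plot: check normality of the residuals
hist(res, breaks = 50)         # residuals should be roughly centered on zero
acf(res)                       # autocorrelation of the residuals across lags
```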
We start by loading the `tractable` library, as well as the [itsadug](https://www.rdocumentation.org/packages/itsadug/versions/2.4.1/topics/itsadug) and [gratia](https://gavinsimpson.github.io/gratia/) libraries, which both provide functionality to assess GAMs fit by `mgcv` (our workhorse for GAM fitting). -```{r setup} +```{r setup, echo = FALSE} library(tractable) library(itsadug) +library(gratia) ``` Next, we will use a function that is included in `tractable` to read this dataset directly into memory. @@ -35,11 +36,15 @@ df_sarica <- read_afq_sarica(na_omit = TRUE) df_sarica ``` -We will first fit a GAM model without any autocorrelation structure using the -`tractable_single_tract` function. This model will use "age" and "group" to -predict FA, while smoothing over the tract nodes. We will also use the automated -procedure implemented in `tractable_single_tract` to determine the ideal value -for `k`, a parameter used to determine the number of spline functions. The default behavior for `tractable_single_tract` is to include the AR1 model. To avoid this, we set the parameter `autocor` to `FALSE`. +We will first fit a GAM model that does not account for autocorrelation structure +in the residuals using the `tractable_single_tract` function. This model will use +"group" and "age" to model the measured FA, while smoothing over the tract +nodes. We will also use the automated procedure implemented in +`tractable_single_tract` to determine the ideal value for `k`, a parameter used +to determine the number of spline functions. The default behavior for +`tractable_single_tract` is to include an AR1 model to account for +autocorrelations, as we will see below. But for now, to avoid this, we first set +the parameter `autocor` to `FALSE`. ```{r fit_no_autocor_model} cst_fit_no_autocor <- tractable_single_tract( df = df_sarica, tract = "Right Corticospinal", target = "fa", regressors = c("age", "group"), node_group = "group", - k = "auto", + node_k = 16, autocor = FALSE ) @@ -57,49 +62,93 @@ cst_no_autocor_summary <- summary(cst_fit_no_autocor) cst_no_autocor_summary ``` -Examining the summary of the resulting GAM fit object shows us that the `k=16` +Examining the summary of the resulting GAM fit object shows us that the `k = 16` is sufficiently large to describe the spatial variation of tract profile data. In addition, we see that there is a statistically significant effect of group -(with a p-value of `r cst_no_autocor_summary$p.table["groupCTRL", "Pr(>|t|)"]`) and no statistically significant effect of age -(p = `r cst_no_autocor_summary$p.table["age", "Pr(>|t|)"]`). +(with a p-value of `r cst_no_autocor_summary$p.table["groupCTRL", "Pr(>|t|)"]`) and no statistically significant effect of age (p = `r cst_no_autocor_summary$p.table["age", "Pr(>|t|)"]`). + +In this model, no steps were taken to account for autocorrelated residuals. We will use a few model diagnostics to evaluate the model. First, we will use the `gratia::appraise` function, which presents a few different diagnostic visualizations of the model: + +```{r appraise_cst_no_autocor, fig.align = "center", fig.width = 7.25, fig.height = 7.25} +appraise(cst_fit_no_autocor) +``` + + +The top left plot is a QQ plot: it shows the residuals as a function of their quantiles. In a perfect world (or at least a world in which model assumptions are valid), these points should fall along the equality line. This plot is not terrible, but there are some deviations. Another plot to look at is the plot of the residuals as a function of the prediction.
Here, we look both at the overall location and shape of the point cloud and for any signs of clear structure. In this case, we see some signs of trouble: the whole cloud is shifted away from zero, and there are what appear to be trails of points, suggesting that some points form patterns. The first of these issues is also apparent in the bottom left, where the histogram of model residuals is not centered on zero. + +Another plot that is going to be crucial as a diagnostic is the plot of the autocorrelation function of the residuals. A plot of this sort is provided by the `itsadug` library: + +```{r plot_acf_residuals_no_autocor, fig.align = "center", fig.width = 7.25, fig.height = 4} +acf_resid(cst_fit_no_autocor) +``` + +The dashed blue lines indicate the 95% confidence interval for the auto-correlation +function of white noise. Here, we see that the auto-correlation at many of the lags +(and particularly in the immediate neighbor, lag-1) is substantially larger than +would be expected for a signal with no autocorrelation. These autocorrelations +pose a danger to inference not only because of mis-specification of the model, but +also because we will under-estimate the standard errors of the model in +this setting, which will result in false positives (this is what Van Rij et al. +elegantly refer to as "anti-conservative" models). To account for potential spatial autocorrelation of FA values along the length -of the tract profile, we can incorporate a AR1 model into our GAM. Briefly, this -AR1 model is a linear model that estimates the amount of influence of the FA value of the preceding node on the FA value of the current node (see [@VanRij2019](https://journals.sagepub.com/doi/pdf/10.1177/2331216519832483) for an overview of accounting for autocorrelation using the `mgcv` package). +of the tract profile, we can incorporate an AR1 model into our GAM. Briefly, the +AR1 model is a linear model that estimates and accounts for the influence of the +residual FA value at each node on the residual FA value at the +neighboring node. This is somewhat akin to the "pre-whitening" that fMRI researchers undertake to account for temporal auto-correlations in the time series measured with fMRI (see e.g. [@Olszowy2019-ge]). The AR1 model takes a parameter $\rho$ to estimate autocorrelation effects. We can pass our initial model into the function `itsadug::start_value_rho` to -automatically determine the value of $\rho$. We can also plot the autocorrelation by setting `plot = TRUE` within that function (pictured below). +automatically determine the value of $\rho$. -```{r calculate_rho} -rho <- start_value_rho(cst_fit_no_autocor) -start_value_rho(cst_fit_no_autocor, plot = TRUE) +```{r estimate_rho, fig.align = "center", fig.width = 7.25, fig.height = 4} +rho_1 <- start_value_rho(cst_fit_no_autocor) +rho_1 ``` -By default, the `tractable_single_tract` function will determine the value of -$\rho$ and incorporate the AR1 model into the GAM estimation. +By default, the `tractable_single_tract` function empirically determines the value of $\rho$ based on the data and uses it to incorporate the AR1 model of the residuals into the GAM estimation.
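For readers who prefer to see the mechanics, the same idea can be expressed directly with `mgcv::bam()`, which accepts a `rho` argument together with an `AR.start` indicator marking the first node of each profile. The sketch below is hypothetical and not the exact call that `tractable_single_tract` constructs: the column names (`tractID`, `nodeID`, `fa`, `group`, `age`) are assumed from the AFQ data, and the real model may include additional terms (for example, subject-level random effects).

```{r manual_ar1_sketch, eval = FALSE}
library(mgcv)

# Sketch only: fit a GAM with an AR1 residual model by hand.
# Assumes rows are ordered by subject and then by nodeID, so that each tract
# profile forms one contiguous series.
df_cst <- df_sarica[df_sarica$tractID == "Right Corticospinal", ]
ar_start <- df_cst$nodeID == min(df_cst$nodeID)  # TRUE at the first node of each profile

fit_ar1 <- bam(
  fa ~ group + age + s(nodeID, by = group, k = 16),
  data     = df_cst,
  rho      = rho_1,    # lag-1 autocorrelation estimated above
  AR.start = ar_start  # marks where each independent series begins
)
```

In practice, `tractable_single_tract` takes care of these details for us, as in the refit below.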
-```{r fit_autocor_model} +```{r fit_model_with_autocor} cst_fit <- tractable_single_tract( df = df_sarica, tract = "Right Corticospinal", target = "fa", regressors = c("age", "group"), node_group = "group", - k = "auto" + node_k = 16 ) cst_summary <- summary(cst_fit) cst_summary ``` -Examining the summary of the resulting GAM fit object shows us that the inclusion of the AR1 model changes the resulting statistics of our model. Although there is still a statistically significant effect of group (p = `r cst_summary$p.table["groupCTRL", "Pr(>|t|)"]`), the value of the t-statistic on this term has changed from `r cst_no_autocor_summary$p.table["groupCTRL", "t value"]` to `r cst_summary$p.table["groupCTRL", "t value"]`. +Examining the summary of the resulting GAM fit object shows us that the inclusion +of the AR1 model changes the resulting statistics of our model. Although there +is still a statistically significant effect of group (p = `r cst_summary$p.table["groupCTRL", "Pr(>|t|)"]`), the value of the t-statistic on this term has changed from `r cst_no_autocor_summary$p.table["groupCTRL", "t value"]` to `r cst_summary$p.table["groupCTRL", "t value"]`, suggesting that the model has become substantially more conservative. + +Here as well, we can appraise the model with `gratia`: + +```{r appraise_fit_cst, fig.align = "center", fig.width = 7.25, fig.height = 7.25} +appraise(cst_fit) +``` -You can also examine the AR1 model residuals for autocorrelations with the `itsadug::acf_resid` function. +Notice some improvements to model characteristics: the residuals are more closely centered on zero and the QQ plot is somewhat improved. There is some residual structure in the scatter plot of residuals as a function of prediction. We can ask how bad this structure is in terms of the residual autocorrelation: -```{r plot_corrected_acf} -acf_resid(cst_fit) +```{r plot_acf_with_autocor, fig.align = "center", fig.width = 7.25, fig.height = 4} +rho_2 <- acf_resid(cst_fit)["2"] # lag-1 autocorrelation of the residuals +rho_2 ``` +This shows that the lag-1 autocorrelation has been reduced from approximately `r rho_1` to approximately `r rho_2`. + +Finally, formal model comparison can tell us which of these models better fits the +data. Using the `itsadug` library, this can be done using the Akaike Information +Criterion as a comparator. In this case, the comparison indicates that the model that +accounts for autocorrelations achieves a better fit for the number of +parameters it uses, suggesting that it is overall a better model of the data.
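As a quick cross-check of the `compareML()` output below, base R's `AIC()` can be applied to the two fits directly, assuming both behave like standard `mgcv` GAM objects; lower values indicate a better fit-versus-complexity trade-off.

```{r aic_sketch, eval = FALSE}
# Sketch only: the model with the lower AIC is preferred.
AIC(cst_fit_no_autocor, cst_fit)
```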
+ +```{r compare_models} +compareML(cst_fit_no_autocor, cst_fit) +``` ## References diff --git a/vignettes/tractable-single-tract.Rmd b/vignettes/tractable-single-tract.Rmd index 29c4b3c..7d46b44 100644 --- a/vignettes/tractable-single-tract.Rmd +++ b/vignettes/tractable-single-tract.Rmd @@ -13,6 +13,7 @@ knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) +options(scipen = 1, digits = 4) # set to four decimal ``` This vignette demonstrates the use of GAMs for statistical analysis of @@ -51,11 +52,11 @@ plot_handles <- plot_tract_profiles( ) ``` -```{r plot_fa, fig.width = 8, fig.height = 6} +```{r plot_fa, fig.align = "center", fig.width = 7.25, fig.height = 4} plot_handles$fa ``` -```{r plot_md, fig.width = 8, fig.height = 6} +```{r plot_md, fig.align = "center", fig.width = 7.25, fig.height = 4} plot_handles$md ``` @@ -85,7 +86,7 @@ cst_summary <- summary(cst_fit) cst_summary ``` -Examining the summary of the resulting GAM fit object shows us that the `k=16` +Examining the summary of the resulting GAM fit object shows us that the `k = 16` is sufficiently large to describe the spatial variation of tract profile data. In addition, we see that there is a statistically significant effect of group (with a p-value of `r cst_summary$p.table["groupCTRL", "Pr(>|t|)"]`) and no statistically significant effect of age