From 04c7f5db1bd375f4b2cdca65606ee4e4bcfe00f2 Mon Sep 17 00:00:00 2001 From: smartinez-yatiribio Date: Tue, 23 Sep 2025 15:00:58 -0700 Subject: [PATCH 1/5] chunked per-worker summarization; avoid exporting full input; --- R/dataProcess.R | 380 ++++++++++++++++++++++++++---------------------- 1 file changed, 205 insertions(+), 175 deletions(-) diff --git a/R/dataProcess.R b/R/dataProcess.R index 4bd00a4d..421198ad 100755 --- a/R/dataProcess.R +++ b/R/dataProcess.R @@ -66,6 +66,7 @@ #' \item{FeatureLevelData}{A data frame with feature-level information after processing. Columns include: #' \describe{ #' \item{PROTEIN}{Identifier for the protein associated with the feature.} +#' \item{PROTEIN}{Identifier for the protein associated with the feature.} #' \item{PEPTIDE}{Identifier for the peptide sequence.} #' \item{TRANSITION}{Identifier for the transition, typically representing a specific ion pair.} #' \item{FEATURE}{Unique identifier for the feature, which could be a combination of peptide and transition.} @@ -126,49 +127,49 @@ dataProcess = function( use_log_file = TRUE, append = FALSE, verbose = TRUE, log_file_path = NULL, numberOfCores = 1 ) { - MSstatsConvert::MSstatsLogsSettings(use_log_file, append, verbose, - log_file_path, - base = "MSstats_dataProcess_log_") - getOption("MSstatsLog")("INFO", "MSstats - dataProcess function") - .checkDataProcessParams( - logTrans, normalization, nameStandards, - list(method = featureSubset, n_top = n_top_feature, - remove_uninformative = remove_uninformative_feature_outlier), - list(method = summaryMethod, equal_var = equalFeatureVar), - list(symbol = censoredInt, MB = MBimpute)) - - peptides_dict = makePeptidesDictionary(as.data.table(unclass(raw)), normalization) - input = MSstatsPrepareForDataProcess(raw, logTrans, fix_missing) - input = MSstatsNormalize(input, normalization, peptides_dict, nameStandards) - input = MSstatsMergeFractions(input) - input = MSstatsHandleMissing(input, summaryMethod, MBimpute, - censoredInt, maxQuantileforCensored) - input = MSstatsSelectFeatures(input, featureSubset, n_top_feature, - min_feature_count) - .logDatasetInformation(input) - getOption("MSstatsLog")("INFO", - "== Start the summarization per subplot...") - getOption("MSstatsMsg")("INFO", - " == Start the summarization per subplot...") - - processed = getProcessed(input) - input = MSstatsPrepareForSummarization(input, summaryMethod, MBimpute, censoredInt, - remove_uninformative_feature_outlier) - summarized = tryCatch(MSstatsSummarizeWithMultipleCores(input, summaryMethod, - MBimpute, censoredInt, - remove50missing, equalFeatureVar, - numberOfCores), - error = function(e) { - print(e) - NULL - }) - getOption("MSstatsLog")("INFO", - "== Summarization is done.") - getOption("MSstatsMsg")("INFO", - " == Summarization is done.") - output = MSstatsSummarizationOutput(input, summarized, processed, - summaryMethod, MBimpute, censoredInt) - output + MSstatsConvert::MSstatsLogsSettings(use_log_file, append, verbose, + log_file_path, + base = "MSstats_dataProcess_log_") + getOption("MSstatsLog")("INFO", "MSstats - dataProcess function") + .checkDataProcessParams( + logTrans, normalization, nameStandards, + list(method = featureSubset, n_top = n_top_feature, + remove_uninformative = remove_uninformative_feature_outlier), + list(method = summaryMethod, equal_var = equalFeatureVar), + list(symbol = censoredInt, MB = MBimpute)) + + peptides_dict = makePeptidesDictionary(as.data.table(unclass(raw)), normalization) + input = MSstatsPrepareForDataProcess(raw, logTrans, 
fix_missing) + input = MSstatsNormalize(input, normalization, peptides_dict, nameStandards) + input = MSstatsMergeFractions(input) + input = MSstatsHandleMissing(input, summaryMethod, MBimpute, + censoredInt, maxQuantileforCensored) + input = MSstatsSelectFeatures(input, featureSubset, n_top_feature, + min_feature_count) + .logDatasetInformation(input) + getOption("MSstatsLog")("INFO", + "== Start the summarization per subplot...") + getOption("MSstatsMsg")("INFO", + " == Start the summarization per subplot...") + + processed = getProcessed(input) + input = MSstatsPrepareForSummarization(input, summaryMethod, MBimpute, censoredInt, + remove_uninformative_feature_outlier) + summarized = tryCatch(MSstatsSummarizeWithMultipleCores(input, summaryMethod, + MBimpute, censoredInt, + remove50missing, equalFeatureVar, + numberOfCores), + error = function(e) { + print(e) + NULL + }) + getOption("MSstatsLog")("INFO", + "== Summarization is done.") + getOption("MSstatsMsg")("INFO", + " == Summarization is done.") + output = MSstatsSummarizationOutput(input, summarized, processed, + summaryMethod, MBimpute, censoredInt) + output } #' Feature-level data summarization with multiple cores @@ -199,52 +200,81 @@ dataProcess = function( #' @importFrom parallel makeCluster parLapply stopCluster clusterExport #' #' @return list of length one with run-level data. -#' -MSstatsSummarizeWithMultipleCores = function(input, method, impute, censored_symbol, - remove50missing, equal_variance, numberOfCores = 1) { - if (numberOfCores > 1) { - protein_indices = split(seq_len(nrow(input)), list(input$PROTEIN)) - num_proteins = length(protein_indices) - function_environment = environment() - cl = parallel::makeCluster(numberOfCores) - getOption("MSstatsLog")("INFO", - "Starting the cluster setup for summarization") - parallel::clusterExport(cl, c("MSstatsSummarizeSingleTMP", - "MSstatsSummarizeSingleLinear", - "input", "impute", "censored_symbol", - "remove50missing", "protein_indices", - "equal_variance"), - envir = function_environment) - cat(paste0("Number of proteins to process: ", num_proteins), - sep = "\n", file = "MSstats_dataProcess_log_progress.log") - if (method == "TMP") { - summarized_results = parallel::parLapply(cl, seq_len(num_proteins), function(i) { - if (i %% 100 == 0) { - cat("Finished processing an additional 100 proteins", - sep = "\n", file = "MSstats_dataProcess_log_progress.log", append = TRUE) - } - single_protein = input[protein_indices[[i]],] - MSstatsSummarizeSingleTMP( - single_protein, impute, censored_symbol, remove50missing) - }) - } else { - summarized_results = parallel::parLapply(cl, seq_len(num_proteins), function(i) { - if (i %% 100 == 0) { - cat("Finished processing an additional 100 proteins", - sep = "\n", file = "MSstats_dataProcess_log_progress.log", append = TRUE) - } - single_protein = input[protein_indices[[i]],] - MSstatsSummarizeSingleLinear(single_protein, equal_variance) - }) - } - parallel::stopCluster(cl) - return(summarized_results) - } else { - return(MSstatsSummarizeWithSingleCore(input, method, impute, censored_symbol, - remove50missing, equal_variance)) +#' +MSstatsSummarizeWithMultipleCores <- function(input, method, impute, censored_symbol, remove50missing, equal_variance, numberOfCores = 1) { + if (numberOfCores <= 1) { + ret <- MSstatsSummarizeWithSingleCore( + input, method, impute, censored_symbol, remove50missing, equal_variance) + return(ret) + } + library(foreach) + library(doParallel) + # ---- group rows by PROTEIN (no list() wrapper needed) ---- + f <- 
as.factor(input$PROTEIN) + protein_indices <- split(seq_len(nrow(input)), f, drop = TRUE) + num_proteins <- length(protein_indices) + + # cap cores to number of groups + ncores <- max(1L, min(numberOfCores, num_proteins)) + + getOption("MSstatsLog")("INFO", "Starting the cluster setup for summarization") + cat(paste0("Number of proteins to process: ", num_proteins), + sep = "\n", file = "MSstats_dataProcess_log_progress.log") + + # ---- choose summarizer once, bind as a variable to export ---- + summarizer_fun <- switch( + method, + "TMP" = MSstatsSummarizeSingleTMP, + MSstatsSummarizeSingleLinear + ) + + # ---- CHUNK the work: split protein groups into ~ncores chunks ---- + # indices of groups: 1..num_proteins -> cut into ncores bins + group_ids <- seq_len(num_proteins) + bins <- cut(group_ids, breaks = ncores, labels = FALSE) + group_chunks <- split(group_ids, bins) + + # For each chunk, build the *actual data frames* to send to that worker. + # This avoids exporting the whole `input` to all workers; each worker + # receives only its own subset frames. + task_payloads <- lapply(group_chunks, function(gidx) { + lapply(gidx, function(k) input[protein_indices[[k]], , drop = FALSE]) + }) + + # ---- spin up the cluster ---- + cl <- makeCluster(ncores) + doParallel::registerDoParallel(cl) + on.exit(parallel::stopCluster(cl), add = TRUE) + + # ---- process: one task per worker, each looping its own chunk ---- + print(paste0("Starting parallel processing with ", length(task_payloads), "...")) + summarized_results <- foreach::foreach( + payload = task_payloads, + .combine = "c", + .packages = NULL , + .export = c("summarizer_fun", "impute", "censored_symbol", "remove50missing") + ) %dopar% { + out <- vector("list", length(payload)) + for (i in seq_along(payload)) { + if (i %% 100 == 0) { + cat("Finished processing an additional 100 proteins", + sep = "\n", file = "MSstats_dataProcess_log_progress.log", append = TRUE) + } + out[[i]] <- summarizer_fun( + payload[[i]], + impute, censored_symbol, remove50missing + ) } + return(out) + } + + # flatten back to a simple list of per-protein results (original order across chunks preserved) + # summarized_results <- do.call(c, summarized_chunks) + + return(summarized_results) } + #' Feature-level data summarization with 1 core #' #' @inheritParams MSstatsSummarizeWithMultipleCores @@ -274,33 +304,33 @@ MSstatsSummarizeWithMultipleCores = function(input, method, impute, censored_sym #' head(summarized[[1]][[1]]) # run-level summary #' MSstatsSummarizeWithSingleCore = function(input, method, impute, censored_symbol, - remove50missing, equal_variance) { - - - protein_indices = split(seq_len(nrow(input)), list(input$PROTEIN)) - num_proteins = length(protein_indices) - summarized_results = vector("list", num_proteins) - if (method == "TMP") { - pb = utils::txtProgressBar(min = 0, max = num_proteins, style = 3) - for (protein_id in seq_len(num_proteins)) { - single_protein = input[protein_indices[[protein_id]],] - summarized_results[[protein_id]] = MSstatsSummarizeSingleTMP( - single_protein, impute, censored_symbol, remove50missing) - setTxtProgressBar(pb, protein_id) - } - close(pb) - } else { - pb = utils::txtProgressBar(min = 0, max = num_proteins, style = 3) - for (protein_id in seq_len(num_proteins)) { - single_protein = input[protein_indices[[protein_id]],] - summarized_result = MSstatsSummarizeSingleLinear(single_protein, - equal_variance) - summarized_results[[protein_id]] = summarized_result - setTxtProgressBar(pb, protein_id) - } - close(pb) + 
remove50missing, equal_variance) { + + + protein_indices = split(seq_len(nrow(input)), list(input$PROTEIN)) + num_proteins = length(protein_indices) + summarized_results = vector("list", num_proteins) + if (method == "TMP") { + pb = utils::txtProgressBar(min = 0, max = num_proteins, style = 3) + for (protein_id in seq_len(num_proteins)) { + single_protein = input[protein_indices[[protein_id]],] + summarized_results[[protein_id]] = MSstatsSummarizeSingleTMP( + single_protein, impute, censored_symbol, remove50missing) + setTxtProgressBar(pb, protein_id) + } + close(pb) + } else { + pb = utils::txtProgressBar(min = 0, max = num_proteins, style = 3) + for (protein_id in seq_len(num_proteins)) { + single_protein = input[protein_indices[[protein_id]],] + summarized_result = MSstatsSummarizeSingleLinear(single_protein, + equal_variance) + summarized_results[[protein_id]] = summarized_result + setTxtProgressBar(pb, protein_id) } - summarized_results + close(pb) + } + summarized_results } #' Linear model-based summarization for a single protein @@ -332,34 +362,34 @@ MSstatsSummarizeWithSingleCore = function(input, method, impute, censored_symbol #' head(single_protein_summary[[1]]) #' MSstatsSummarizeSingleLinear = function(single_protein, equal_variances = TRUE) { - ABUNDANCE = RUN = FEATURE = PROTEIN = LogIntensities = NULL - - label = data.table::uniqueN(single_protein$LABEL) > 1 - single_protein = single_protein[!is.na(ABUNDANCE)] - single_protein[, RUN := factor(RUN)] - single_protein[, FEATURE := factor(FEATURE)] - - counts = xtabs(~ RUN + FEATURE, - data = unique(single_protein[, list(FEATURE, RUN)])) - counts = as.matrix(counts) - is_single_feature = .checkSingleFeature(single_protein) - - fit = try(.fitLinearModel(single_protein, is_single_feature, is_labeled = label, - equal_variances), silent = TRUE) - - if (inherits(fit, "try-error")) { - msg = paste("*** error : can't fit the model for ", unique(single_protein$PROTEIN)) - getOption("MSstatsLog")("WARN", msg) - getOption("MSstatsMsg")("WARN", msg) - result = NULL - } else { - cf = summary(fit)$coefficients[, 1] - result = unique(single_protein[, list(Protein = PROTEIN, RUN = RUN)]) - log_intensities = get_linear_summary(single_protein, cf, - counts, label) - result[, LogIntensities := log_intensities] - } - list(result) + ABUNDANCE = RUN = FEATURE = PROTEIN = LogIntensities = NULL + + label = data.table::uniqueN(single_protein$LABEL) > 1 + single_protein = single_protein[!is.na(ABUNDANCE)] + single_protein[, RUN := factor(RUN)] + single_protein[, FEATURE := factor(FEATURE)] + + counts = xtabs(~ RUN + FEATURE, + data = unique(single_protein[, list(FEATURE, RUN)])) + counts = as.matrix(counts) + is_single_feature = .checkSingleFeature(single_protein) + + fit = try(.fitLinearModel(single_protein, is_single_feature, is_labeled = label, + equal_variances), silent = TRUE) + + if (inherits(fit, "try-error")) { + msg = paste("*** error : can't fit the model for ", unique(single_protein$PROTEIN)) + getOption("MSstatsLog")("WARN", msg) + getOption("MSstatsMsg")("WARN", msg) + result = NULL + } else { + cf = summary(fit)$coefficients[, 1] + result = unique(single_protein[, list(Protein = PROTEIN, RUN = RUN)]) + log_intensities = get_linear_summary(single_protein, cf, + counts, label) + result[, LogIntensities := log_intensities] + } + list(result) } @@ -395,39 +425,39 @@ MSstatsSummarizeSingleLinear = function(single_protein, equal_variances = TRUE) #' MSstatsSummarizeSingleTMP = function(single_protein, impute, censored_symbol, remove50missing) { 
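    # Descriptive overview of the TMP summarization steps below:
    # (1) drop features/runs that do not have enough observations,
    # (2) if impute = TRUE, fit an accelerated failure time (survival) model to
    #     the censored intensities and replace them with predicted values,
    # (3) check that the protein is still summarizable, then run Tukey's median
    #     polish (.runTukey) to obtain one log-abundance value per run.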
- newABUNDANCE = n_obs = n_obs_run = RUN = FEATURE = LABEL = NULL - predicted = censored = NULL - cols = intersect(colnames(single_protein), c("newABUNDANCE", "cen", "RUN", - "FEATURE", "ref")) - single_protein = single_protein[(n_obs > 1 & !is.na(n_obs)) & - (n_obs_run > 0 & !is.na(n_obs_run))] - if (nrow(single_protein) == 0) { - return(list(NULL, NULL)) - } - single_protein[, RUN := factor(RUN)] - single_protein[, FEATURE := factor(FEATURE)] - if (impute & any(single_protein[["censored"]])) { - survival_fit = .fitSurvival(single_protein[LABEL == "L", cols, - with = FALSE]) - single_protein[, predicted := predict(survival_fit, - newdata = .SD)] - single_protein[, predicted := ifelse(censored & (LABEL == "L"), predicted, NA)] - single_protein[, newABUNDANCE := ifelse(censored & LABEL == "L", - predicted, newABUNDANCE)] - survival = single_protein[, c(cols, "predicted"), with = FALSE] - } else { - survival = single_protein[, cols, with = FALSE] - survival[, predicted := NA] - } - - single_protein = .isSummarizable(single_protein, remove50missing) - if (is.null(single_protein)) { - return(list(NULL, NULL)) - } else { - single_protein = single_protein[!is.na(newABUNDANCE), ] - is_labeled = nlevels(single_protein$LABEL) > 1 - result = .runTukey(single_protein, is_labeled, censored_symbol, - remove50missing) - } - list(result, survival) + newABUNDANCE = n_obs = n_obs_run = RUN = FEATURE = LABEL = NULL + predicted = censored = NULL + cols = intersect(colnames(single_protein), c("newABUNDANCE", "cen", "RUN", + "FEATURE", "ref")) + single_protein = single_protein[(n_obs > 1 & !is.na(n_obs)) & + (n_obs_run > 0 & !is.na(n_obs_run))] + if (nrow(single_protein) == 0) { + return(list(NULL, NULL)) + } + single_protein[, RUN := factor(RUN)] + single_protein[, FEATURE := factor(FEATURE)] + if (impute & any(single_protein[["censored"]])) { + survival_fit = .fitSurvival(single_protein[LABEL == "L", cols, + with = FALSE]) + single_protein[, predicted := predict(survival_fit, + newdata = .SD)] + single_protein[, predicted := ifelse(censored & (LABEL == "L"), predicted, NA)] + single_protein[, newABUNDANCE := ifelse(censored & LABEL == "L", + predicted, newABUNDANCE)] + survival = single_protein[, c(cols, "predicted"), with = FALSE] + } else { + survival = single_protein[, cols, with = FALSE] + survival[, predicted := NA] + } + + single_protein = .isSummarizable(single_protein, remove50missing) + if (is.null(single_protein)) { + return(list(NULL, NULL)) + } else { + single_protein = single_protein[!is.na(newABUNDANCE), ] + is_labeled = nlevels(single_protein$LABEL) > 1 + result = .runTukey(single_protein, is_labeled, censored_symbol, + remove50missing) + } + list(result, survival) } From 9d93bb0240530c62926f4f4c2c3673b4db3a8231 Mon Sep 17 00:00:00 2001 From: smartinez-yatiribio Date: Tue, 23 Sep 2025 15:05:04 -0700 Subject: [PATCH 2/5] styling with styler --- R/dataProcess.R | 492 ++++++++++++++++++++++++++---------------------- 1 file changed, 262 insertions(+), 230 deletions(-) diff --git a/R/dataProcess.R b/R/dataProcess.R index 421198ad..6c4d1f62 100755 --- a/R/dataProcess.R +++ b/R/dataProcess.R @@ -1,66 +1,66 @@ #' Process MS data: clean, normalize and summarize before differential analysis -#' +#' #' @param raw name of the raw (input) data set. #' @param logTrans base of logarithm transformation: 2 (default) or 10. -#' @param normalization normalization to remove systematic bias between MS runs. +#' @param normalization normalization to remove systematic bias between MS runs. 
#' There are three different normalizations supported: -#' 'equalizeMedians' (default) represents constant normalization (equalizing the medians) -#' based on reference signals is performed. -#' 'quantile' represents quantile normalization based on reference signals -#' 'globalStandards' represents normalization with global standards proteins. -#' If FALSE, no normalization is performed. See MSstats vignettes for +#' 'equalizeMedians' (default) represents constant normalization (equalizing the medians) +#' based on reference signals is performed. +#' 'quantile' represents quantile normalization based on reference signals +#' 'globalStandards' represents normalization with global standards proteins. +#' If FALSE, no normalization is performed. See MSstats vignettes for #' recommendations on which normalization option to use. -#' @param nameStandards optional vector of global standard peptide names. +#' @param nameStandards optional vector of global standard peptide names. #' Required only for normalization with global standard peptides. -#' @param featureSubset "all" (default) uses all features that the data set has. -#' "top3" uses top 3 features which have highest average of log-intensity across runs. -#' "topN" uses top N features which has highest average of log-intensity across runs. -#' It needs the input for n_top_feature option. -#' "highQuality" flags uninformative feature and outliers. See MSstats vignettes for +#' @param featureSubset "all" (default) uses all features that the data set has. +#' "top3" uses top 3 features which have highest average of log-intensity across runs. +#' "topN" uses top N features which has highest average of log-intensity across runs. +#' It needs the input for n_top_feature option. +#' "highQuality" flags uninformative feature and outliers. See MSstats vignettes for #' recommendations on which feature selection option to use. -#' @param remove_uninformative_feature_outlier optional. Only required if -#' featureSubset = "highQuality". TRUE allows to remove +#' @param remove_uninformative_feature_outlier optional. Only required if +#' featureSubset = "highQuality". TRUE allows to remove #' 1) noisy features (flagged in the column feature_quality with "Uninformative"), -#' 2) outliers (flagged in the column, is_outlier with TRUE, -#' before run-level summarization. FALSE (default) uses all features and intensities +#' 2) outliers (flagged in the column, is_outlier with TRUE, +#' before run-level summarization. FALSE (default) uses all features and intensities #' for run-level summarization. #' @param min_feature_count optional. Only required if featureSubset = "highQuality". #' Defines a minimum number of informative features a protein needs to be considered #' in the feature selection algorithm. -#' @param n_top_feature optional. Only required if featureSubset = 'topN'. +#' @param n_top_feature optional. Only required if featureSubset = 'topN'. #' It that case, it specifies number of top features that will be used. #' Default is 3, which means to use top 3 features. -#' @param summaryMethod "TMP" (default) means Tukey's median polish, +#' @param summaryMethod "TMP" (default) means Tukey's median polish, #' which is robust estimation method. "linear" uses linear mixed model. -#' @param equalFeatureVar only for summaryMethod = "linear". default is TRUE. -#' Logical variable for whether the model should account for heterogeneous variation -#' among intensities from different features. 
Default is TRUE, which assume equal -#' variance among intensities from features. FALSE means that we cannot assume equal -#' variance among intensities from features, then we will account for heterogeneous +#' @param equalFeatureVar only for summaryMethod = "linear". default is TRUE. +#' Logical variable for whether the model should account for heterogeneous variation +#' among intensities from different features. Default is TRUE, which assume equal +#' variance among intensities from features. FALSE means that we cannot assume equal +#' variance among intensities from features, then we will account for heterogeneous #' variation from different features. -#' @param censoredInt Missing values are censored or at random. -#' 'NA' (default) assumes that all 'NA's in 'Intensity' column are censored. -#' '0' uses zero intensities as censored intensity. -#' In this case, NA intensities are missing at random. -#' The output from Skyline should use '0'. +#' @param censoredInt Missing values are censored or at random. +#' 'NA' (default) assumes that all 'NA's in 'Intensity' column are censored. +#' '0' uses zero intensities as censored intensity. +#' In this case, NA intensities are missing at random. +#' The output from Skyline should use '0'. #' Null assumes that all NA intensites are randomly missing. -#' @param MBimpute only for summaryMethod = "TMP" and censoredInt = 'NA' or '0'. -#' TRUE (default) imputes missing values with 'NA' or '0' (depending on censoredInt option) -#' by Accelerated failure model. If set to FALSE, no missing values are imputed. +#' @param MBimpute only for summaryMethod = "TMP" and censoredInt = 'NA' or '0'. +#' TRUE (default) imputes missing values with 'NA' or '0' (depending on censoredInt option) +#' by Accelerated failure model. If set to FALSE, no missing values are imputed. #' FALSE is appropriate only when missingness is assumed to be at random. #' See MSstats vignettes for recommendations on which imputation option to use. -#' @param remove50missing only for summaryMethod = "TMP". TRUE removes the proteins +#' @param remove50missing only for summaryMethod = "TMP". TRUE removes the proteins #' where every run has at least 50\% missing values for each peptide. FALSE is default. #' @param maxQuantileforCensored Maximum quantile for deciding censored missing values, default is 0.999 #' @param fix_missing Optional, same as the `fix_missing` parameter in MSstatsConvert::MSstatsBalancedDesign function -#' @param numberOfCores Number of cores for parallel processing. When > 1, -#' a logfile named `MSstats_dataProcess_log_progress.log` is created to +#' @param numberOfCores Number of cores for parallel processing. When > 1, +#' a logfile named `MSstats_dataProcess_log_progress.log` is created to #' track progress. Only works for Linux & Mac OS. Default is 1. #' @inheritParams .documentFunction -#' +#' #' @importFrom utils sessionInfo #' @importFrom data.table as.data.table -#' +#' #' @return A list containing: #' \describe{ #' \item{FeatureLevelData}{A data frame with feature-level information after processing. Columns include: @@ -99,112 +99,128 @@ #' } #' } #' } -#' +#' #' @export -#' -#' @examples +#' +#' @examples #' # Consider a raw data (i.e. SRMRawData) for a label-based SRM experiment from a yeast study #' # with ten time points (T1-T10) of interests and three biological replicates. #' # It is a time course experiment. The goal is to detect protein abundance changes #' # across time points. 
#' head(SRMRawData) #' # Log2 transformation and normalization are applied (default) -#' QuantData<-dataProcess(SRMRawData, use_log_file = FALSE) +#' QuantData <- dataProcess(SRMRawData, use_log_file = FALSE) #' head(QuantData$FeatureLevelData) #' # Log10 transformation and normalization are applied -#' QuantData1<-dataProcess(SRMRawData, logTrans=10, use_log_file = FALSE) +#' QuantData1 <- dataProcess(SRMRawData, logTrans = 10, use_log_file = FALSE) #' head(QuantData1$FeatureLevelData) #' # Log2 transformation and no normalization are applied -#' QuantData2<-dataProcess(SRMRawData,normalization=FALSE, use_log_file = FALSE) +#' QuantData2 <- dataProcess(SRMRawData, normalization = FALSE, use_log_file = FALSE) #' head(QuantData2$FeatureLevelData) -#' -dataProcess = function( +#' +dataProcess <- function( raw, logTrans = 2, normalization = "equalizeMedians", nameStandards = NULL, - featureSubset = "all", remove_uninformative_feature_outlier = FALSE, - min_feature_count = 2, n_top_feature = 3, summaryMethod = "TMP", - equalFeatureVar = TRUE, censoredInt = "NA", MBimpute = TRUE, - remove50missing = FALSE, fix_missing = NULL, maxQuantileforCensored = 0.999, + featureSubset = "all", remove_uninformative_feature_outlier = FALSE, + min_feature_count = 2, n_top_feature = 3, summaryMethod = "TMP", + equalFeatureVar = TRUE, censoredInt = "NA", MBimpute = TRUE, + remove50missing = FALSE, fix_missing = NULL, maxQuantileforCensored = 0.999, use_log_file = TRUE, append = FALSE, verbose = TRUE, log_file_path = NULL, - numberOfCores = 1 -) { - MSstatsConvert::MSstatsLogsSettings(use_log_file, append, verbose, - log_file_path, - base = "MSstats_dataProcess_log_") + numberOfCores = 1) { + MSstatsConvert::MSstatsLogsSettings(use_log_file, append, verbose, + log_file_path, + base = "MSstats_dataProcess_log_" + ) getOption("MSstatsLog")("INFO", "MSstats - dataProcess function") .checkDataProcessParams( logTrans, normalization, nameStandards, - list(method = featureSubset, n_top = n_top_feature, - remove_uninformative = remove_uninformative_feature_outlier), + list( + method = featureSubset, n_top = n_top_feature, + remove_uninformative = remove_uninformative_feature_outlier + ), list(method = summaryMethod, equal_var = equalFeatureVar), - list(symbol = censoredInt, MB = MBimpute)) - - peptides_dict = makePeptidesDictionary(as.data.table(unclass(raw)), normalization) - input = MSstatsPrepareForDataProcess(raw, logTrans, fix_missing) - input = MSstatsNormalize(input, normalization, peptides_dict, nameStandards) - input = MSstatsMergeFractions(input) - input = MSstatsHandleMissing(input, summaryMethod, MBimpute, - censoredInt, maxQuantileforCensored) - input = MSstatsSelectFeatures(input, featureSubset, n_top_feature, - min_feature_count) + list(symbol = censoredInt, MB = MBimpute) + ) + + peptides_dict <- makePeptidesDictionary(as.data.table(unclass(raw)), normalization) + input <- MSstatsPrepareForDataProcess(raw, logTrans, fix_missing) + input <- MSstatsNormalize(input, normalization, peptides_dict, nameStandards) + input <- MSstatsMergeFractions(input) + input <- MSstatsHandleMissing( + input, summaryMethod, MBimpute, + censoredInt, maxQuantileforCensored + ) + input <- MSstatsSelectFeatures( + input, featureSubset, n_top_feature, + min_feature_count + ) .logDatasetInformation(input) getOption("MSstatsLog")("INFO", - "== Start the summarization per subplot...") + "== Start the summarization per subplot...") getOption("MSstatsMsg")("INFO", - " == Start the summarization per subplot...") - - processed = 
getProcessed(input) - input = MSstatsPrepareForSummarization(input, summaryMethod, MBimpute, censoredInt, - remove_uninformative_feature_outlier) - summarized = tryCatch(MSstatsSummarizeWithMultipleCores(input, summaryMethod, - MBimpute, censoredInt, - remove50missing, equalFeatureVar, - numberOfCores), - error = function(e) { - print(e) - NULL - }) + " == Start the summarization per subplot...") + + processed <- getProcessed(input) + input <- MSstatsPrepareForSummarization( + input, summaryMethod, MBimpute, censoredInt, + remove_uninformative_feature_outlier + ) + summarized <- tryCatch( + MSstatsSummarizeWithMultipleCores( + input, summaryMethod, + MBimpute, censoredInt, + remove50missing, equalFeatureVar, + numberOfCores + ), + error = function(e) { + print(e) + NULL + } + ) getOption("MSstatsLog")("INFO", - "== Summarization is done.") + "== Summarization is done.") getOption("MSstatsMsg")("INFO", - " == Summarization is done.") - output = MSstatsSummarizationOutput(input, summarized, processed, - summaryMethod, MBimpute, censoredInt) + " == Summarization is done.") + output <- MSstatsSummarizationOutput( + input, summarized, processed, + summaryMethod, MBimpute, censoredInt + ) output } #' Feature-level data summarization with multiple cores -#' +#' #' @param input feature-level data processed by dataProcess subfunctions -#' @param method summarization method: "linear" or "TMP" -#' @param equal_variance only for summaryMethod = "linear". Default is TRUE. -#' Logical variable for whether the model should account for heterogeneous variation +#' @param method summarization method: "linear" or "TMP" +#' @param equal_variance only for summaryMethod = "linear". Default is TRUE. +#' Logical variable for whether the model should account for heterogeneous variation #' among intensities from different features. Default is TRUE, which assume equal -#' variance among intensities from features. FALSE means that we cannot assume +#' variance among intensities from features. FALSE means that we cannot assume #' equal variance among intensities from features, then we will account for #' heterogeneous variation from different features. -#' @param censored_symbol Missing values are censored or at random. -#' 'NA' (default) assumes that all 'NA's in 'Intensity' column are censored. -#' '0' uses zero intensities as censored intensity. -#' In this case, NA intensities are missing at random. -#' The output from Skyline should use '0'. +#' @param censored_symbol Missing values are censored or at random. +#' 'NA' (default) assumes that all 'NA's in 'Intensity' column are censored. +#' '0' uses zero intensities as censored intensity. +#' In this case, NA intensities are missing at random. +#' The output from Skyline should use '0'. #' Null assumes that all NA intensites are randomly missing. -#' @param remove50missing only for summaryMethod = "TMP". TRUE removes the proteins +#' @param remove50missing only for summaryMethod = "TMP". TRUE removes the proteins #' where every run has at least 50\% missing values for each peptide. FALSE is default. -#' @param impute only for summaryMethod = "TMP" and censoredInt = 'NA' or '0'. -#' TRUE (default) imputes 'NA' or '0' (depending on censoredInt option) by Accelated failure model. +#' @param impute only for summaryMethod = "TMP" and censoredInt = 'NA' or '0'. +#' TRUE (default) imputes 'NA' or '0' (depending on censoredInt option) by Accelated failure model. 
#' FALSE uses the values assigned by cutoffCensored -#' @param numberOfCores Number of cores for parallel processing. When > 1, -#' a logfile named `MSstats_dataProcess_log_progress.log` is created to +#' @param numberOfCores Number of cores for parallel processing. When > 1, +#' a logfile named `MSstats_dataProcess_log_progress.log` is created to #' track progress. Only works for Linux & Mac OS. Default is 1. -#' -#' @importFrom parallel makeCluster parLapply stopCluster clusterExport -#' +#' +#' @importFrom parallel makeCluster parLapply stopCluster clusterExport +#' #' @return list of length one with run-level data. #' MSstatsSummarizeWithMultipleCores <- function(input, method, impute, censored_symbol, remove50missing, equal_variance, numberOfCores = 1) { if (numberOfCores <= 1) { ret <- MSstatsSummarizeWithSingleCore( - input, method, impute, censored_symbol, remove50missing, equal_variance) + input, method, impute, censored_symbol, remove50missing, equal_variance + ) return(ret) } library(foreach) @@ -213,52 +229,53 @@ MSstatsSummarizeWithMultipleCores <- function(input, method, impute, censored_sy f <- as.factor(input$PROTEIN) protein_indices <- split(seq_len(nrow(input)), f, drop = TRUE) num_proteins <- length(protein_indices) - + # cap cores to number of groups ncores <- max(1L, min(numberOfCores, num_proteins)) - + getOption("MSstatsLog")("INFO", "Starting the cluster setup for summarization") cat(paste0("Number of proteins to process: ", num_proteins), - sep = "\n", file = "MSstats_dataProcess_log_progress.log") - + sep = "\n", file = "MSstats_dataProcess_log_progress.log" + ) + # ---- choose summarizer once, bind as a variable to export ---- - summarizer_fun <- switch( - method, - "TMP" = MSstatsSummarizeSingleTMP, + summarizer_fun <- switch(method, + "TMP" = MSstatsSummarizeSingleTMP, MSstatsSummarizeSingleLinear ) - + # ---- CHUNK the work: split protein groups into ~ncores chunks ---- # indices of groups: 1..num_proteins -> cut into ncores bins group_ids <- seq_len(num_proteins) bins <- cut(group_ids, breaks = ncores, labels = FALSE) group_chunks <- split(group_ids, bins) - + # For each chunk, build the *actual data frames* to send to that worker. # This avoids exporting the whole `input` to all workers; each worker # receives only its own subset frames. 
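  # Illustrative example with hypothetical sizes: if num_proteins = 10 and
  # ncores = 3, cut(1:10, breaks = 3, labels = FALSE) assigns the group ids to
  # bins 1,1,1,1,2,2,2,3,3,3, so split() produces three chunks of protein
  # groups (sizes 4, 3 and 3); each worker later receives only the subset of
  # `input` rows that belongs to the proteins in its own chunk.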
task_payloads <- lapply(group_chunks, function(gidx) { lapply(gidx, function(k) input[protein_indices[[k]], , drop = FALSE]) }) - + # ---- spin up the cluster ---- cl <- makeCluster(ncores) doParallel::registerDoParallel(cl) on.exit(parallel::stopCluster(cl), add = TRUE) - + # ---- process: one task per worker, each looping its own chunk ---- - print(paste0("Starting parallel processing with ", length(task_payloads), "...")) + print(paste0("Starting parallel processing with ", length(task_payloads), "...")) summarized_results <- foreach::foreach( payload = task_payloads, .combine = "c", - .packages = NULL , + .packages = NULL, .export = c("summarizer_fun", "impute", "censored_symbol", "remove50missing") ) %dopar% { out <- vector("list", length(payload)) for (i in seq_along(payload)) { if (i %% 100 == 0) { cat("Finished processing an additional 100 proteins", - sep = "\n", file = "MSstats_dataProcess_log_progress.log", append = TRUE) + sep = "\n", file = "MSstats_dataProcess_log_progress.log", append = TRUE + ) } out[[i]] <- summarizer_fun( payload[[i]], @@ -267,65 +284,66 @@ MSstatsSummarizeWithMultipleCores <- function(input, method, impute, censored_sy } return(out) } - + # flatten back to a simple list of per-protein results (original order across chunks preserved) # summarized_results <- do.call(c, summarized_chunks) - + return(summarized_results) } #' Feature-level data summarization with 1 core -#' +#' #' @inheritParams MSstatsSummarizeWithMultipleCores -#' +#' #' @importFrom data.table uniqueN #' @importFrom utils setTxtProgressBar -#' +#' #' @return list of length one with run-level data. -#' +#' #' @export -#' +#' #' @examples -#' raw = DDARawData -#' method = "TMP" -#' cens = "NA" -#' impute = TRUE +#' raw <- DDARawData +#' method <- "TMP" +#' cens <- "NA" +#' impute <- TRUE #' MSstatsConvert::MSstatsLogsSettings(FALSE) -#' input = MSstatsPrepareForDataProcess(raw, 2, NULL) -#' input = MSstatsNormalize(input, "EQUALIZEMEDIANS") -#' input = MSstatsMergeFractions(input) -#' input = MSstatsHandleMissing(input, "TMP", TRUE, "NA", 0.999) -#' input = MSstatsSelectFeatures(input, "all") -#' processed = getProcessed(input) -#' input = MSstatsPrepareForSummarization(input, method, impute, cens, FALSE) -#' summarized = MSstatsSummarizeWithSingleCore(input, method, impute, cens, FALSE, TRUE) +#' input <- MSstatsPrepareForDataProcess(raw, 2, NULL) +#' input <- MSstatsNormalize(input, "EQUALIZEMEDIANS") +#' input <- MSstatsMergeFractions(input) +#' input <- MSstatsHandleMissing(input, "TMP", TRUE, "NA", 0.999) +#' input <- MSstatsSelectFeatures(input, "all") +#' processed <- getProcessed(input) +#' input <- MSstatsPrepareForSummarization(input, method, impute, cens, FALSE) +#' summarized <- MSstatsSummarizeWithSingleCore(input, method, impute, cens, FALSE, TRUE) #' length(summarized) # list of summarization outputs for each protein #' head(summarized[[1]][[1]]) # run-level summary -#' -MSstatsSummarizeWithSingleCore = function(input, method, impute, censored_symbol, - remove50missing, equal_variance) { - - - protein_indices = split(seq_len(nrow(input)), list(input$PROTEIN)) - num_proteins = length(protein_indices) - summarized_results = vector("list", num_proteins) +#' +MSstatsSummarizeWithSingleCore <- function(input, method, impute, censored_symbol, + remove50missing, equal_variance) { + protein_indices <- split(seq_len(nrow(input)), list(input$PROTEIN)) + num_proteins <- length(protein_indices) + summarized_results <- vector("list", num_proteins) if (method == "TMP") { - pb = 
utils::txtProgressBar(min = 0, max = num_proteins, style = 3) + pb <- utils::txtProgressBar(min = 0, max = num_proteins, style = 3) for (protein_id in seq_len(num_proteins)) { - single_protein = input[protein_indices[[protein_id]],] - summarized_results[[protein_id]] = MSstatsSummarizeSingleTMP( - single_protein, impute, censored_symbol, remove50missing) + single_protein <- input[protein_indices[[protein_id]], ] + summarized_results[[protein_id]] <- MSstatsSummarizeSingleTMP( + single_protein, impute, censored_symbol, remove50missing + ) setTxtProgressBar(pb, protein_id) } close(pb) } else { - pb = utils::txtProgressBar(min = 0, max = num_proteins, style = 3) + pb <- utils::txtProgressBar(min = 0, max = num_proteins, style = 3) for (protein_id in seq_len(num_proteins)) { - single_protein = input[protein_indices[[protein_id]],] - summarized_result = MSstatsSummarizeSingleLinear(single_protein, - equal_variance) - summarized_results[[protein_id]] = summarized_result + single_protein <- input[protein_indices[[protein_id]], ] + summarized_result <- MSstatsSummarizeSingleLinear( + single_protein, + equal_variance + ) + summarized_results[[protein_id]] <- summarized_result setTxtProgressBar(pb, protein_id) } close(pb) @@ -334,59 +352,64 @@ MSstatsSummarizeWithSingleCore = function(input, method, impute, censored_symbol } #' Linear model-based summarization for a single protein -#' +#' #' @param single_protein feature-level data for a single protein #' @param equal_variances if TRUE, observation are assumed to be homoskedastic -#' +#' #' @return list with protein-level data -#' +#' #' @importFrom stats xtabs -#' +#' #' @export -#' +#' #' @examples -#' raw = DDARawData -#' method = "linear" -#' cens = NULL -#' impute = FALSE +#' raw <- DDARawData +#' method <- "linear" +#' cens <- NULL +#' impute <- FALSE #' # currently, MSstats only supports MBimpute = FALSE for linear summarization #' MSstatsConvert::MSstatsLogsSettings(FALSE) -#' input = MSstatsPrepareForDataProcess(raw, 2, NULL) -#' input = MSstatsNormalize(input, "EQUALIZEMEDIANS") -#' input = MSstatsMergeFractions(input) -#' input = MSstatsHandleMissing(input, "TMP", TRUE, "NA", 0.999) -#' input = MSstatsSelectFeatures(input, "all") -#' input = MSstatsPrepareForSummarization(input, method, impute, cens, FALSE) -#' input_split = split(input, input$PROTEIN) -#' single_protein_summary = MSstatsSummarizeSingleLinear(input_split[[1]]) +#' input <- MSstatsPrepareForDataProcess(raw, 2, NULL) +#' input <- MSstatsNormalize(input, "EQUALIZEMEDIANS") +#' input <- MSstatsMergeFractions(input) +#' input <- MSstatsHandleMissing(input, "TMP", TRUE, "NA", 0.999) +#' input <- MSstatsSelectFeatures(input, "all") +#' input <- MSstatsPrepareForSummarization(input, method, impute, cens, FALSE) +#' input_split <- split(input, input$PROTEIN) +#' single_protein_summary <- MSstatsSummarizeSingleLinear(input_split[[1]]) #' head(single_protein_summary[[1]]) -#' -MSstatsSummarizeSingleLinear = function(single_protein, equal_variances = TRUE) { - ABUNDANCE = RUN = FEATURE = PROTEIN = LogIntensities = NULL - - label = data.table::uniqueN(single_protein$LABEL) > 1 - single_protein = single_protein[!is.na(ABUNDANCE)] +#' +MSstatsSummarizeSingleLinear <- function(single_protein, equal_variances = TRUE) { + ABUNDANCE <- RUN <- FEATURE <- PROTEIN <- LogIntensities <- NULL + + label <- data.table::uniqueN(single_protein$LABEL) > 1 + single_protein <- single_protein[!is.na(ABUNDANCE)] single_protein[, RUN := factor(RUN)] single_protein[, FEATURE := factor(FEATURE)] - - counts = 
xtabs(~ RUN + FEATURE, - data = unique(single_protein[, list(FEATURE, RUN)])) - counts = as.matrix(counts) - is_single_feature = .checkSingleFeature(single_protein) - - fit = try(.fitLinearModel(single_protein, is_single_feature, is_labeled = label, - equal_variances), silent = TRUE) - + + counts <- xtabs(~ RUN + FEATURE, + data = unique(single_protein[, list(FEATURE, RUN)]) + ) + counts <- as.matrix(counts) + is_single_feature <- .checkSingleFeature(single_protein) + + fit <- try(.fitLinearModel(single_protein, is_single_feature, + is_labeled = label, + equal_variances + ), silent = TRUE) + if (inherits(fit, "try-error")) { - msg = paste("*** error : can't fit the model for ", unique(single_protein$PROTEIN)) + msg <- paste("*** error : can't fit the model for ", unique(single_protein$PROTEIN)) getOption("MSstatsLog")("WARN", msg) getOption("MSstatsMsg")("WARN", msg) - result = NULL + result <- NULL } else { - cf = summary(fit)$coefficients[, 1] - result = unique(single_protein[, list(Protein = PROTEIN, RUN = RUN)]) - log_intensities = get_linear_summary(single_protein, cf, - counts, label) + cf <- summary(fit)$coefficients[, 1] + result <- unique(single_protein[, list(Protein = PROTEIN, RUN = RUN)]) + log_intensities <- get_linear_summary( + single_protein, cf, + counts, label + ) result[, LogIntensities := log_intensities] } list(result) @@ -394,70 +417,79 @@ MSstatsSummarizeSingleLinear = function(single_protein, equal_variances = TRUE) #' Tukey Median Polish summarization for a single protein -#' +#' #' @param single_protein feature-level data for a single protein #' @inheritParams MSstatsSummarizeWithSingleCore -#' +#' #' @return list of two data.tables: one with fitted survival model, #' the other with protein-level data -#' +#' #' @importFrom stats predict -#' +#' #' @export -#' +#' #' @examples -#' raw = DDARawData -#' method = "TMP" -#' cens = "NA" -#' impute = TRUE +#' raw <- DDARawData +#' method <- "TMP" +#' cens <- "NA" +#' impute <- TRUE #' # currently, MSstats only supports MBimpute = FALSE for linear summarization #' MSstatsConvert::MSstatsLogsSettings(FALSE) -#' input = MSstatsPrepareForDataProcess(raw, 2, NULL) -#' input = MSstatsNormalize(input, "EQUALIZEMEDIANS") -#' input = MSstatsMergeFractions(input) -#' input = MSstatsHandleMissing(input, "TMP", TRUE, "NA", 0.999) -#' input = MSstatsSelectFeatures(input, "all") -#' input = MSstatsPrepareForSummarization(input, method, impute, cens, FALSE) -#' input_split = split(input, input$PROTEIN) -#' single_protein_summary = MSstatsSummarizeSingleTMP(input_split[[1]], -#' impute, cens, FALSE) +#' input <- MSstatsPrepareForDataProcess(raw, 2, NULL) +#' input <- MSstatsNormalize(input, "EQUALIZEMEDIANS") +#' input <- MSstatsMergeFractions(input) +#' input <- MSstatsHandleMissing(input, "TMP", TRUE, "NA", 0.999) +#' input <- MSstatsSelectFeatures(input, "all") +#' input <- MSstatsPrepareForSummarization(input, method, impute, cens, FALSE) +#' input_split <- split(input, input$PROTEIN) +#' single_protein_summary <- MSstatsSummarizeSingleTMP( +#' input_split[[1]], +#' impute, cens, FALSE +#' ) #' head(single_protein_summary[[1]]) -#' -MSstatsSummarizeSingleTMP = function(single_protein, impute, censored_symbol, - remove50missing) { - newABUNDANCE = n_obs = n_obs_run = RUN = FEATURE = LABEL = NULL - predicted = censored = NULL - cols = intersect(colnames(single_protein), c("newABUNDANCE", "cen", "RUN", - "FEATURE", "ref")) - single_protein = single_protein[(n_obs > 1 & !is.na(n_obs)) & - (n_obs_run > 0 & !is.na(n_obs_run))] +#' 
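# Illustrative sketch only (toy made-up data, not the internal .runTukey call):
# Tukey's median polish on a small feature-by-run matrix of log2-intensities.
# The run-level summary is the overall effect plus each run (column) effect,
# which is the idea behind the TMP summarization implemented below.
# toy_intensities <- matrix(
#   c(20.1, 20.5, 19.8,
#     18.9, 19.2, 18.6,
#     21.0, 21.4, 20.7),
#   nrow = 3, byrow = TRUE,
#   dimnames = list(paste0("feature_", 1:3), paste0("run_", 1:3))
# )
# toy_fit <- stats::medpolish(toy_intensities, trace.iter = FALSE)
# toy_run_summary <- toy_fit$overall + toy_fit$col # one summarized value per run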
+MSstatsSummarizeSingleTMP <- function(single_protein, impute, censored_symbol, + remove50missing) { + newABUNDANCE <- n_obs <- n_obs_run <- RUN <- FEATURE <- LABEL <- NULL + predicted <- censored <- NULL + cols <- intersect(colnames(single_protein), c( + "newABUNDANCE", "cen", "RUN", + "FEATURE", "ref" + )) + single_protein <- single_protein[(n_obs > 1 & !is.na(n_obs)) & + (n_obs_run > 0 & !is.na(n_obs_run))] if (nrow(single_protein) == 0) { return(list(NULL, NULL)) } single_protein[, RUN := factor(RUN)] single_protein[, FEATURE := factor(FEATURE)] if (impute & any(single_protein[["censored"]])) { - survival_fit = .fitSurvival(single_protein[LABEL == "L", cols, - with = FALSE]) + survival_fit <- .fitSurvival(single_protein[LABEL == "L", cols, + with = FALSE + ]) single_protein[, predicted := predict(survival_fit, - newdata = .SD)] + newdata = .SD + )] single_protein[, predicted := ifelse(censored & (LABEL == "L"), predicted, NA)] single_protein[, newABUNDANCE := ifelse(censored & LABEL == "L", - predicted, newABUNDANCE)] - survival = single_protein[, c(cols, "predicted"), with = FALSE] + predicted, newABUNDANCE + )] + survival <- single_protein[, c(cols, "predicted"), with = FALSE] } else { - survival = single_protein[, cols, with = FALSE] + survival <- single_protein[, cols, with = FALSE] survival[, predicted := NA] } - - single_protein = .isSummarizable(single_protein, remove50missing) + + single_protein <- .isSummarizable(single_protein, remove50missing) if (is.null(single_protein)) { return(list(NULL, NULL)) } else { - single_protein = single_protein[!is.na(newABUNDANCE), ] - is_labeled = nlevels(single_protein$LABEL) > 1 - result = .runTukey(single_protein, is_labeled, censored_symbol, - remove50missing) + single_protein <- single_protein[!is.na(newABUNDANCE), ] + is_labeled <- nlevels(single_protein$LABEL) > 1 + result <- .runTukey( + single_protein, is_labeled, censored_symbol, + remove50missing + ) } list(result, survival) } From 8a08ce05a6b4811b6feb923193b741670a422378 Mon Sep 17 00:00:00 2001 From: smartinez-yatiribio Date: Tue, 23 Sep 2025 15:10:51 -0700 Subject: [PATCH 3/5] devtools::document() --- DESCRIPTION | 2 +- R/utils_groupcomparison.R | 786 +++++++++++--------- R/utils_groupcomparison_checks.R | 98 +-- R/utils_groupcomparison_contrasts.R | 151 ++-- R/utils_groupcomparison_plots.R | 567 +++++++------- R/utils_imputation.R | 106 +-- R/utils_normalize.R | 587 ++++++++------- R/utils_output.R | 313 ++++---- R/utils_plots_common.R | 207 +++--- R/utils_statistics.R | 331 +++++---- R/utils_summarization.R | 313 ++++---- R/utils_summarization_prepare.R | 258 +++---- man/MSstatsContrastMatrix.Rd | 2 +- man/MSstatsGroupComparison.Rd | 20 +- man/MSstatsGroupComparisonOutput.Rd | 26 +- man/MSstatsGroupComparisonSingleProtein.Rd | 7 +- man/MSstatsHandleMissing.Rd | 20 +- man/MSstatsMergeFractions.Rd | 14 +- man/MSstatsNormalize.Rd | 16 +- man/MSstatsPrepareForDataProcess.Rd | 10 +- man/MSstatsPrepareForGroupComparison.Rd | 4 +- man/MSstatsPrepareForSummarization.Rd | 16 +- man/MSstatsSelectFeatures.Rd | 22 +- man/MSstatsSummarizationOutput.Rd | 34 +- man/MSstatsSummarizeSingleLinear.Rd | 24 +- man/MSstatsSummarizeSingleTMP.Rd | 44 +- man/MSstatsSummarizeWithMultipleCores.Rd | 26 +- man/MSstatsSummarizeWithSingleCore.Rd | 46 +- man/SDRFtoAnnotation.Rd | 26 +- man/dataProcess.Rd | 77 +- man/dataProcessPlots.Rd | 94 +-- man/designSampleSize.Rd | 62 +- man/designSampleSizePlots.Rd | 44 +- man/dot-calculatePower.Rd | 8 +- man/dot-documentFunction.Rd | 6 +- 
man/dot-getNonMissingFilterStats.Rd | 10 +- man/dot-getNumSample.Rd | 6 +- man/dot-groupComparisonWithMultipleCores.Rd | 4 +- man/dot-makeConditionPlot.Rd | 10 +- man/dot-makeProfilePlot.Rd | 10 +- man/dot-makeQCPlot.Rd | 44 +- man/dot-makeSummaryProfilePlot.Rd | 6 +- man/dot-normalizeGlobalStandards.Rd | 4 +- man/dot-plotComparison.Rd | 4 +- man/dot-plotHeatmap.Rd | 4 +- man/dot-plotVolcano.Rd | 4 +- man/dot-runTukey.Rd | 12 +- man/dot-setCensoredByThreshold.Rd | 4 +- man/extractSDRF.Rd | 27 +- man/getProcessed.Rd | 28 +- man/groupComparison.Rd | 28 +- man/groupComparisonPlots.Rd | 74 +- man/groupComparisonQCPlots.Rd | 34 +- man/makePeptidesDictionary.Rd | 4 +- man/modelBasedQCPlots.Rd | 34 +- man/quantification.Rd | 10 +- tests/tinytest.R | 2 +- vignettes/MSstats.Rmd | 74 +- vignettes/MSstatsWorkflow.Rmd | 205 ++--- 59 files changed, 2680 insertions(+), 2329 deletions(-) diff --git a/DESCRIPTION b/DESCRIPTION index 1edd5f57..0c1d8d60 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -53,7 +53,7 @@ biocViews: ImmunoOncology, MassSpectrometry, Proteomics, Software, Normalization LazyData: true URL: http://msstats.org BugReports: https://groups.google.com/forum/#!forum/msstats -RoxygenNote: 7.3.2 +RoxygenNote: 7.3.3 Encoding: UTF-8 NeedsCompilation: no Packaged: 2017-10-20 02:13:12 UTC; meenachoi diff --git a/R/utils_groupcomparison.R b/R/utils_groupcomparison.R index cf008fcd..9182abb6 100644 --- a/R/utils_groupcomparison.R +++ b/R/utils_groupcomparison.R @@ -1,75 +1,78 @@ #' Check if data represents repeated measurements design -#' +#' #' @param summarization_output output of the dataProcess function -#' +#' #' @return logical, TRUE if data represent repeated measurements design -#' +#' #' @details This extracts information required by the group comparison workflow -#' +#' #' @export -#' +#' #' @examples #' QuantData1 <- dataProcess(SRMRawData, use_log_file = FALSE) #' checkRepeatedDesign(QuantData1) -#' -checkRepeatedDesign = function(summarization_output) { - SUBJECT = GROUP = NULL - - input = as.data.table(summarization_output$ProteinLevelData) - subject_by_group = table(input[, list(SUBJECT, GROUP)]) - subject_appearances = apply(subject_by_group, 1, function(x) sum(x > - 0)) - repeated = any(subject_appearances > 1) - if (repeated) { - msg = "Time course design of experiment - okay" - } - else { - msg = "Case control design of experiment - okay" - } - getOption("MSstatsLog")("INFO", msg) - repeated +#' +checkRepeatedDesign <- function(summarization_output) { + SUBJECT <- GROUP <- NULL + + input <- as.data.table(summarization_output$ProteinLevelData) + subject_by_group <- table(input[, list(SUBJECT, GROUP)]) + subject_appearances <- apply(subject_by_group, 1, function(x) { + sum(x > + 0) + }) + repeated <- any(subject_appearances > 1) + if (repeated) { + msg <- "Time course design of experiment - okay" + } else { + msg <- "Case control design of experiment - okay" + } + getOption("MSstatsLog")("INFO", msg) + repeated } #' Get information about number of measurements for each group -#' +#' #' @param summarization_output output of the dataProcess function -#' +#' #' @return data.table -#' +#' #' @details This function extracts information required to compute percentages #' of missing and imputed values in group comparison. 
-#' +#' #' @export -#' +#' #' @examples #' QuantData <- dataProcess(DDARawData, use_log_file = FALSE) #' samples_info <- getSamplesInfo(QuantData) #' samples_info -#' -getSamplesInfo = function(summarization_output) { - RUN = NULL - - summarized = as.data.table(summarization_output$ProteinLevelData) - summarized[, list(NumRuns = data.table::uniqueN(RUN)), - by = "GROUP"] +#' +getSamplesInfo <- function(summarization_output) { + RUN <- NULL + + summarized <- as.data.table(summarization_output$ProteinLevelData) + summarized[, list(NumRuns = data.table::uniqueN(RUN)), + by = "GROUP" + ] } #' Prepare data for a single protein for group comparison #' @param single_protein data.table #' @keywords internal -.prepareSingleProteinForGC = function(single_protein) { - ABUNDANCE = GROUP = SUBJECT = RUN = NULL - - data.table::setnames(single_protein, - c("LogIntensities"), - c("ABUNDANCE"), - skip_absent = TRUE) - single_protein = single_protein[!is.na(ABUNDANCE)] - single_protein[, GROUP := factor(GROUP)] - single_protein[, SUBJECT := factor(SUBJECT)] - single_protein[, RUN := factor(RUN)] - single_protein +.prepareSingleProteinForGC <- function(single_protein) { + ABUNDANCE <- GROUP <- SUBJECT <- RUN <- NULL + + data.table::setnames(single_protein, + c("LogIntensities"), + c("ABUNDANCE"), + skip_absent = TRUE + ) + single_protein <- single_protein[!is.na(ABUNDANCE)] + single_protein[, GROUP := factor(GROUP)] + single_protein[, SUBJECT := factor(SUBJECT)] + single_protein[, RUN := factor(RUN)] + single_protein } @@ -86,104 +89,118 @@ getSamplesInfo = function(summarization_output) { #' @param has_imputed if TRUE, missing values have been imputed by dataProcess #' @importFrom stats resid fitted #' @keywords internal -.fitModelSingleProtein = function(input, contrast_matrix, has_tech_replicates, - is_single_subject, repeated, groups, - samples_info, - save_fitted_models, has_imputed) { - GROUP = SUBJECT = NULL - - input[, GROUP := factor(GROUP)] - input[, SUBJECT := factor(SUBJECT)] - - protein = unique(input$Protein) - n_groups = nlevels(input$GROUP) - if (n_groups == 1) { - result = .getEmptyComparison(input, contrast_matrix, - groups, protein) - residuals = NA - fitted_values = NA - fit = NULL +.fitModelSingleProtein <- function(input, contrast_matrix, has_tech_replicates, + is_single_subject, repeated, groups, + samples_info, + save_fitted_models, has_imputed) { + GROUP <- SUBJECT <- NULL + + input[, GROUP := factor(GROUP)] + input[, SUBJECT := factor(SUBJECT)] + + protein <- unique(input$Protein) + n_groups <- nlevels(input$GROUP) + if (n_groups == 1) { + result <- .getEmptyComparison( + input, contrast_matrix, + groups, protein + ) + residuals <- NA + fitted_values <- NA + fit <- NULL + } else { + fitted_model <- .fitModelForGroupComparison( + input, repeated, + is_single_subject, + has_tech_replicates + ) + result <- .getAllComparisons( + input, fitted_model, contrast_matrix, + groups, protein + ) + result <- .countMissingPercentage( + contrast_matrix, + input, result, samples_info, + has_imputed + ) + + if (inherits(fitted_model[["full_fit"]], "lm")) { + residuals <- fitted_model[["full_fit"]][["residuals"]] + fitted_values <- fitted_model[["full_fit"]][["fitted.values"]] } else { - fitted_model = .fitModelForGroupComparison(input, repeated, - is_single_subject, - has_tech_replicates) - result = .getAllComparisons(input, fitted_model, contrast_matrix, - groups, protein) - result = .countMissingPercentage(contrast_matrix, - input, result, samples_info, - has_imputed) - - if 
(inherits(fitted_model[["full_fit"]], "lm")) { - residuals = fitted_model[["full_fit"]][["residuals"]] - fitted_values = fitted_model[["full_fit"]][["fitted.values"]] - } else { - residuals = resid(fitted_model[["full_fit"]]) - fitted_values = fitted(fitted_model[["full_fit"]]) - } - fit = fitted_model[["full_fit"]] - } - - if (!save_fitted_models) { - fit = NULL + residuals <- resid(fitted_model[["full_fit"]]) + fitted_values <- fitted(fitted_model[["full_fit"]]) } - - input[, residuals := residuals] - input[, fitted := fitted_values] - list(result, fit) + fit <- fitted_model[["full_fit"]] + } + + if (!save_fitted_models) { + fit <- NULL + } + + input[, residuals := residuals] + input[, fitted := fitted_values] + list(result, fit) } #' Choose a model type (fixed/mixed effects) and fit it for a single protein #' @inheritParams .fitModelSingleProtein #' @keywords internal -.fitModelForGroupComparison = function(input, repeated, is_single_subject, - has_tech_replicates) { - if (!repeated) { - if (!has_tech_replicates | is_single_subject) { - full_fit = lm(ABUNDANCE ~ GROUP, data = input) - df_full = full_fit[["df.residual"]] - } else { - full_fit = suppressMessages(try( - lme4::lmer(ABUNDANCE ~ GROUP + (1|SUBJECT), data = input), - TRUE - )) - df_full = suppressMessages(try( - lm(ABUNDANCE ~ GROUP + SUBJECT, data = input)$df.residual, - TRUE - )) - } +.fitModelForGroupComparison <- function(input, repeated, is_single_subject, + has_tech_replicates) { + if (!repeated) { + if (!has_tech_replicates | is_single_subject) { + full_fit <- lm(ABUNDANCE ~ GROUP, data = input) + df_full <- full_fit[["df.residual"]] } else { - ## time-course - if (is_single_subject) { - full_fit = lm(ABUNDANCE ~ GROUP, - data = input) - df_full = full_fit$df.residual - } else { - ## no single subject - if (!has_tech_replicates) { - full_fit = suppressMessages(try( - lme4::lmer(ABUNDANCE ~ GROUP + (1|SUBJECT), data = input), - TRUE - )) - df_full = suppressMessages(try( - lm(ABUNDANCE ~ GROUP + SUBJECT, data = input)$df.residual, - TRUE)) - } else { - full_fit = suppressMessages(try( - lme4::lmer(ABUNDANCE ~ GROUP + (1|SUBJECT) + (1|GROUP:SUBJECT), - data = input), - TRUE - )) - df_full = suppressMessages(try( - lm(ABUNDANCE ~ GROUP + SUBJECT + GROUP:SUBJECT, - data = input)$df.residual, - TRUE - )) - } - } + full_fit <- suppressMessages(try( + lme4::lmer(ABUNDANCE ~ GROUP + (1 | SUBJECT), data = input), + TRUE + )) + df_full <- suppressMessages(try( + lm(ABUNDANCE ~ GROUP + SUBJECT, data = input)$df.residual, + TRUE + )) } - list(full_fit = full_fit, - df_full = df_full) + } else { + ## time-course + if (is_single_subject) { + full_fit <- lm(ABUNDANCE ~ GROUP, + data = input + ) + df_full <- full_fit$df.residual + } else { + ## no single subject + if (!has_tech_replicates) { + full_fit <- suppressMessages(try( + lme4::lmer(ABUNDANCE ~ GROUP + (1 | SUBJECT), data = input), + TRUE + )) + df_full <- suppressMessages(try( + lm(ABUNDANCE ~ GROUP + SUBJECT, data = input)$df.residual, + TRUE + )) + } else { + full_fit <- suppressMessages(try( + lme4::lmer(ABUNDANCE ~ GROUP + (1 | SUBJECT) + (1 | GROUP:SUBJECT), + data = input + ), + TRUE + )) + df_full <- suppressMessages(try( + lm(ABUNDANCE ~ GROUP + SUBJECT + GROUP:SUBJECT, + data = input + )$df.residual, + TRUE + )) + } + } + } + list( + full_fit = full_fit, + df_full = df_full + ) } @@ -192,16 +209,16 @@ getSamplesInfo = function(summarization_output) { #' @importFrom stats vcov #' @importFrom methods is #' @keywords internal -.getModelParameters = function(fitted_model) 
{ - if (is(fitted_model[["full_fit"]], "lm")) { - model_summary = summary(fitted_model[["full_fit"]]) - cf = model_summary[["coefficients"]] - vcv = model_summary[["cov.unscaled"]] * (model_summary[["sigma"]] ^ 2) - } else { - cf = as.matrix(lme4::fixef(fitted_model[["full_fit"]])) - vcv = as.matrix(vcov(fitted_model[["full_fit"]])) - } - list(cf = cf, vcv = vcv, df = fitted_model[["df_full"]]) +.getModelParameters <- function(fitted_model) { + if (is(fitted_model[["full_fit"]], "lm")) { + model_summary <- summary(fitted_model[["full_fit"]]) + cf <- model_summary[["coefficients"]] + vcv <- model_summary[["cov.unscaled"]] * (model_summary[["sigma"]]^2) + } else { + cf <- as.matrix(lme4::fixef(fitted_model[["full_fit"]])) + vcv <- as.matrix(vcov(fitted_model[["full_fit"]])) + } + list(cf = cf, vcv = vcv, df = fitted_model[["df_full"]]) } @@ -211,49 +228,57 @@ getSamplesInfo = function(summarization_output) { #' @param groups unique labels of experimental conditions #' @param protein name of a protein #' @keywords internal -.getEmptyComparison = function(input, contrast_matrix, groups, protein) { - all_comparisons = lapply(seq_len(nrow(contrast_matrix)), function(row_id) { - ith_comparison = contrast_matrix[row_id, , drop = FALSE] - - if (any(groups[ith_comparison != 0] %in% unique(input$GROUP))) { - msg = paste("*** error: results of protein", protein, - "for comparison", row.names(ith_comparison), - "are NA because there are measurements", - "only in a single group") - getOption("MSstatsLog")("INFO", msg) - - if (ith_comparison[ith_comparison != 0 & - (groups %in% unique(input$GROUP))] > 0) { - list( - logFC = Inf, - issue = "oneConditionMissing" - ) - } else { - list( - logFC = -Inf, - issue = "oneConditionMissing" - ) - } - } else { - msg = paste("*** error: results of protein", protein, - "for comparison", row.names(ith_comparison), - "are NA because there are no measurements", - "in both conditions.") - getOption("MSstatsLog")("INFO", msg) - - list(logFC = NA, - issue = "completeMissing") - } - }) - empty_result = data.table::rbindlist(all_comparisons, fill = TRUE) - empty_result = cbind(empty_result, - data.table::data.table( - Protein = protein, - Label = row.names(contrast_matrix), - SE = NA, Tvalue = NA, - DF = NA, pvalue = NA - )) - empty_result +.getEmptyComparison <- function(input, contrast_matrix, groups, protein) { + all_comparisons <- lapply(seq_len(nrow(contrast_matrix)), function(row_id) { + ith_comparison <- contrast_matrix[row_id, , drop = FALSE] + + if (any(groups[ith_comparison != 0] %in% unique(input$GROUP))) { + msg <- paste( + "*** error: results of protein", protein, + "for comparison", row.names(ith_comparison), + "are NA because there are measurements", + "only in a single group" + ) + getOption("MSstatsLog")("INFO", msg) + + if (ith_comparison[ith_comparison != 0 & + (groups %in% unique(input$GROUP))] > 0) { + list( + logFC = Inf, + issue = "oneConditionMissing" + ) + } else { + list( + logFC = -Inf, + issue = "oneConditionMissing" + ) + } + } else { + msg <- paste( + "*** error: results of protein", protein, + "for comparison", row.names(ith_comparison), + "are NA because there are no measurements", + "in both conditions." 
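# Toy illustration (not from the package) of the parameter extraction done by
# .getModelParameters above for a fixed-effects fit: the unscaled covariance
# from summary.lm() times sigma^2 equals stats::vcov() of the fit.
toy <- data.frame(ABUNDANCE = rnorm(12),
                  GROUP = factor(rep(c("A", "B", "C"), each = 4)))
fit <- lm(ABUNDANCE ~ GROUP, data = toy)
model_summary <- summary(fit)
cf <- model_summary$coefficients                            # estimate, SE, t, p
vcv <- model_summary$cov.unscaled * model_summary$sigma^2
all.equal(vcv, vcov(fit))                                   # TRUE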
+ ) + getOption("MSstatsLog")("INFO", msg) + + list( + logFC = NA, + issue = "completeMissing" + ) + } + }) + empty_result <- data.table::rbindlist(all_comparisons, fill = TRUE) + empty_result <- cbind( + empty_result, + data.table::data.table( + Protein = protein, + Label = row.names(contrast_matrix), + SE = NA, Tvalue = NA, + DF = NA, pvalue = NA + ) + ) + empty_result } @@ -264,27 +289,31 @@ getSamplesInfo = function(summarization_output) { #' @param groups unique labels of experimental conditions #' @param protein name of a protein #' @keywords internal -.getAllComparisons = function(input, fitted_model, contrast_matrix, - groups, protein) { - empty_conditions = setdiff(groups, unique(input$GROUP)) - parameters = .getModelParameters(fitted_model) - fit = fitted_model[["full_fit"]] - coefs = parameters$cf[, 1] - - all_comparisons = vector("list", nrow(contrast_matrix)) - for (row_id in seq_len(nrow(contrast_matrix))) { - ith_contrast = contrast_matrix[row_id, , drop = FALSE] - if (length(empty_conditions) != 0) { - result = .handleEmptyConditions(input, fit, ith_contrast, - groups, parameters, protein, - empty_conditions, coefs) - } else { - result = .handleSingleContrast(input, fit, ith_contrast, groups, - parameters, protein, coefs) - } - all_comparisons[[row_id]] = result +.getAllComparisons <- function(input, fitted_model, contrast_matrix, + groups, protein) { + empty_conditions <- setdiff(groups, unique(input$GROUP)) + parameters <- .getModelParameters(fitted_model) + fit <- fitted_model[["full_fit"]] + coefs <- parameters$cf[, 1] + + all_comparisons <- vector("list", nrow(contrast_matrix)) + for (row_id in seq_len(nrow(contrast_matrix))) { + ith_contrast <- contrast_matrix[row_id, , drop = FALSE] + if (length(empty_conditions) != 0) { + result <- .handleEmptyConditions( + input, fit, ith_contrast, + groups, parameters, protein, + empty_conditions, coefs + ) + } else { + result <- .handleSingleContrast( + input, fit, ith_contrast, groups, + parameters, protein, coefs + ) } - data.table::rbindlist(all_comparisons, fill = TRUE) + all_comparisons[[row_id]] <- result + } + data.table::rbindlist(all_comparisons, fill = TRUE) } @@ -292,68 +321,78 @@ getSamplesInfo = function(summarization_output) { #' @param input summarized data #' @param contrast single row of a contrast matrix #' @param groups unique labels of experimental conditions -#' @param parameters parameters extracted from the model +#' @param parameters parameters extracted from the model #' @param protein name of a protein #' @param empty_conditions labels of empty conditions #' @param coefs coefficient of the fitted model #' @keywords internal -.handleEmptyConditions = function(input, fit, contrast, - groups, parameters, protein, - empty_conditions, coefs) { - count_diff_pos = intersect(colnames(contrast)[contrast != 0 & contrast > 0], - empty_conditions) - count_diff_neg = intersect(colnames(contrast)[contrast != 0 & contrast < 0], - empty_conditions) - flag_issue_pos = length(count_diff_pos) != 0 - flag_issue_neg = length(count_diff_neg) != 0 - - if (any(c(flag_issue_pos, flag_issue_neg))) { - if (flag_issue_pos & flag_issue_neg) { - issue = "completeMissing" - logFC = NA - } else if (flag_issue_pos & !flag_issue_neg) { - issue_side = count_diff_pos - logFC = -Inf - issue = "oneConditionMissing" - } else if (!flag_issue_pos & flag_issue_neg) { - issue_side = count_diff_neg - logFC = Inf - issue = "oneConditionMissing" - } - result = list(Protein = protein, - logFC = logFC, - Label = row.names(contrast), - SE = NA, Tvalue 
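# Toy illustration of the sign convention used by .getEmptyComparison and
# .handleEmptyConditions above: when only one side of a contrast has data,
# logFC becomes +Inf or -Inf according to the weight of the observed group
# (values below are made up).
contrast <- matrix(c(1, -1, 0), nrow = 1,
                   dimnames = list("A vs B", c("A", "B", "C")))
observed <- "A"                                # the only condition with data
if (contrast[1, observed] > 0) Inf else -Inf   # Inf: observed group is on top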
= NA, DF = NA, pvalue = NA, - issue = issue) - } else { - result = .handleSingleContrast(input, fit, contrast, groups, - parameters, protein, coefs) +.handleEmptyConditions <- function(input, fit, contrast, + groups, parameters, protein, + empty_conditions, coefs) { + count_diff_pos <- intersect( + colnames(contrast)[contrast != 0 & contrast > 0], + empty_conditions + ) + count_diff_neg <- intersect( + colnames(contrast)[contrast != 0 & contrast < 0], + empty_conditions + ) + flag_issue_pos <- length(count_diff_pos) != 0 + flag_issue_neg <- length(count_diff_neg) != 0 + + if (any(c(flag_issue_pos, flag_issue_neg))) { + if (flag_issue_pos & flag_issue_neg) { + issue <- "completeMissing" + logFC <- NA + } else if (flag_issue_pos & !flag_issue_neg) { + issue_side <- count_diff_pos + logFC <- -Inf + issue <- "oneConditionMissing" + } else if (!flag_issue_pos & flag_issue_neg) { + issue_side <- count_diff_neg + logFC <- Inf + issue <- "oneConditionMissing" } - result + result <- list( + Protein = protein, + logFC = logFC, + Label = row.names(contrast), + SE = NA, Tvalue = NA, DF = NA, pvalue = NA, + issue = issue + ) + } else { + result <- .handleSingleContrast( + input, fit, contrast, groups, + parameters, protein, coefs + ) + } + result } #' Group comparison for a single contrast #' @inheritParams .handleEmptyConditions #' @keywords internal -.handleSingleContrast = function(input, fit, contrast, groups, - parameters, protein, coefs) { - groups = sort(groups) - contrast_values = .getContrast(input, contrast, coefs, groups) - parameters$cf = parameters$cf[names(contrast_values), , drop = FALSE] - parameters$vcv = parameters$vcv[names(contrast_values), names(contrast_values)] - result = get_estimable_fixed_random(parameters, contrast_values) - if (is.null(result)) { - result = list(Protein = protein, - Label = row.names(contrast), - logFC = NA, SE = NA, Tvalue = NA, - DF = NA, pvalue = NA, issue = NA) - } else { - result$Protein = protein - result$Label = row.names(contrast) - result$issue = NA - } - result +.handleSingleContrast <- function(input, fit, contrast, groups, + parameters, protein, coefs) { + groups <- sort(groups) + contrast_values <- .getContrast(input, contrast, coefs, groups) + parameters$cf <- parameters$cf[names(contrast_values), , drop = FALSE] + parameters$vcv <- parameters$vcv[names(contrast_values), names(contrast_values)] + result <- get_estimable_fixed_random(parameters, contrast_values) + if (is.null(result)) { + result <- list( + Protein = protein, + Label = row.names(contrast), + logFC = NA, SE = NA, Tvalue = NA, + DF = NA, pvalue = NA, issue = NA + ) + } else { + result$Protein <- protein + result$Label <- row.names(contrast) + result$issue <- NA + } + result } @@ -363,27 +402,27 @@ getSamplesInfo = function(summarization_output) { #' @param coefs coefficients of a linear model (named vector) #' @param groups unique group labels #' @keywords internal -.getContrast = function(input, contrast, coefs, groups) { - coef_names = names(coefs) - intercept = grep("Intercept", coef_names, value = TRUE) - if (length(intercept) > 0) { - intercept_term = rep(0, length(intercept)) - names(intercept_term) = intercept - } else { - intercept_term = NULL - } - group = grep("GROUP", coef_names, value = TRUE) - interaction = grep(":", coef_names, value = TRUE) - group = setdiff(group, interaction) - if (length(group) > 0) { - group_term = contrast[, as.character(groups[groups %in% unique(input$GROUP)])] - names(group_term) = paste0("GROUP", names(group_term)) - group_term = 
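# Toy illustration of how .getContrast above aligns a contrast row with the
# GROUP coefficients of the fitted model: the reference level is absorbed by
# the intercept, so its entry is dropped before matching by name.
contrast_row <- c(A = -1, B = 1, C = 0)               # "B vs A"
group_term <- contrast_row
names(group_term) <- paste0("GROUP", names(group_term))
group_term <- group_term[-1]                          # drop reference GROUPA
c("(Intercept)" = 0, group_term)                      # matches coef() naming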
group_term[-1] - } else { - group_term = NULL - } - contrast = c(intercept_term, group_term) - contrast[!is.na(coefs)] +.getContrast <- function(input, contrast, coefs, groups) { + coef_names <- names(coefs) + intercept <- grep("Intercept", coef_names, value = TRUE) + if (length(intercept) > 0) { + intercept_term <- rep(0, length(intercept)) + names(intercept_term) <- intercept + } else { + intercept_term <- NULL + } + group <- grep("GROUP", coef_names, value = TRUE) + interaction <- grep(":", coef_names, value = TRUE) + group <- setdiff(group, interaction) + if (length(group) > 0) { + group_term <- contrast[, as.character(groups[groups %in% unique(input$GROUP)])] + names(group_term) <- paste0("GROUP", names(group_term)) + group_term <- group_term[-1] + } else { + group_term <- NULL + } + contrast <- c(intercept_term, group_term) + contrast[!is.na(coefs)] } @@ -394,49 +433,58 @@ getSamplesInfo = function(summarization_output) { #' @param samples_info number of runs per group #' @param has_imputed if TRUE, missing values have been imputed by dataProcess #' @keywords internal -.countMissingPercentage = function(contrast_matrix, summarized, - result, samples_info, has_imputed) { - TotalGroupMeasurements = NumMeasuredFeature = NumImputedFeature = NULL - NumFea = NumRuns = totalN = NULL - - counts = summarized[, - list(totalN = unique(TotalGroupMeasurements), - NumMeasuredFeature = sum(NumMeasuredFeature, - na.rm = TRUE), - NumImputedFeature = sum(NumImputedFeature, - na.rm = TRUE)), - by = "GROUP"] - - empty_conditions = setdiff(samples_info$GROUP, unique(counts$GROUP)) - if (length(empty_conditions) !=0) { - counts = merge(samples_info, counts, by = "GROUP", all.x = TRUE) - counts[, NumFea := totalN / NumRuns ] # calculate number of features, to get the expected number of measurements in missing conditions - nofea = max(ceiling(counts$NumFea), na.rm = TRUE) # it should be integer, just in case of double, use ceiling to get interger - counts[is.na(totalN), NumMeasuredFeature := 0] - counts[is.na(totalN), NumImputedFeature := 0] - counts[is.na(totalN), totalN := NumRuns * nofea] - } - - missing_vector = numeric(nrow(contrast_matrix)) - imputed_vector = numeric(nrow(contrast_matrix)) - for (i in seq_len(nrow(contrast_matrix))) { - conditions = contrast_matrix[i, ] != 0 - missing_percentage = 1 - sum(counts$NumMeasuredFeature[conditions], - na.rm = TRUE) / sum(counts$totalN[conditions], - na.rm = TRUE) - if (has_imputed) { - imputed_percentage = sum(counts$NumImputedFeature[conditions], - na.rm = TRUE) / sum(counts$totalN[conditions], - na.rm = TRUE) - imputed_vector[i] = imputed_percentage - } - missing_vector[i] = missing_percentage - } - result$MissingPercentage = missing_vector +.countMissingPercentage <- function(contrast_matrix, summarized, + result, samples_info, has_imputed) { + TotalGroupMeasurements <- NumMeasuredFeature <- NumImputedFeature <- NULL + NumFea <- NumRuns <- totalN <- NULL + + counts <- summarized[, + list( + totalN = unique(TotalGroupMeasurements), + NumMeasuredFeature = sum(NumMeasuredFeature, + na.rm = TRUE + ), + NumImputedFeature = sum(NumImputedFeature, + na.rm = TRUE + ) + ), + by = "GROUP" + ] + + empty_conditions <- setdiff(samples_info$GROUP, unique(counts$GROUP)) + if (length(empty_conditions) != 0) { + counts <- merge(samples_info, counts, by = "GROUP", all.x = TRUE) + counts[, NumFea := totalN / NumRuns] # calculate number of features, to get the expected number of measurements in missing conditions + nofea <- max(ceiling(counts$NumFea), na.rm = TRUE) # it 
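# Worked toy example of the MissingPercentage arithmetic in
# .countMissingPercentage above: 1 - measured / expected, pooled over the
# conditions that a given contrast actually uses (numbers are made up).
counts <- data.frame(GROUP = c("A", "B", "C"),
                     totalN = c(30, 30, 30),           # runs x features
                     NumMeasuredFeature = c(30, 24, 12))
conditions <- c(TRUE, TRUE, FALSE)                     # contrast uses A and B
1 - sum(counts$NumMeasuredFeature[conditions]) / sum(counts$totalN[conditions])
# 0.1, i.e. 10% of the expected measurements are missing for this comparison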
should be integer, just in case of double, use ceiling to get interger + counts[is.na(totalN), NumMeasuredFeature := 0] + counts[is.na(totalN), NumImputedFeature := 0] + counts[is.na(totalN), totalN := NumRuns * nofea] + } + + missing_vector <- numeric(nrow(contrast_matrix)) + imputed_vector <- numeric(nrow(contrast_matrix)) + for (i in seq_len(nrow(contrast_matrix))) { + conditions <- contrast_matrix[i, ] != 0 + missing_percentage <- 1 - sum(counts$NumMeasuredFeature[conditions], + na.rm = TRUE + ) / sum(counts$totalN[conditions], + na.rm = TRUE + ) if (has_imputed) { - result$ImputationPercentage = imputed_vector + imputed_percentage <- sum(counts$NumImputedFeature[conditions], + na.rm = TRUE + ) / sum(counts$totalN[conditions], + na.rm = TRUE + ) + imputed_vector[i] <- imputed_percentage } - result + missing_vector[i] <- missing_percentage + } + result$MissingPercentage <- missing_vector + if (has_imputed) { + result$ImputationPercentage <- imputed_vector + } + result } #' Perform group comparison per protein in parallel @@ -445,37 +493,42 @@ getSamplesInfo = function(summarization_output) { #' @param save_fitted_models if TRUE, fitted models will be included in the output #' @param repeated logical, output of checkRepeatedDesign function #' @param samples_info data.table, output of getSamplesInfo function -#' @param numberOfCores Number of cores for parallel processing. -#' A logfile named `MSstats_groupComparison_log_progress.log` is created to +#' @param numberOfCores Number of cores for parallel processing. +#' A logfile named `MSstats_groupComparison_log_progress.log` is created to #' track progress. Only works for Linux & Mac OS. #' @importFrom parallel makeCluster clusterExport parLapply stopCluster #' @keywords internal -.groupComparisonWithMultipleCores = function(summarized_list, contrast_matrix, - save_fitted_models, repeated, samples_info, - numberOfCores) { - groups = colnames(contrast_matrix) - has_imputed = attr(summarized_list, "has_imputed") - all_proteins_id = seq_along(summarized_list) - function_environment = environment() - cl = parallel::makeCluster(numberOfCores) - parallel::clusterExport(cl, c("MSstatsGroupComparisonSingleProtein", - "contrast_matrix", "repeated", "groups", - "samples_info", "save_fitted_models", "has_imputed"), - envir = function_environment) - cat(paste0("Number of proteins to process: ", length(all_proteins_id)), - sep = "\n", file = "MSstats_groupComparison_log_progress.log") - test_results = parallel::parLapply(cl, all_proteins_id, function(i) { - if (i %% 100 == 0) { - cat("Finished processing an additional 100 protein comparisons", - sep = "\n", file = "MSstats_groupComparison_log_progress.log", append = TRUE) - } - MSstatsGroupComparisonSingleProtein( - summarized_list[[i]], contrast_matrix, repeated, - groups, samples_info, save_fitted_models, has_imputed - ) - }) - parallel::stopCluster(cl) - test_results +.groupComparisonWithMultipleCores <- function(summarized_list, contrast_matrix, + save_fitted_models, repeated, samples_info, + numberOfCores) { + groups <- colnames(contrast_matrix) + has_imputed <- attr(summarized_list, "has_imputed") + all_proteins_id <- seq_along(summarized_list) + function_environment <- environment() + cl <- parallel::makeCluster(numberOfCores) + parallel::clusterExport(cl, c( + "MSstatsGroupComparisonSingleProtein", + "contrast_matrix", "repeated", "groups", + "samples_info", "save_fitted_models", "has_imputed" + ), + envir = function_environment + ) + cat(paste0("Number of proteins to process: ", 
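# Minimal sketch of the cluster pattern used by the parallel code paths above
# (makeCluster -> clusterExport -> parLapply -> stopCluster), with a trivial
# payload standing in for the per-protein work.
library(parallel)
payload <- function(x) x^2                    # stand-in for the real worker
function_environment <- environment()
cl <- parallel::makeCluster(2)
parallel::clusterExport(cl, "payload", envir = function_environment)
result <- parallel::parLapply(cl, 1:10, function(i) payload(i))
parallel::stopCluster(cl)
unlist(result)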
length(all_proteins_id)), + sep = "\n", file = "MSstats_groupComparison_log_progress.log" + ) + test_results <- parallel::parLapply(cl, all_proteins_id, function(i) { + if (i %% 100 == 0) { + cat("Finished processing an additional 100 protein comparisons", + sep = "\n", file = "MSstats_groupComparison_log_progress.log", append = TRUE + ) + } + MSstatsGroupComparisonSingleProtein( + summarized_list[[i]], contrast_matrix, repeated, + groups, samples_info, save_fitted_models, has_imputed + ) + }) + parallel::stopCluster(cl) + test_results } #' Perform group comparison per protein iteratively with a single loop @@ -486,23 +539,22 @@ getSamplesInfo = function(summarization_output) { #' @param samples_info data.table, output of getSamplesInfo function #' @importFrom utils txtProgressBar setTxtProgressBar #' @keywords internal -.groupComparisonWithSingleCore = function(summarized_list, contrast_matrix, - save_fitted_models, repeated, - samples_info) { - groups = colnames(contrast_matrix) - has_imputed = attr(summarized_list, "has_imputed") - all_proteins_id = seq_along(summarized_list) - test_results = vector("list", length(all_proteins_id)) - pb = txtProgressBar(max = length(all_proteins_id), style = 3) - for (i in all_proteins_id) { - comparison_outputs = MSstatsGroupComparisonSingleProtein( - summarized_list[[i]], contrast_matrix, repeated, - groups, samples_info, save_fitted_models, has_imputed - ) - test_results[[i]] = comparison_outputs - setTxtProgressBar(pb, i) - } - close(pb) - test_results +.groupComparisonWithSingleCore <- function(summarized_list, contrast_matrix, + save_fitted_models, repeated, + samples_info) { + groups <- colnames(contrast_matrix) + has_imputed <- attr(summarized_list, "has_imputed") + all_proteins_id <- seq_along(summarized_list) + test_results <- vector("list", length(all_proteins_id)) + pb <- txtProgressBar(max = length(all_proteins_id), style = 3) + for (i in all_proteins_id) { + comparison_outputs <- MSstatsGroupComparisonSingleProtein( + summarized_list[[i]], contrast_matrix, repeated, + groups, samples_info, save_fitted_models, has_imputed + ) + test_results[[i]] <- comparison_outputs + setTxtProgressBar(pb, i) + } + close(pb) + test_results } - diff --git a/R/utils_groupcomparison_checks.R b/R/utils_groupcomparison_checks.R index d972edeb..4dd100c8 100644 --- a/R/utils_groupcomparison_checks.R +++ b/R/utils_groupcomparison_checks.R @@ -1,17 +1,21 @@ #' Check if groupComparison input was processed by the dataProcess function #' @param input data.table #' @keywords internal -.checkGroupComparisonInput = function(input) { - cols = c("RUN", "Protein", "LogIntensities", "originalRUN", - "GROUP", "SUBJECT", "more50missing", "NumMeasuredFeature") - - if (length(setdiff(toupper(cols), toupper(colnames(input)))) > 0) { - msg = paste("The `data` input was not processed by the dataProcess function.", - "Please use the dataProcess function first.") - getOption("MSstatsLog")("INFO", msg) - stop(msg) - } - input +.checkGroupComparisonInput <- function(input) { + cols <- c( + "RUN", "Protein", "LogIntensities", "originalRUN", + "GROUP", "SUBJECT", "more50missing", "NumMeasuredFeature" + ) + + if (length(setdiff(toupper(cols), toupper(colnames(input)))) > 0) { + msg <- paste( + "The `data` input was not processed by the dataProcess function.", + "Please use the dataProcess function first." 
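# Toy illustration of the case-insensitive column check in
# .checkGroupComparisonInput above: the required names are compared against
# colnames(input) after upper-casing both sides (here with a deliberately
# incomplete input).
required <- c("RUN", "Protein", "LogIntensities", "originalRUN",
              "GROUP", "SUBJECT", "more50missing", "NumMeasuredFeature")
have <- c("RUN", "Protein", "LogIntensities")
setdiff(toupper(required), toupper(have))   # non-empty, so the check stops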
+ ) + getOption("MSstatsLog")("INFO", msg) + stop(msg) + } + input } @@ -19,48 +23,56 @@ #' @param contrast_matrix contrast matrix #' @param input data.table of summarized data #' @keywords internal -.checkContrastMatrix = function(contrast_matrix, input) { - if (ncol(contrast_matrix) != nlevels(input$GROUP)) { - msg = paste("Number of columns of the contrast.matrix parameter must be", - "equal to the number of groups.") - getOption("MSstatsLog")("ERROR", msg) - stop(msg) - } - - if (any(is.null(row.names(contrast_matrix)))) { - msg = paste("All rows of the contrast.matrix parameter", - "must be named") - getOption("MSstatsLog")("ERROR", msg) - stop(msg) - } - contrast_matrix +.checkContrastMatrix <- function(contrast_matrix, input) { + if (ncol(contrast_matrix) != nlevels(input$GROUP)) { + msg <- paste( + "Number of columns of the contrast.matrix parameter must be", + "equal to the number of groups." + ) + getOption("MSstatsLog")("ERROR", msg) + stop(msg) + } + + if (any(is.null(row.names(contrast_matrix)))) { + msg <- paste( + "All rows of the contrast.matrix parameter", + "must be named" + ) + getOption("MSstatsLog")("ERROR", msg) + stop(msg) + } + contrast_matrix } #' Check if there is only single subject #' @param input data.table #' @keywords internal -.checkSingleSubject = function(input) { - SUBJECT = GROUP = NULL - - unique_annot = unique(input[, list(GROUP, SUBJECT)]) - subject_counts = unique_annot[, list(NumSubjects = data.table::uniqueN(SUBJECT)), - by = "GROUP"] - all(subject_counts$NumSubject == 1) +.checkSingleSubject <- function(input) { + SUBJECT <- GROUP <- NULL + + unique_annot <- unique(input[, list(GROUP, SUBJECT)]) + subject_counts <- unique_annot[, list(NumSubjects = data.table::uniqueN(SUBJECT)), + by = "GROUP" + ] + all(subject_counts$NumSubject == 1) } #' Check if there are technical replicates #' @param input data.table #' @keywords internal -.checkTechReplicate = function(input) { - GROUP = RUN = SUBJECT = NULL - - unique_annot = unique(input[, list(RUN, - SUBJECT_NESTED = paste(GROUP, - SUBJECT, - sep = "."))]) - run_counts = unique_annot[, list(NumRuns = data.table::uniqueN(RUN)), - by = "SUBJECT_NESTED"] - any(run_counts[["NumRuns"]] > 1) +.checkTechReplicate <- function(input) { + GROUP <- RUN <- SUBJECT <- NULL + + unique_annot <- unique(input[, list(RUN, + SUBJECT_NESTED = paste(GROUP, + SUBJECT, + sep = "." 
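# Toy illustration of the technical-replicate check above: replicates exist
# when one GROUP.SUBJECT combination maps to more than one RUN.
library(data.table)
annot <- data.table(GROUP = c("A", "A", "B", "B"),
                    SUBJECT = c("1", "1", "1", "2"),
                    RUN = c("r1", "r2", "r3", "r4"))
annot[, SUBJECT_NESTED := paste(GROUP, SUBJECT, sep = ".")]
run_counts <- annot[, list(NumRuns = uniqueN(RUN)), by = "SUBJECT_NESTED"]
any(run_counts[["NumRuns"]] > 1)   # TRUE: subject A.1 was run twice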
+ ) + )]) + run_counts <- unique_annot[, list(NumRuns = data.table::uniqueN(RUN)), + by = "SUBJECT_NESTED" + ] + any(run_counts[["NumRuns"]] > 1) } diff --git a/R/utils_groupcomparison_contrasts.R b/R/utils_groupcomparison_contrasts.R index d628ed58..d861e2c9 100644 --- a/R/utils_groupcomparison_contrasts.R +++ b/R/utils_groupcomparison_contrasts.R @@ -1,95 +1,118 @@ #' @export -MSstatsContrastMatrix.list = function(contrasts, conditions, labels = NULL) { - num_conditions = length(conditions) - contrast_matrix = matrix(0, nrow = length(contrasts), - ncol = num_conditions) - for (contrast_id in seq_along(contrasts)) { - contrast = contrasts[[contrast_id]] - contrast_vector = rep(0, num_conditions) - positive = conditions %in% contrast[[1]] - negative = conditions %in% contrast[[2]] - contrast_vector[positive] = 1 / sum(positive) - contrast_vector[negative] = -1 / sum(negative) - contrast_matrix[contrast_id, ] = contrast_vector - } - if (is.null(labels)) { - row.names(contrast_matrix) = .getContrastLabels(contrasts) - } else { - row.names(contrast_matrix) = labels - } - colnames(contrast_matrix) = conditions - contrast_matrix +MSstatsContrastMatrix.list <- function(contrasts, conditions, labels = NULL) { + num_conditions <- length(conditions) + contrast_matrix <- matrix(0, + nrow = length(contrasts), + ncol = num_conditions + ) + for (contrast_id in seq_along(contrasts)) { + contrast <- contrasts[[contrast_id]] + contrast_vector <- rep(0, num_conditions) + positive <- conditions %in% contrast[[1]] + negative <- conditions %in% contrast[[2]] + contrast_vector[positive] <- 1 / sum(positive) + contrast_vector[negative] <- -1 / sum(negative) + contrast_matrix[contrast_id, ] <- contrast_vector + } + if (is.null(labels)) { + row.names(contrast_matrix) <- .getContrastLabels(contrasts) + } else { + row.names(contrast_matrix) <- labels + } + colnames(contrast_matrix) <- conditions + contrast_matrix } #' @importFrom utils combn #' @export -MSstatsContrastMatrix.character = function(contrasts, conditions, labels = NULL) { - if (contrasts == "pairwise") { - contrast_combinations = combn(conditions, 2) - num_combinations = ncol(contrast_combinations) - contrasts_list = lapply(seq_len(num_combinations), - function(x) list(contrast_combinations[1, x], - contrast_combinations[2, x])) - MSstatsContrastMatrix.list(contrasts_list, conditions) - } else { - stop(paste("Contrast matrix of type", contrasts, "not implemented")) - } +MSstatsContrastMatrix.character <- function(contrasts, conditions, labels = NULL) { + if (contrasts == "pairwise") { + contrast_combinations <- combn(conditions, 2) + num_combinations <- ncol(contrast_combinations) + contrasts_list <- lapply( + seq_len(num_combinations), + function(x) { + list( + contrast_combinations[1, x], + contrast_combinations[2, x] + ) + } + ) + MSstatsContrastMatrix.list(contrasts_list, conditions) + } else { + stop(paste("Contrast matrix of type", contrasts, "not implemented")) + } } #' @export -MSstatsContrastMatrix.matrix = function(contrasts, conditions, labels = NULL) { - groups_matrix = colnames(contrasts) - checkmate::assertSetEqual(groups_matrix, as.character(conditions), - .var.name = "colnames of contrast matrix") - if (is.null(row.names(contrasts))) { - stop("Row names of the contrast matrix must be the contrast labels") - } - contrasts +MSstatsContrastMatrix.matrix <- function(contrasts, conditions, labels = NULL) { + groups_matrix <- colnames(contrasts) + checkmate::assertSetEqual(groups_matrix, as.character(conditions), + .var.name = "colnames of 
contrast matrix" + ) + if (is.null(row.names(contrasts))) { + stop("Row names of the contrast matrix must be the contrast labels") + } + contrasts } #' @export -MSstatsContrastMatrix.data.frame = function(contrasts, conditions, labels) { - groups_matrix = colnames(contrasts) - checkmate::assertSetEqual(groups_matrix, as.character(conditions), - .var.name = "colnames of contrast matrix") - if (is.null(row.names(contrasts))) { - stop("Row names of the contrast matrix must be the contrast labels") - } - as.matrix(contrasts) +MSstatsContrastMatrix.data.frame <- function(contrasts, conditions, labels) { + groups_matrix <- colnames(contrasts) + checkmate::assertSetEqual(groups_matrix, as.character(conditions), + .var.name = "colnames of contrast matrix" + ) + if (is.null(row.names(contrasts))) { + stop("Row names of the contrast matrix must be the contrast labels") + } + as.matrix(contrasts) } #' Create a contrast matrix for groupComparison function -#' +#' #' @param contrasts One of the following: -#' i) list of lists. Each sub-list consists of two vectors that name +#' i) list of lists. Each sub-list consists of two vectors that name #' conditions that will be compared. See the details section for more information #' ii) matrix. In this case, it's correctness will be checked #' iii) "pairwise". In this case, pairwise comparison matrix will be generated #' iv) data.frame. In this case, input will be converted to matrix -#' @param conditions unique condition labels +#' @param conditions unique condition labels #' @param labels labels for contrasts (row.names of the contrast matrix) -#' +#' #' @export -#' -MSstatsContrastMatrix = function(contrasts, conditions, labels = NULL) { - UseMethod("MSstatsContrastMatrix", contrasts) +#' +MSstatsContrastMatrix <- function(contrasts, conditions, labels = NULL) { + UseMethod("MSstatsContrastMatrix", contrasts) } -#' Get labels for contrasts +#' Get labels for contrasts #' @param contrasts list of lists of condition labels #' @keywords internal -.getContrastLabels = function(contrasts) { - prelabels = lapply(contrasts, - function(x) sapply(x, - function(y) - paste(y, sep = ",", - collapse = ","))) - labels = sapply(prelabels, function(x) paste(x, sep = " vs ", - collapse = " vs ")) - labels +.getContrastLabels <- function(contrasts) { + prelabels <- lapply( + contrasts, + function(x) { + sapply( + x, + function(y) { + paste(y, + sep = ",", + collapse = "," + ) + } + ) + } + ) + labels <- sapply(prelabels, function(x) { + paste(x, + sep = " vs ", + collapse = " vs " + ) + }) + labels } diff --git a/R/utils_groupcomparison_plots.R b/R/utils_groupcomparison_plots.R index e6040fc1..7998d44d 100644 --- a/R/utils_groupcomparison_plots.R +++ b/R/utils_groupcomparison_plots.R @@ -4,54 +4,60 @@ #' @param selected_labels character vector of contrast labels #' @param all_labels character vector of all contrast labels #' @keywords internal -.checkGCPlotsInput = function(type, log_base, selected_labels, all_labels) { - checkmate::assertChoice(type, c("HEATMAP", "VOLCANOPLOT", "COMPARISONPLOT")) - checkmate::assertChoice(log_base, c(2, 10)) - if (selected_labels != "all") { - if (is.character(selected_labels)) { - chosen_labels = selected_labels - print("labels") - print(chosen_labels) - wrong_labels = setdiff(chosen_labels, all_labels) - if (length(wrong_labels) > 0) { - msg_1 = paste("Please check labels of comparisons.", - "Result does not have the following comparisons:") - msg_2 = paste(wrong_labels, sep = ", ", collapse = ", ") - msg = paste(msg_1, msg_2) - 
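# Usage sketch for the MSstatsContrastMatrix generic documented above
# (assumes MSstats with these functions is attached; condition names are toy).
library(MSstats)
conditions <- c("Control", "Drug1", "Drug2")
MSstatsContrastMatrix("pairwise", conditions)          # all pairwise rows
MSstatsContrastMatrix(list(list("Drug1", "Control"),   # Drug1 vs Control
                           list("Drug2", c("Control", "Drug1"))),
                      conditions)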
stop(msg) - } - } - if (is.numeric(selected_labels)) { - n_labels = length(all_labels) - if (n_labels < max(selected_labels)) { - msg = paste("Please check your selection of comparisons. There are", - n_labels, "comparisons in this result.") - stop(msg) - } else { - chosen_labels = all_labels[selected_labels] - } - } - } else { - chosen_labels = all_labels +.checkGCPlotsInput <- function(type, log_base, selected_labels, all_labels) { + checkmate::assertChoice(type, c("HEATMAP", "VOLCANOPLOT", "COMPARISONPLOT")) + checkmate::assertChoice(log_base, c(2, 10)) + if (selected_labels != "all") { + if (is.character(selected_labels)) { + chosen_labels <- selected_labels + print("labels") + print(chosen_labels) + wrong_labels <- setdiff(chosen_labels, all_labels) + if (length(wrong_labels) > 0) { + msg_1 <- paste( + "Please check labels of comparisons.", + "Result does not have the following comparisons:" + ) + msg_2 <- paste(wrong_labels, sep = ", ", collapse = ", ") + msg <- paste(msg_1, msg_2) + stop(msg) + } + } + if (is.numeric(selected_labels)) { + n_labels <- length(all_labels) + if (n_labels < max(selected_labels)) { + msg <- paste( + "Please check your selection of comparisons. There are", + n_labels, "comparisons in this result." + ) + stop(msg) + } else { + chosen_labels <- all_labels[selected_labels] + } } - chosen_labels + } else { + chosen_labels <- all_labels + } + chosen_labels } #' @importFrom stats quantile dist #' @keywords internal -.getOrderedMatrix = function(input, type) { - input_tmp = input - input_tmp[is.na(input)] = 50 - if (toupper(type) == "PROTEIN") { - input = input[hclust(dist(input_tmp), method = "ward.D")$order, ] - } else if (toupper(type) == "COMPARISON") { - input = input[, hclust(dist(t(input_tmp)), method = "ward.D")$order] - } else if (toupper(type) == "BOTH") { - input = input[hclust(dist(input_tmp), method = "ward.D")$order, - hclust(dist(t(input)), method = "ward.D")$order] - } - input +.getOrderedMatrix <- function(input, type) { + input_tmp <- input + input_tmp[is.na(input)] <- 50 + if (toupper(type) == "PROTEIN") { + input <- input[hclust(dist(input_tmp), method = "ward.D")$order, ] + } else if (toupper(type) == "COMPARISON") { + input <- input[, hclust(dist(t(input_tmp)), method = "ward.D")$order] + } else if (toupper(type) == "BOTH") { + input <- input[ + hclust(dist(input_tmp), method = "ward.D")$order, + hclust(dist(t(input)), method = "ward.D")$order + ] + } + input } @@ -61,119 +67,128 @@ colMin <- function(data) sapply(data, min, na.rm = TRUE) #' Create colorkey for ggplot2 heatmap #' @param my.colors blocks #' @keywords internal -.getColorKeyGGPlot2 = function(my.colors, blocks) { - x.at = seq(-0.05, 1.05, length.out = 14) - par(mar = c(3, 3, 3, 3), mfrow = c(3, 1), oma = c(3, 0, 3, 0)) - plot.new() - image(z = matrix(seq(seq_len(length(my.colors) -1)), ncol = 1), - col = my.colors, - xaxt = "n", - yaxt = "n") - mtext("Color Key", side = 3,line = 1, cex = 3) - mtext("(sign) Adjusted p-value", side = 1, line = 3, at = 0.5, cex = 1.7) - mtext(blocks, side = 1, line = 1, at = x.at, cex = 1) +.getColorKeyGGPlot2 <- function(my.colors, blocks) { + x.at <- seq(-0.05, 1.05, length.out = 14) + par(mar = c(3, 3, 3, 3), mfrow = c(3, 1), oma = c(3, 0, 3, 0)) + plot.new() + image( + z = matrix(seq(seq_len(length(my.colors) - 1)), ncol = 1), + col = my.colors, + xaxt = "n", + yaxt = "n" + ) + mtext("Color Key", side = 3, line = 1, cex = 3) + mtext("(sign) Adjusted p-value", side = 1, line = 3, at = 0.5, cex = 1.7) + mtext(blocks, side = 1, line = 1, at = 
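# Toy illustration of the row re-ordering done by .getOrderedMatrix above:
# NAs are temporarily set to 50 so that dist() is defined, then rows follow
# the Ward clustering order.
m <- matrix(c(0.01, NA, 0.8, 0.02, 0.9, NA), nrow = 3,
            dimnames = list(paste0("prot", 1:3), c("A vs B", "A vs C")))
m_tmp <- m
m_tmp[is.na(m_tmp)] <- 50
m[hclust(dist(m_tmp), method = "ward.D")$order, ]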
x.at, cex = 1) } #' Create colorkey for plotly heatmap #' @param my.colors blocks #' @keywords internal -.getColorKeyPlotly = function(my.colors, blocks) { - color.key.plot <- plotly::layout( - plot_ly(type = "image", z = list(my.colors)), - xaxis = list( - dtick = 0, - ticktext = as.character(blocks), - tickmode = "array", - tickvals = -0.5:length(blocks), - tickangle = 0, - title = "(sign) Adjusted p-value" - ), - yaxis = list( - ticks = "", - showticklabels = FALSE - ) +.getColorKeyPlotly <- function(my.colors, blocks) { + color.key.plot <- plotly::layout( + plot_ly(type = "image", z = list(my.colors)), + xaxis = list( + dtick = 0, + ticktext = as.character(blocks), + tickmode = "array", + tickvals = -0.5:length(blocks), + tickangle = 0, + title = "(sign) Adjusted p-value" + ), + yaxis = list( + ticks = "", + showticklabels = FALSE ) - - color.key.plot <- plotly::style(color.key.plot, hoverinfo = "none") - color.key.plot + ) + + color.key.plot <- plotly::style(color.key.plot, hoverinfo = "none") + color.key.plot } #' Create heatmap #' @param input data.table #' @inheritParams groupComparisonPlots #' @keywords internal -.makeHeatmapPlotly = function(input, my.colors, my.breaks, x.axis.size, y.axis.size, height, numProtein) { - input <- input[1:pmin(numProtein, nrow(input)), ,drop=F] - par(oma = c(3, 0, 0, 4)) - label_formatter <- list( - title = "", - # titlefont = f1, - showticklabels = TRUE, - tickangle = 45, - # tickfont = f2, - exponentformat = "E") - - # adjust my.breaks - x = my.breaks - dltx <- diff(x)[1] - x <- sort(c(x,-dltx/16,dltx/16)) - x <- x[x!=0] - x.resc <- (x-min(x))/(max(x)-min(x)) +.makeHeatmapPlotly <- function(input, my.colors, my.breaks, x.axis.size, y.axis.size, height, numProtein) { + input <- input[1:pmin(numProtein, nrow(input)), , drop = F] + par(oma = c(3, 0, 0, 4)) + label_formatter <- list( + title = "", + # titlefont = f1, + showticklabels = TRUE, + tickangle = 45, + # tickfont = f2, + exponentformat = "E" + ) - # get color scale - cols = my.colors - colorScale <- data.frame( - z = c(0,rep(x.resc[2:(length(x.resc)-1)],each=2),1), - col=rep(cols,each=2) - ) - - # Creating the custom hover text matrix - row_names <- rownames(input) - col_names <- colnames(input) - hover_text_matrix <- matrix("", nrow = nrow(input), ncol = ncol(input)) - for (i in 1:nrow(input)) { - for (j in 1:ncol(input)) { - hover_text_matrix[i, j] <- sprintf("Comparison: %s
<br>Protein: %s<br>
Value: %.2f", - col_names[j], - row_names[i], - input[i, j]) - } + # adjust my.breaks + x <- my.breaks + dltx <- diff(x)[1] + x <- sort(c(x, -dltx / 16, dltx / 16)) + x <- x[x != 0] + x.resc <- (x - min(x)) / (max(x) - min(x)) + + # get color scale + cols <- my.colors + colorScale <- data.frame( + z = c(0, rep(x.resc[2:(length(x.resc) - 1)], each = 2), 1), + col = rep(cols, each = 2) + ) + + # Creating the custom hover text matrix + row_names <- rownames(input) + col_names <- colnames(input) + hover_text_matrix <- matrix("", nrow = nrow(input), ncol = ncol(input)) + for (i in 1:nrow(input)) { + for (j in 1:ncol(input)) { + hover_text_matrix[i, j] <- sprintf( + "Comparison: %s
<br>Protein: %s<br>
Value: %.2f", + col_names[j], + row_names[i], + input[i, j] + ) } - - heatmap_plot = plot_ly(z = as.matrix(input), - zmin = x[1], - zmax = x[length(x)], - x = colnames(input), - xgap = 0, - y = rownames(input), - ygap = 0, - type = "heatmap", - hoverinfo = "text", - text=hover_text_matrix, - showlegend = FALSE, - showscale = FALSE, - colorscale = colorScale, - colorbar = list(ypad = 520, tick0 = x[1], dtick = dltx, len = 1, orientation = "h"), - width = 800) - - heatmap_plot <- plotly::layout(heatmap_plot, - xaxis = label_formatter, - plot_bgcolor = "grey", - height = height) - heatmap_plot + } + + heatmap_plot <- plot_ly( + z = as.matrix(input), + zmin = x[1], + zmax = x[length(x)], + x = colnames(input), + xgap = 0, + y = rownames(input), + ygap = 0, + type = "heatmap", + hoverinfo = "text", + text = hover_text_matrix, + showlegend = FALSE, + showscale = FALSE, + colorscale = colorScale, + colorbar = list(ypad = 520, tick0 = x[1], dtick = dltx, len = 1, orientation = "h"), + width = 800 + ) + + heatmap_plot <- plotly::layout(heatmap_plot, + xaxis = label_formatter, + plot_bgcolor = "grey", + height = height + ) + heatmap_plot } -.makeHeatmapGgplot2 = function(input, my.colors, my.breaks, x.axis.size, y.axis.size,height) { - par(oma = c(3, 0, 0, 4)) - heatmap.2(as.matrix(input), - col = my.colors, - Rowv = FALSE, Colv = FALSE, - dendrogram = "none", breaks = my.breaks, - trace = "none", na.color = "grey", - cexCol = (x.axis.size / 10), - cexRow = (y.axis.size / 10), - key = FALSE, - lhei = c(0.1, 0.9), lwid = c(0.1, 0.9)) +.makeHeatmapGgplot2 <- function(input, my.colors, my.breaks, x.axis.size, y.axis.size, height) { + par(oma = c(3, 0, 0, 4)) + heatmap.2(as.matrix(input), + col = my.colors, + Rowv = FALSE, Colv = FALSE, + dendrogram = "none", breaks = my.breaks, + trace = "none", na.color = "grey", + cexCol = (x.axis.size / 10), + cexRow = (y.axis.size / 10), + key = FALSE, + lhei = c(0.1, 0.9), lwid = c(0.1, 0.9) + ) } #' Create a volcano plot @@ -183,96 +198,136 @@ colMin <- function(data) sapply(data, min, na.rm = TRUE) #' @param log_base_FC 2 or 10 #' @param log_base_pval 2 or 10 #' @keywords internal -.makeVolcano = function( +.makeVolcano <- function( input, label_name, log_base_FC, log_base_pval, x.lim, ProteinName, dot.size, y.limdown, y.limup, text.size, FCcutoff, sig, x.axis.size, y.axis.size, - legend.size, log_adjp -) { - Protein = NULL - plot = ggplot(aes_string(x = "logFC", - y = log_adjp, - color = "colgroup", - label = "Protein"), - data = input) + - geom_point(size = dot.size) + - scale_colour_manual(values = c("gray65", "blue", "red"), - limits = c("black", "blue", "red"), - breaks = c("black", "blue", "red"), - labels = c("No regulation", "Down-regulated", "Up-regulated")) + - scale_y_continuous(paste0("-Log", log_base_pval, " (adjusted p-value)"), - limits = c(y.limdown, y.limup)) + - labs(title = unique(label_name)) - plot = plot + - scale_x_continuous(paste0("Log", log_base_FC, " fold change"), - limits = c(-x.lim, x.lim)) - if (ProteinName) { - if (!(length(unique(input$colgroup)) == 1 & any(unique(input$colgroup) == "black"))) { - plot = plot + - geom_text_repel(data = input[input$colgroup != "black", ], - aes(label = Protein), - size = text.size, - col = "black") - } - } - if (!FCcutoff | is.numeric(FCcutoff)) { - l = ifelse(!FCcutoff, 20, 10) - sigcut = data.table::setnames( - data.table::data.table("sigline", - seq(-x.lim, x.lim, length.out = l), - (-log(sig, base = log_base_pval)), - "twodash"), - c("Protein", "logFC", log_adjp, "line")) - } - if (!FCcutoff) 
{ - plot = plot + - geom_line(data = sigcut, - aes_string(x = "logFC", y = log_adjp, linetype = "line"), - colour = "darkgrey", - size = 0.6, - show.legend = TRUE) + - scale_linetype_manual(values = c("twodash" = 6), - labels = c(paste0("Adj p-value cutoff (", sig, ")"))) + - guides(colour = guide_legend(override.aes = list(linetype = 0)), - linetype = guide_legend()) + legend.size, log_adjp) { + Protein <- NULL + plot <- ggplot( + aes_string( + x = "logFC", + y = log_adjp, + color = "colgroup", + label = "Protein" + ), + data = input + ) + + geom_point(size = dot.size) + + scale_colour_manual( + values = c("gray65", "blue", "red"), + limits = c("black", "blue", "red"), + breaks = c("black", "blue", "red"), + labels = c("No regulation", "Down-regulated", "Up-regulated") + ) + + scale_y_continuous(paste0("-Log", log_base_pval, " (adjusted p-value)"), + limits = c(y.limdown, y.limup) + ) + + labs(title = unique(label_name)) + plot <- plot + + scale_x_continuous(paste0("Log", log_base_FC, " fold change"), + limits = c(-x.lim, x.lim) + ) + if (ProteinName) { + if (!(length(unique(input$colgroup)) == 1 & any(unique(input$colgroup) == "black"))) { + plot <- plot + + geom_text_repel( + data = input[input$colgroup != "black", ], + aes(label = Protein), + size = text.size, + col = "black" + ) } - if (is.numeric(FCcutoff)) { - FCcutpos = data.table::setnames(data.table("sigline", - log(FCcutoff, log_base_FC), - seq(y.limdown, y.limup, length.out = 10), - "dotted"), - c("Protein", "logFC", log_adjp, "line")) - FCcutneg = data.table::setnames(data.table("sigline", - (-log(FCcutoff, log_base_FC)), - seq(y.limdown, y.limup, length.out = 10), - "dotted"), - c("Protein", "logFC", log_adjp, "line")) - plot = plot + - geom_line(data = sigcut, - aes_string(x = "logFC", y = log_adjp, linetype = "line"), - colour = "darkgrey", - size = 0.6, - show.legend = TRUE) + - geom_line(data = FCcutpos, - aes_string(x = "logFC", y = log_adjp, linetype = "line"), - colour = "darkgrey", - size = 0.6, - show.legend = TRUE) + - geom_line(data = FCcutneg, - aes_string(x = "logFC", y = log_adjp, linetype = "line"), - colour = "darkgrey", - size = 0.6) + - scale_linetype_manual(values = c("dotted" = 3, "twodash" = 6), - labels = c(paste0("Fold change cutoff (", FCcutoff, ")"), - paste0("Adj p-value cutoff (", sig, ")"))) + - guides(colour = guide_legend(override.aes = list(linetype = 0)), - linetype = guide_legend()) - } - plot = plot + - theme_msstats("VOLCANOPLOT", x.axis.size, y.axis.size, - legend.size, strip_background = element_rect(), - strip_text_x = element_text(), - legend_position = "bottom", legend.title = element_blank()) - plot + } + if (!FCcutoff | is.numeric(FCcutoff)) { + l <- ifelse(!FCcutoff, 20, 10) + sigcut <- data.table::setnames( + data.table::data.table( + "sigline", + seq(-x.lim, x.lim, length.out = l), + (-log(sig, base = log_base_pval)), + "twodash" + ), + c("Protein", "logFC", log_adjp, "line") + ) + } + if (!FCcutoff) { + plot <- plot + + geom_line( + data = sigcut, + aes_string(x = "logFC", y = log_adjp, linetype = "line"), + colour = "darkgrey", + size = 0.6, + show.legend = TRUE + ) + + scale_linetype_manual( + values = c("twodash" = 6), + labels = c(paste0("Adj p-value cutoff (", sig, ")")) + ) + + guides( + colour = guide_legend(override.aes = list(linetype = 0)), + linetype = guide_legend() + ) + } + if (is.numeric(FCcutoff)) { + FCcutpos <- data.table::setnames( + data.table( + "sigline", + log(FCcutoff, log_base_FC), + seq(y.limdown, y.limup, length.out = 10), + "dotted" + ), + c("Protein", 
"logFC", log_adjp, "line") + ) + FCcutneg <- data.table::setnames( + data.table( + "sigline", + (-log(FCcutoff, log_base_FC)), + seq(y.limdown, y.limup, length.out = 10), + "dotted" + ), + c("Protein", "logFC", log_adjp, "line") + ) + plot <- plot + + geom_line( + data = sigcut, + aes_string(x = "logFC", y = log_adjp, linetype = "line"), + colour = "darkgrey", + size = 0.6, + show.legend = TRUE + ) + + geom_line( + data = FCcutpos, + aes_string(x = "logFC", y = log_adjp, linetype = "line"), + colour = "darkgrey", + size = 0.6, + show.legend = TRUE + ) + + geom_line( + data = FCcutneg, + aes_string(x = "logFC", y = log_adjp, linetype = "line"), + colour = "darkgrey", + size = 0.6 + ) + + scale_linetype_manual( + values = c("dotted" = 3, "twodash" = 6), + labels = c( + paste0("Fold change cutoff (", FCcutoff, ")"), + paste0("Adj p-value cutoff (", sig, ")") + ) + ) + + guides( + colour = guide_legend(override.aes = list(linetype = 0)), + linetype = guide_legend() + ) + } + plot <- plot + + theme_msstats("VOLCANOPLOT", x.axis.size, y.axis.size, + legend.size, + strip_background = element_rect(), + strip_text_x = element_text(), + legend_position = "bottom", legend.title = element_blank() + ) + plot } @@ -281,31 +336,37 @@ colMin <- function(data) sapply(data, min, na.rm = TRUE) #' @param log_base 2 or 10 #' @inheritParams groupComparisonPlots #' @keywords internal -.makeComparison = function( - input, log_base, dot.size, x.axis.size, y.axis.size, - text.angle, hjust, vjust, y.limdown, y.limup -) { - logFC = ciw = NULL - - protein = unique(input$Protein) - plot = ggplot(input, aes_string(x = "Label", y = "logFC")) + - geom_errorbar(aes(ymax = logFC + ciw, ymin = logFC - ciw), - data = input, - width = 0.1, - colour = "red") + - geom_point(size = dot.size, - colour = "darkred") + - scale_x_discrete('Comparison') + - geom_hline(yintercept = 0, - linetype = "twodash", - colour = "darkgrey", - size = 0.6) + - labs(title = protein) + - theme_msstats("COMPARISONPLOT", x.axis.size, y.axis.size, - text_angle = text.angle, text_hjust = hjust, - text_vjust = vjust) - plot = plot + - scale_y_continuous(paste0("Log", log_base, "-Fold Change"), - limits = c(y.limdown, y.limup)) - plot +.makeComparison <- function( + input, log_base, dot.size, x.axis.size, y.axis.size, + text.angle, hjust, vjust, y.limdown, y.limup) { + logFC <- ciw <- NULL + + protein <- unique(input$Protein) + plot <- ggplot(input, aes_string(x = "Label", y = "logFC")) + + geom_errorbar(aes(ymax = logFC + ciw, ymin = logFC - ciw), + data = input, + width = 0.1, + colour = "red" + ) + + geom_point( + size = dot.size, + colour = "darkred" + ) + + scale_x_discrete("Comparison") + + geom_hline( + yintercept = 0, + linetype = "twodash", + colour = "darkgrey", + size = 0.6 + ) + + labs(title = protein) + + theme_msstats("COMPARISONPLOT", x.axis.size, y.axis.size, + text_angle = text.angle, text_hjust = hjust, + text_vjust = vjust + ) + plot <- plot + + scale_y_continuous(paste0("Log", log_base, "-Fold Change"), + limits = c(y.limdown, y.limup) + ) + plot } diff --git a/R/utils_imputation.R b/R/utils_imputation.R index 16f9cd53..12346821 100644 --- a/R/utils_imputation.R +++ b/R/utils_imputation.R @@ -1,54 +1,60 @@ #' @importFrom data.table uniqueN #' @importFrom survival survreg Surv #' @keywords internal -.fitSurvival = function(input) { - FEATURE = RUN = NULL - - missingness_filter = is.finite(input$newABUNDANCE) - n_total = nrow(input[missingness_filter, ]) - n_features = data.table::uniqueN(input[missingness_filter, FEATURE]) - n_runs = 
data.table::uniqueN(input[missingness_filter, RUN]) - is_labeled = data.table::uniqueN(input$LABEL) > 1 - countdf = n_total < n_features + n_runs - 1 +.fitSurvival <- function(input) { + FEATURE <- RUN <- NULL - # TODO: set.seed here? - set.seed(100) - if (is_labeled) { - if (length(unique(input$FEATURE)) == 1) { - # with single feature, not converge, wrong intercept - # need to check - fit = survreg(Surv(newABUNDANCE, cen, type='left') ~ RUN + ref, - data = input, dist = "gaussian", - control = list(maxiter=90)) - } else { - if (countdf) { - fit = survreg(Surv(newABUNDANCE, cen, type='left') ~ RUN + ref, - data = input, dist = "gaussian", - control = list(maxiter=90)) - } else { - fit = survreg(Surv(newABUNDANCE, cen, type='left') ~ FEATURE + RUN + ref, - data = input, dist = "gaussian", - control = list(maxiter=90)) - } - } + missingness_filter <- is.finite(input$newABUNDANCE) + n_total <- nrow(input[missingness_filter, ]) + n_features <- data.table::uniqueN(input[missingness_filter, FEATURE]) + n_runs <- data.table::uniqueN(input[missingness_filter, RUN]) + is_labeled <- data.table::uniqueN(input$LABEL) > 1 + countdf <- n_total < n_features + n_runs - 1 + + # TODO: set.seed here? + set.seed(100) + if (is_labeled) { + if (length(unique(input$FEATURE)) == 1) { + # with single feature, not converge, wrong intercept + # need to check + fit <- survreg(Surv(newABUNDANCE, cen, type = "left") ~ RUN + ref, + data = input, dist = "gaussian", + control = list(maxiter = 90) + ) + } else { + if (countdf) { + fit <- survreg(Surv(newABUNDANCE, cen, type = "left") ~ RUN + ref, + data = input, dist = "gaussian", + control = list(maxiter = 90) + ) + } else { + fit <- survreg(Surv(newABUNDANCE, cen, type = "left") ~ FEATURE + RUN + ref, + data = input, dist = "gaussian", + control = list(maxiter = 90) + ) + } + } + } else { + if (n_features == 1L) { + fit <- survreg(Surv(newABUNDANCE, cen, type = "left") ~ RUN, + data = input, dist = "gaussian", + control = list(maxiter = 90) + ) } else { - if (n_features == 1L) { - fit = survreg(Surv(newABUNDANCE, cen, type = "left") ~ RUN, - data = input, dist = "gaussian", - control = list(maxiter=90)) - } else { - if (countdf) { - fit = survreg(Surv(newABUNDANCE, cen, type = "left") ~ RUN, - data = input, dist = "gaussian", - control = list(maxiter=90)) - } else { - fit = survreg(Surv(newABUNDANCE, cen, type = "left") ~ FEATURE + RUN, - data = input, dist = "gaussian", - control = list(maxiter=90)) - } - } + if (countdf) { + fit <- survreg(Surv(newABUNDANCE, cen, type = "left") ~ RUN, + data = input, dist = "gaussian", + control = list(maxiter = 90) + ) + } else { + fit <- survreg(Surv(newABUNDANCE, cen, type = "left") ~ FEATURE + RUN, + data = input, dist = "gaussian", + control = list(maxiter = 90) + ) + } } - fit + } + fit } @@ -57,9 +63,9 @@ #' @return numeric vector of predictions #' @importFrom stats predict #' @keywords internal -.addSurvivalPredictions = function(input) { - LABEL = NULL - - survival_fit = .fitSurvival(input[LABEL == "L", ]) - predict(survival_fit, newdata = input) +.addSurvivalPredictions <- function(input) { + LABEL <- NULL + + survival_fit <- .fitSurvival(input[LABEL == "L", ]) + predict(survival_fit, newdata = input) } diff --git a/R/utils_normalize.R b/R/utils_normalize.R index 70820e20..c539a27e 100644 --- a/R/utils_normalize.R +++ b/R/utils_normalize.R @@ -1,41 +1,41 @@ #' Normalize MS data -#' +#' #' @param input data.table in MSstats format #' @param normalization_method name of a chosen normalization method: "NONE" or #' "FALSE" for 
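# Toy sketch of the accelerated failure time fit used by .fitSurvival above:
# censored abundances enter survreg() as left-censored observations. The data
# below are made up; in the package the formula may also include FEATURE and
# ref terms depending on the design and label type.
library(survival)
toy <- data.frame(newABUNDANCE = c(20, 21, 19, 18, 20, 17, 17, 18, 16),
                  cen = c(1, 1, 1, 1, 1, 1, 0, 0, 1),   # 0 = left-censored
                  RUN = factor(rep(1:3, each = 3)))
fit <- survreg(Surv(newABUNDANCE, cen, type = "left") ~ RUN,
               data = toy, dist = "gaussian", control = list(maxiter = 90))
predict(fit, newdata = toy)   # model-based abundances used for imputation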
no normalization, "EQUALIZEMEDIANS" for median normalization, #' "QUANTILE" normalization for quantile normalization from `preprocessCore` package, #' "GLOBALSTANDARDS" for normalization based on selected peptides or proteins. -#' @param peptides_dict `data.table` of names of peptides and their corresponding -#' features. -#' @param standards character vector with names of standards, required if +#' @param peptides_dict `data.table` of names of peptides and their corresponding +#' features. +#' @param standards character vector with names of standards, required if #' "GLOBALSTANDARDS" method was selected. -#' +#' #' @export -#' +#' #' @return data.table -#' +#' #' @examples -#' raw = DDARawData -#' method = "TMP" -#' cens = "NA" -#' impute = TRUE +#' raw <- DDARawData +#' method <- "TMP" +#' cens <- "NA" +#' impute <- TRUE #' MSstatsConvert::MSstatsLogsSettings(FALSE) -#' input = MSstatsPrepareForDataProcess(raw, 2, NULL) -#' input = MSstatsNormalize(input, "EQUALIZEMEDIANS") # median normalization +#' input <- MSstatsPrepareForDataProcess(raw, 2, NULL) +#' input <- MSstatsNormalize(input, "EQUALIZEMEDIANS") # median normalization #' head(input) -#' -MSstatsNormalize = function(input, normalization_method, peptides_dict = NULL, standards = NULL) { - normalization_method = toupper(normalization_method) - if (normalization_method == "NONE" | normalization_method == "FALSE") { - return(input) - } else if (normalization_method == "EQUALIZEMEDIANS") { - input = .normalizeMedian(input) - } else if (normalization_method == "QUANTILE") { - input = .normalizeQuantile(input) - } else if (normalization_method == "GLOBALSTANDARDS") { - input = .normalizeGlobalStandards(input, peptides_dict, standards) - } - input +#' +MSstatsNormalize <- function(input, normalization_method, peptides_dict = NULL, standards = NULL) { + normalization_method <- toupper(normalization_method) + if (normalization_method == "NONE" | normalization_method == "FALSE") { + return(input) + } else if (normalization_method == "EQUALIZEMEDIANS") { + input <- .normalizeMedian(input) + } else if (normalization_method == "QUANTILE") { + input <- .normalizeQuantile(input) + } else if (normalization_method == "GLOBALSTANDARDS") { + input <- .normalizeGlobalStandards(input, peptides_dict, standards) + } + input } @@ -43,23 +43,26 @@ MSstatsNormalize = function(input, normalization_method, peptides_dict = NULL, s #' @param input `data.table` in standard MSstats format #' @importFrom stats median #' @keywords internal -.normalizeMedian = function(input) { - ABUNDANCE_RUN = ABUNDANCE_FRACTION = ABUNDANCE = NULL - - if (length(unique(input$LABEL)) == 1L) { - label = "L" - } else { - label = "H" - } - input[, ABUNDANCE_RUN := .getMedian(.SD, label), - by = c("RUN", "FRACTION"), .SDcols = c("ABUNDANCE", "LABEL")] - input[, ABUNDANCE_FRACTION := median(ABUNDANCE_RUN, na.rm = TRUE), - by = "FRACTION"] - input[, ABUNDANCE := ABUNDANCE - ABUNDANCE_RUN + ABUNDANCE_FRACTION] - input = input[, !(colnames(input) %in% c("ABUNDANCE_RUN", "ABUNDANCE_FRACTION")), - with = FALSE] - getOption("MSstatsLog")("Normalization based on median: OK") - input +.normalizeMedian <- function(input) { + ABUNDANCE_RUN <- ABUNDANCE_FRACTION <- ABUNDANCE <- NULL + + if (length(unique(input$LABEL)) == 1L) { + label <- "L" + } else { + label <- "H" + } + input[, ABUNDANCE_RUN := .getMedian(.SD, label), + by = c("RUN", "FRACTION"), .SDcols = c("ABUNDANCE", "LABEL") + ] + input[, ABUNDANCE_FRACTION := median(ABUNDANCE_RUN, na.rm = TRUE), + by = "FRACTION" + ] + input[, 
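# Toy illustration of the median equalization performed by .normalizeMedian
# above (single fraction, label-free case): every run is shifted so that its
# median equals the median of the run medians.
abundance <- list(run1 = c(10, 12, 14), run2 = c(11, 13, 15), run3 = c(9, 11, 13))
run_medians <- vapply(abundance, median, numeric(1))
fraction_median <- median(run_medians)
normalized <- Map(function(x, m) x - m + fraction_median, abundance, run_medians)
vapply(normalized, median, numeric(1))   # all runs now share the same median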
ABUNDANCE := ABUNDANCE - ABUNDANCE_RUN + ABUNDANCE_FRACTION] + input <- input[, !(colnames(input) %in% c("ABUNDANCE_RUN", "ABUNDANCE_FRACTION")), + with = FALSE + ] + getOption("MSstatsLog")("Normalization based on median: OK") + input } @@ -68,67 +71,77 @@ MSstatsNormalize = function(input, normalization_method, peptides_dict = NULL, s #' @param label "L" for light isotopes, "H" for heavy isotopes. #' @importFrom stats median #' @keywords internal -.getMedian = function(df, label) { - median(df$ABUNDANCE[df$LABEL == label], na.rm = TRUE) +.getMedian <- function(df, label) { + median(df$ABUNDANCE[df$LABEL == label], na.rm = TRUE) } #' Quantile normalization based on the `preprocessCore` package #' @param input `data.table` in MSstats standard format #' @keywords internal -.normalizeQuantile = function(input) { - ABUNDANCE = FRACTION = RUN = LABEL = GROUP_ORIGINAL = NULL - SUBJECT_ORIGINAL = PROTEIN = PEPTIDE = TRANSITION = INTENSITY = NULL - - input[ABUNDANCE == 0, "ABUNDANCE"] = 1 - fractions = unique(input$FRACTION) - is_labeled = data.table::uniqueN(input$LABEL) > 1 - per_fraction_normalized = vector("list", length(fractions)) - - if (!is_labeled) { - grouping_cols = c("FEATURE", "RUN") - for (fraction_id in fractions) { - fraction_runs = as.character(unique(input[FRACTION == fractions[fraction_id], RUN])) - wide = .getWideTable(input, fraction_runs) - normalized = .quantileNormalizationSingleLabel(wide, fraction_runs) - normalized = data.table::melt(normalized, id.vars = "FEATURE", - variable.name = "RUN", value.name = "ABUNDANCE") - per_fraction_normalized[[fraction_id]] = normalized - } - } else { - grouping_cols = c("LABEL", "FEATURE", "RUN") - for (fraction_id in fractions) { - fraction_runs = as.character(unique(input[FRACTION == fractions[fraction_id], RUN])) - wide_h = .getWideTable(input, fraction_runs, "H", FALSE) - normalized_h = .quantileNormalizationSingleLabel(wide_h, - fraction_runs, "H") - normalized_h$LABEL = "H" - wide_l = .getWideTable(input, fraction_runs) - for (run_col in fraction_runs) { - wide_l[[run_col]] = wide_l[[run_col]] - wide_h[[run_col]] + normalized_h[[run_col]] - } - wide_l$LABEL = "L" - per_fraction_normalized[[fraction_id]] = data.table::melt( - rbind(normalized_h, wide_l), id.vars = c("LABEL", "FEATURE"), - variable.name = "RUN", value.name = "ABUNDANCE") - } +.normalizeQuantile <- function(input) { + ABUNDANCE <- FRACTION <- RUN <- LABEL <- GROUP_ORIGINAL <- NULL + SUBJECT_ORIGINAL <- PROTEIN <- PEPTIDE <- TRANSITION <- INTENSITY <- NULL + + input[ABUNDANCE == 0, "ABUNDANCE"] <- 1 + fractions <- unique(input$FRACTION) + is_labeled <- data.table::uniqueN(input$LABEL) > 1 + per_fraction_normalized <- vector("list", length(fractions)) + + if (!is_labeled) { + grouping_cols <- c("FEATURE", "RUN") + for (fraction_id in fractions) { + fraction_runs <- as.character(unique(input[FRACTION == fractions[fraction_id], RUN])) + wide <- .getWideTable(input, fraction_runs) + normalized <- .quantileNormalizationSingleLabel(wide, fraction_runs) + normalized <- data.table::melt(normalized, + id.vars = "FEATURE", + variable.name = "RUN", value.name = "ABUNDANCE" + ) + per_fraction_normalized[[fraction_id]] <- normalized } - - per_fraction_normalized = data.table::rbindlist(per_fraction_normalized) - input = merge(input[, colnames(input) != "ABUNDANCE", with = FALSE], - per_fraction_normalized, by = grouping_cols) - input = input[order(LABEL, GROUP_ORIGINAL, SUBJECT_ORIGINAL, - RUN, PROTEIN, PEPTIDE, TRANSITION), ] - input[!is.na(INTENSITY) & INTENSITY == 1, 
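# Toy sketch of the per-fraction quantile normalization above: runs become
# columns of a wide matrix and preprocessCore::normalize.quantiles() forces
# them onto a common intensity distribution (dimnames are restored manually
# because the returned matrix is bare).
library(preprocessCore)
wide <- matrix(c(10, 12, 14, 11, 15, 18, 9, 12, 16), ncol = 3,
               dimnames = list(paste0("feature", 1:3), paste0("run", 1:3)))
normalized <- normalize.quantiles(wide)
dimnames(normalized) <- dimnames(wide)
apply(normalized, 2, sort)   # every run now has the same sorted values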
"ABUNDANCE"] = 0 # Skyline - - if (length(fractions) == 1L) { - msg = "Normalization : Quantile normalization - okay" - } else { - msg = "Normalization : Quantile normalization per fraction - okay" + } else { + grouping_cols <- c("LABEL", "FEATURE", "RUN") + for (fraction_id in fractions) { + fraction_runs <- as.character(unique(input[FRACTION == fractions[fraction_id], RUN])) + wide_h <- .getWideTable(input, fraction_runs, "H", FALSE) + normalized_h <- .quantileNormalizationSingleLabel( + wide_h, + fraction_runs, "H" + ) + normalized_h$LABEL <- "H" + wide_l <- .getWideTable(input, fraction_runs) + for (run_col in fraction_runs) { + wide_l[[run_col]] <- wide_l[[run_col]] - wide_h[[run_col]] + normalized_h[[run_col]] + } + wide_l$LABEL <- "L" + per_fraction_normalized[[fraction_id]] <- data.table::melt( + rbind(normalized_h, wide_l), + id.vars = c("LABEL", "FEATURE"), + variable.name = "RUN", value.name = "ABUNDANCE" + ) } - getOption("MSstatsLog")("INFO", msg) - - input + } + + per_fraction_normalized <- data.table::rbindlist(per_fraction_normalized) + input <- merge(input[, colnames(input) != "ABUNDANCE", with = FALSE], + per_fraction_normalized, + by = grouping_cols + ) + input <- input[order( + LABEL, GROUP_ORIGINAL, SUBJECT_ORIGINAL, + RUN, PROTEIN, PEPTIDE, TRANSITION + ), ] + input[!is.na(INTENSITY) & INTENSITY == 1, "ABUNDANCE"] <- 0 # Skyline + + if (length(fractions) == 1L) { + msg <- "Normalization : Quantile normalization - okay" + } else { + msg <- "Normalization : Quantile normalization per fraction - okay" + } + getOption("MSstatsLog")("INFO", msg) + + input } @@ -138,21 +151,23 @@ MSstatsNormalize = function(input, normalization_method, peptides_dict = NULL, s #' @param label "L" for light isotopes, "H" for heavy isotopes #' @param remove_missing if TRUE, only non-missing values will be considered #' @keywords internal -.getWideTable = function(input, runs, label = "L", remove_missing = TRUE) { - RUN = NULL - - if (remove_missing) { - nonmissing_filter = !is.na(input$INTENSITY) - } else { - nonmissing_filter = rep(TRUE, nrow(input)) - } - label_filter = input$LABEL == label - - wide = data.table::dcast(input[nonmissing_filter & label_filter & (RUN %in% runs)], - FEATURE ~ RUN, value.var = "ABUNDANCE") - wide = wide[, lapply(.SD, .replaceZerosWithNA)] - colnames(wide)[-1] = runs - wide +.getWideTable <- function(input, runs, label = "L", remove_missing = TRUE) { + RUN <- NULL + + if (remove_missing) { + nonmissing_filter <- !is.na(input$INTENSITY) + } else { + nonmissing_filter <- rep(TRUE, nrow(input)) + } + label_filter <- input$LABEL == label + + wide <- data.table::dcast(input[nonmissing_filter & label_filter & (RUN %in% runs)], + FEATURE ~ RUN, + value.var = "ABUNDANCE" + ) + wide <- wide[, lapply(.SD, .replaceZerosWithNA)] + colnames(wide)[-1] <- runs + wide } @@ -162,26 +177,27 @@ MSstatsNormalize = function(input, normalization_method, peptides_dict = NULL, s #' @param label "L" for light isotopes, "H" for heavy isotopes #' @importFrom preprocessCore normalize.quantiles #' @keywords internal -.quantileNormalizationSingleLabel = function(input, runs, label = "L") { - FEATURE = NULL - - normalized = input[, list(FEATURE, preprocessCore::normalize.quantiles(as.matrix(.SD))), - .SDcols = runs] - colnames(normalized)[-1] = runs - normalized +.quantileNormalizationSingleLabel <- function(input, runs, label = "L") { + FEATURE <- NULL + + normalized <- input[, list(FEATURE, preprocessCore::normalize.quantiles(as.matrix(.SD))), + .SDcols = runs + ] + colnames(normalized)[-1] 
<- runs + normalized } #' Utility function for normalization: replace 0s by NA #' @param vec vector #' @keywords internal -.replaceZerosWithNA = function(vec) { - vec = unlist(vec, FALSE, FALSE) - if (is.character(vec) | is.factor(vec)) { - vec - } else { - ifelse(vec == 0, NA, vec) - } +.replaceZerosWithNA <- function(vec) { + vec <- unlist(vec, FALSE, FALSE) + if (is.character(vec) | is.factor(vec)) { + vec + } else { + ifelse(vec == 0, NA, vec) + } } @@ -189,163 +205,194 @@ MSstatsNormalize = function(input, normalization_method, peptides_dict = NULL, s #' @inheritParams MSstatsNormalize #' @importFrom data.table melt uniqueN #' @keywords internal -.normalizeGlobalStandards = function(input, peptides_dict, standards) { - PeptideSequence = PEPTIDE = PROTEIN = median_by_fraction = NULL - Standard = FRACTION = LABEL = ABUNDANCE = RUN = GROUP = NULL - - proteins = as.character(unique(input$PROTEIN)) - means_by_standard = unique(input[, list(RUN)]) - for (standard_id in seq_along(standards)) { - peptide_name = unlist(peptides_dict[PeptideSequence == standards[standard_id], - as.character(PEPTIDE)], FALSE, FALSE) - if (length(peptide_name) > 0) { - standard = input[PEPTIDE == peptide_name, ] - } else { - if (standards[standard_id] %in% proteins) { - standard = input[PROTEIN == standards[standard_id], ] - } else { - msg = paste("global standard peptides or proteins, ", - standards[standard_id],", is not in dataset.", - "Please check whether 'nameStandards' input is correct or not.") - getOption("MSstatsLog")("ERROR", msg) - stop(msg) - } - } - mean_by_run = standard[GROUP != "0" & !is.na(ABUNDANCE), - list(mean_abundance = mean(ABUNDANCE, na.rm = TRUE)), - by = "RUN"] - colnames(mean_by_run)[2] = paste0("meanStandard", standard_id) - means_by_standard = merge(means_by_standard, mean_by_run, - by = "RUN", all.x = TRUE) - } - means_by_standard = data.table::melt(means_by_standard, id.vars = "RUN", - variable.name = "Standard", value.name = "ABUNDANCE") - means_by_standard[, mean_by_run := mean(ABUNDANCE, na.rm = TRUE), by = "RUN"] - means_by_standard = merge(means_by_standard, unique(input[, list(RUN, FRACTION)]), - by = "RUN") - means_by_standard[, median_by_fraction := median(mean_by_run, na.rm = TRUE), - by = "FRACTION"] - means_by_standard[, ABUNDANCE := NULL] - means_by_standard[, Standard := NULL] - means_by_standard = unique(means_by_standard) - - input = merge(input, means_by_standard, all.x = TRUE, by = c("RUN", "FRACTION")) - input[, ABUNDANCE := ifelse(LABEL == "L", ABUNDANCE - mean_by_run + median_by_fraction, ABUNDANCE)] - - if (data.table::uniqueN(input$FRACTION) == 1L) { - msg = "Normalization : normalization with global standards protein - okay" +.normalizeGlobalStandards <- function(input, peptides_dict, standards) { + PeptideSequence <- PEPTIDE <- PROTEIN <- median_by_fraction <- NULL + Standard <- FRACTION <- LABEL <- ABUNDANCE <- RUN <- GROUP <- NULL + + proteins <- as.character(unique(input$PROTEIN)) + means_by_standard <- unique(input[, list(RUN)]) + for (standard_id in seq_along(standards)) { + peptide_name <- unlist(peptides_dict[ + PeptideSequence == standards[standard_id], + as.character(PEPTIDE) + ], FALSE, FALSE) + if (length(peptide_name) > 0) { + standard <- input[PEPTIDE == peptide_name, ] } else { - msg = "Normalization : normalization with global standards protein - okay" + if (standards[standard_id] %in% proteins) { + standard <- input[PROTEIN == standards[standard_id], ] + } else { + msg <- paste( + "global standard peptides or proteins, ", + 
standards[standard_id], ", is not in dataset.", + "Please check whether 'nameStandards' input is correct or not." + ) + getOption("MSstatsLog")("ERROR", msg) + stop(msg) + } } - getOption("MSstatsLog")("INFO", msg) - input[ , !(colnames(input) %in% c("mean_by_run", "median_by_fraction")), with = FALSE] + mean_by_run <- standard[GROUP != "0" & !is.na(ABUNDANCE), + list(mean_abundance = mean(ABUNDANCE, na.rm = TRUE)), + by = "RUN" + ] + colnames(mean_by_run)[2] <- paste0("meanStandard", standard_id) + means_by_standard <- merge(means_by_standard, mean_by_run, + by = "RUN", all.x = TRUE + ) + } + means_by_standard <- data.table::melt(means_by_standard, + id.vars = "RUN", + variable.name = "Standard", value.name = "ABUNDANCE" + ) + means_by_standard[, mean_by_run := mean(ABUNDANCE, na.rm = TRUE), by = "RUN"] + means_by_standard <- merge(means_by_standard, unique(input[, list(RUN, FRACTION)]), + by = "RUN" + ) + means_by_standard[, median_by_fraction := median(mean_by_run, na.rm = TRUE), + by = "FRACTION" + ] + means_by_standard[, ABUNDANCE := NULL] + means_by_standard[, Standard := NULL] + means_by_standard <- unique(means_by_standard) + + input <- merge(input, means_by_standard, all.x = TRUE, by = c("RUN", "FRACTION")) + input[, ABUNDANCE := ifelse(LABEL == "L", ABUNDANCE - mean_by_run + median_by_fraction, ABUNDANCE)] + + if (data.table::uniqueN(input$FRACTION) == 1L) { + msg <- "Normalization : normalization with global standards protein - okay" + } else { + msg <- "Normalization : normalization with global standards protein - okay" + } + getOption("MSstatsLog")("INFO", msg) + input[, !(colnames(input) %in% c("mean_by_run", "median_by_fraction")), with = FALSE] } #' Re-format the data before feature selection -#' +#' #' @param input `data.table` in MSstats format -#' +#' #' @importFrom data.table uniqueN -#' +#' #' @export -#' +#' #' @return data.table -#' -#' @examples -#' raw = DDARawData -#' method = "TMP" -#' cens = "NA" -#' impute = TRUE +#' +#' @examples +#' raw <- DDARawData +#' method <- "TMP" +#' cens <- "NA" +#' impute <- TRUE #' MSstatsConvert::MSstatsLogsSettings(FALSE) -#' input = MSstatsPrepareForDataProcess(raw, 2, NULL) -#' input = MSstatsNormalize(input, "EQUALIZEMEDIANS") -#' input = MSstatsMergeFractions(input) +#' input <- MSstatsPrepareForDataProcess(raw, 2, NULL) +#' input <- MSstatsNormalize(input, "EQUALIZEMEDIANS") +#' input <- MSstatsMergeFractions(input) #' head(input) -#' -MSstatsMergeFractions = function(input) { - ABUNDANCE = INTENSITY = GROUP_ORIGINAL = SUBJECT_ORIGINAL = RUN = NULL - originalRUN = FRACTION = TECHREPLICATE = tmp = merged = newRun = NULL - ncount = FEATURE = NULL - - input[!is.na(ABUNDANCE) & ABUNDANCE < 0, "ABUNDANCE"] = 0 - input[!is.na(INTENSITY) & INTENSITY == 1, "ABUNDANCE"] = 0 - if (data.table::uniqueN(input$FRACTION) == 1) { - return(input) - } else { - if (is.element("TECHREPLICATE", colnames(input))) { - run_info = unique(input[, - list(GROUP_ORIGINAL, SUBJECT_ORIGINAL, RUN, - originalRUN, FRACTION, TECHREPLICATE)]) - match_runs = try( - data.table::dcast(run_info, - GROUP_ORIGINAL + SUBJECT_ORIGINAL + TECHREPLICATE ~ FRACTION, - value.var = "originalRUN"), silent = TRUE - ) - if (inherits(match_runs, "try-error")) { - msg = "*** error : can't figure out which multiple runs come from the same sample." 
- getOption("MSstatsLog")("ERROR", msg) - stop(msg) - } else { - input$newRun = NA - input$newRun = as.character(input$newRun) - run_info[, GROUP_ORIGINAL := as.character(GROUP_ORIGINAL)] - run_info[, SUBJECT_ORIGINAL := as.character(SUBJECT_ORIGINAL)] - for (k in seq_len(nrow(run_info))) { - input[originalRUN %in% run_info$originalRUN[k], "newRun"] = paste(paste(run_info[k, 1:4], collapse = "_"), 'merged', sep = "_") - } - - select_fraction = input[!is.na(ABUNDANCE) & ABUNDANCE > 0, - list(ncount = .N), - by = c("FEATURE", "FRACTION")] - select_fraction = select_fraction[ncount == data.table::uniqueN(input$newRun)] - select_fraction[, tmp := paste(FEATURE, FRACTION, sep = "_")] - input$tmp = paste(input$FEATURE, input$FRACTION, sep="_") - input = input[!(tmp %in% select_fraction$tmp), ] - input$originalRUN = input$newRun - input$RUN = input$originalRUN - input$RUN = factor(input$RUN, levels=unique(input$RUN), labels=seq(1, length(unique(input$RUN)))) - input = input[, !(colnames(input) %in% c('tmp','newRun')), with = FALSE] - } - } else { - run_info = unique(input[, - list(GROUP_ORIGINAL, SUBJECT_ORIGINAL, RUN, - originalRUN, FRACTION)]) - match_runs = try( - data.table::dcast(run_info, - GROUP_ORIGINAL + SUBJECT_ORIGINAL ~ FRACTION, - value.var = "originalRUN"), silent = TRUE - ) - if (inherits(match_runs, "try-error")) { - msg = "*** error : can't figure out which multiple runs come from the same sample." - getOption("MSstatsLog")("ERROR", msg) - stop(msg) - } else { - match_runs[, merged := "merged"] - match_runs[, newRun := do.call(paste, c(.SD, sep = "_")), - .SDcols = c(1:3, ncol(match_runs))] - match_runs = unique(match_runs[, list(GROUP_ORIGINAL, - SUBJECT_ORIGINAL, - newRun)]) - - input = merge(input, match_runs, - by = c("GROUP_ORIGINAL", "SUBJECT_ORIGINAL"), - all.x = TRUE) - select_fraction = input[!is.na(ABUNDANCE) & input$ABUNDANCE > 0, - list(ncount = .N), - by = c("FEATURE", "FRACTION")] - select_fraction = select_fraction[ncount != 0] - select_fraction[, tmp := paste(FEATURE, FRACTION, sep = "_")] - input$tmp = paste(input$FEATURE, input$FRACTION, sep = "_") - input = input[tmp %in% select_fraction$tmp, ] - input$originalRUN = input$newRun - input$RUN = input$originalRUN - input$RUN = factor(input$RUN, levels = unique(input$RUN), - labels = seq_along(unique(input$RUN))) - input = input[, !(colnames(input) %in% c('tmp','newRun')), - with = FALSE] - } +#' +MSstatsMergeFractions <- function(input) { + ABUNDANCE <- INTENSITY <- GROUP_ORIGINAL <- SUBJECT_ORIGINAL <- RUN <- NULL + originalRUN <- FRACTION <- TECHREPLICATE <- tmp <- merged <- newRun <- NULL + ncount <- FEATURE <- NULL + + input[!is.na(ABUNDANCE) & ABUNDANCE < 0, "ABUNDANCE"] <- 0 + input[!is.na(INTENSITY) & INTENSITY == 1, "ABUNDANCE"] <- 0 + if (data.table::uniqueN(input$FRACTION) == 1) { + return(input) + } else { + if (is.element("TECHREPLICATE", colnames(input))) { + run_info <- unique(input[ + , + list( + GROUP_ORIGINAL, SUBJECT_ORIGINAL, RUN, + originalRUN, FRACTION, TECHREPLICATE + ) + ]) + match_runs <- try( + data.table::dcast(run_info, + GROUP_ORIGINAL + SUBJECT_ORIGINAL + TECHREPLICATE ~ FRACTION, + value.var = "originalRUN" + ), + silent = TRUE + ) + if (inherits(match_runs, "try-error")) { + msg <- "*** error : can't figure out which multiple runs come from the same sample." 
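+        # The dcast() above tries to line up one originalRUN per
+        # (GROUP_ORIGINAL, SUBJECT_ORIGINAL, TECHREPLICATE) and FRACTION; if that
+        # reshaping fails, runs cannot be matched to samples and merging stops here.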
+ getOption("MSstatsLog")("ERROR", msg) + stop(msg) + } else { + input$newRun <- NA + input$newRun <- as.character(input$newRun) + run_info[, GROUP_ORIGINAL := as.character(GROUP_ORIGINAL)] + run_info[, SUBJECT_ORIGINAL := as.character(SUBJECT_ORIGINAL)] + for (k in seq_len(nrow(run_info))) { + input[originalRUN %in% run_info$originalRUN[k], "newRun"] <- paste(paste(run_info[k, 1:4], collapse = "_"), "merged", sep = "_") } + + select_fraction <- input[!is.na(ABUNDANCE) & ABUNDANCE > 0, + list(ncount = .N), + by = c("FEATURE", "FRACTION") + ] + select_fraction <- select_fraction[ncount == data.table::uniqueN(input$newRun)] + select_fraction[, tmp := paste(FEATURE, FRACTION, sep = "_")] + input$tmp <- paste(input$FEATURE, input$FRACTION, sep = "_") + input <- input[!(tmp %in% select_fraction$tmp), ] + input$originalRUN <- input$newRun + input$RUN <- input$originalRUN + input$RUN <- factor(input$RUN, levels = unique(input$RUN), labels = seq(1, length(unique(input$RUN)))) + input <- input[, !(colnames(input) %in% c("tmp", "newRun")), with = FALSE] + } + } else { + run_info <- unique(input[ + , + list( + GROUP_ORIGINAL, SUBJECT_ORIGINAL, RUN, + originalRUN, FRACTION + ) + ]) + match_runs <- try( + data.table::dcast(run_info, + GROUP_ORIGINAL + SUBJECT_ORIGINAL ~ FRACTION, + value.var = "originalRUN" + ), + silent = TRUE + ) + if (inherits(match_runs, "try-error")) { + msg <- "*** error : can't figure out which multiple runs come from the same sample." + getOption("MSstatsLog")("ERROR", msg) + stop(msg) + } else { + match_runs[, merged := "merged"] + match_runs[, newRun := do.call(paste, c(.SD, sep = "_")), + .SDcols = c(1:3, ncol(match_runs)) + ] + match_runs <- unique(match_runs[, list( + GROUP_ORIGINAL, + SUBJECT_ORIGINAL, + newRun + )]) + + input <- merge(input, match_runs, + by = c("GROUP_ORIGINAL", "SUBJECT_ORIGINAL"), + all.x = TRUE + ) + select_fraction <- input[!is.na(ABUNDANCE) & input$ABUNDANCE > 0, + list(ncount = .N), + by = c("FEATURE", "FRACTION") + ] + select_fraction <- select_fraction[ncount != 0] + select_fraction[, tmp := paste(FEATURE, FRACTION, sep = "_")] + input$tmp <- paste(input$FEATURE, input$FRACTION, sep = "_") + input <- input[tmp %in% select_fraction$tmp, ] + input$originalRUN <- input$newRun + input$RUN <- input$originalRUN + input$RUN <- factor(input$RUN, + levels = unique(input$RUN), + labels = seq_along(unique(input$RUN)) + ) + input <- input[, !(colnames(input) %in% c("tmp", "newRun")), + with = FALSE + ] + } } - input + } + input } diff --git a/R/utils_output.R b/R/utils_output.R index 521fdfb1..0704c804 100644 --- a/R/utils_output.R +++ b/R/utils_output.R @@ -1,5 +1,5 @@ #' Post-processing output from MSstats summarization -#' +#' #' @param input `data.table` in MSstats format #' @param summarized output of the `MSstatsSummarizeWithSingleCore` function #' @param processed output of MSstatsSelectFeatures @@ -7,98 +7,119 @@ #' (`summaryMethod` parameter to `dataProcess`) #' @param impute if TRUE, censored missing values were imputed #' (`MBimpute` parameter to `dataProcess`) -#' @param censored_symbol censored missing value indicator +#' @param censored_symbol censored missing value indicator #' (`censoredInt` parameter to `dataProcess`) -#' +#' #' @return list that consists of the following elements: #' \itemize{ -#' \item{FeatureLevelData}{ - feature-level data after processing} +#' \item{FeatureLevelData}{ - feature-level data after processing} #' \item{ProteinLevelData}{ - protein-level (summarized) data} #' \item{SummaryMethod}{ (string) - name of 
summarization method that was used} #' } -#' +#' #' @export -#' +#' #' @examples -#' raw = DDARawData -#' method = "TMP" -#' cens = "NA" -#' impute = TRUE +#' raw <- DDARawData +#' method <- "TMP" +#' cens <- "NA" +#' impute <- TRUE #' MSstatsConvert::MSstatsLogsSettings(FALSE) -#' input = MSstatsPrepareForDataProcess(raw, 2, NULL) -#' input = MSstatsNormalize(input, "EQUALIZEMEDIANS") -#' input = MSstatsMergeFractions(input) -#' input = MSstatsHandleMissing(input, "TMP", TRUE, "NA", 0.999) -#' input = MSstatsSelectFeatures(input, "all") -#' processed = getProcessed(input) -#' input = MSstatsPrepareForSummarization(input, method, impute, cens, FALSE) -#' summarized = MSstatsSummarizeWithSingleCore(input, method, impute, cens, FALSE, TRUE) -#' output = output = MSstatsSummarizationOutput(input, summarized, processed, -#' method, impute, cens) -#' -MSstatsSummarizationOutput = function(input, summarized, processed, - method, impute, censored_symbol) { - LABEL = TotalGroupMeasurements = GROUP = Protein = RUN = NULL - - input = .finalizeInput(input, summarized, method, impute, censored_symbol) - summarized = lapply(summarized, function(x) x[[1]]) - summarized = data.table::rbindlist(summarized) - if (inherits(summarized, "try-error")) { - msg = paste("*** error : can't summarize per subplot with ", - method, ".") - getOption("MSstatsLog")("ERROR", msg) - getOption("MSstatsMsg")("ERROR", msg) - rqall = NULL - rqmodelqc = NULL - workpred = NULL - } else { - input[LABEL == "L", TotalGroupMeasurements := uniqueN(.SD), - by = c("PROTEIN", "GROUP"), - .SDcols = c("FEATURE", "originalRUN")] - cols = intersect(c("PROTEIN", "originalRUN", "RUN", "GROUP", - "GROUP_ORIGINAL", "SUBJECT_ORIGINAL", - "TotalGroupMeasurements", - "NumMeasuredFeature", "MissingPercentage", - "more50missing", "NumImputedFeature"), - colnames(input)) - merge_col = ifelse(is.element("RUN", colnames(summarized)), - "RUN", "SUBJECT_ORIGINAL") - lab = unique(input[LABEL == "L", cols, with = FALSE]) - if (nlevels(input$LABEL) > 1) { - lab = lab[GROUP != 0] - } - lab = lab[, colnames(lab) != "GROUP", with = FALSE] - rqall = merge(summarized, lab, by.x = c(merge_col, "Protein"), - by.y = c(merge_col, "PROTEIN")) - data.table::setnames(rqall, c("GROUP_ORIGINAL", "SUBJECT_ORIGINAL"), - c("GROUP", "SUBJECT"), skip_absent = TRUE) - - rqall$GROUP = factor(as.character(rqall$GROUP)) - rqall$Protein = factor(rqall$Protein) - rqmodelqc = summarized$ModelQC - } - - if (is.element("RUN", colnames(rqall)) & !is.null(rqall)) { - rqall = rqall[order(Protein, as.numeric(as.character(RUN))), ] - rownames(rqall) = NULL - } - output_cols = intersect(c("PROTEIN", "PEPTIDE", "TRANSITION", "FEATURE", - "LABEL", "GROUP", "RUN", "SUBJECT", "FRACTION", - "originalRUN", "censored", "INTENSITY", "ABUNDANCE", - "newABUNDANCE", "predicted", "feature_quality", - "is_outlier", "remove"), colnames(input)) - input = input[, output_cols, with = FALSE] - - if (is.element("remove", colnames(processed))) { - processed = processed[(remove), - intersect(output_cols, - colnames(processed)), with = FALSE] - input = rbind(input, processed, fill = TRUE) +#' input <- MSstatsPrepareForDataProcess(raw, 2, NULL) +#' input <- MSstatsNormalize(input, "EQUALIZEMEDIANS") +#' input <- MSstatsMergeFractions(input) +#' input <- MSstatsHandleMissing(input, "TMP", TRUE, "NA", 0.999) +#' input <- MSstatsSelectFeatures(input, "all") +#' processed <- getProcessed(input) +#' input <- MSstatsPrepareForSummarization(input, method, impute, cens, FALSE) +#' summarized <- 
MSstatsSummarizeWithSingleCore(input, method, impute, cens, FALSE, TRUE) +#' output <- output <- MSstatsSummarizationOutput( +#' input, summarized, processed, +#' method, impute, cens +#' ) +#' +MSstatsSummarizationOutput <- function(input, summarized, processed, + method, impute, censored_symbol) { + LABEL <- TotalGroupMeasurements <- GROUP <- Protein <- RUN <- NULL + + input <- .finalizeInput(input, summarized, method, impute, censored_symbol) + summarized <- lapply(summarized, function(x) x[[1]]) + summarized <- data.table::rbindlist(summarized) + if (inherits(summarized, "try-error")) { + msg <- paste( + "*** error : can't summarize per subplot with ", + method, "." + ) + getOption("MSstatsLog")("ERROR", msg) + getOption("MSstatsMsg")("ERROR", msg) + rqall <- NULL + rqmodelqc <- NULL + workpred <- NULL + } else { + input[LABEL == "L", TotalGroupMeasurements := uniqueN(.SD), + by = c("PROTEIN", "GROUP"), + .SDcols = c("FEATURE", "originalRUN") + ] + cols <- intersect( + c( + "PROTEIN", "originalRUN", "RUN", "GROUP", + "GROUP_ORIGINAL", "SUBJECT_ORIGINAL", + "TotalGroupMeasurements", + "NumMeasuredFeature", "MissingPercentage", + "more50missing", "NumImputedFeature" + ), + colnames(input) + ) + merge_col <- ifelse(is.element("RUN", colnames(summarized)), + "RUN", "SUBJECT_ORIGINAL" + ) + lab <- unique(input[LABEL == "L", cols, with = FALSE]) + if (nlevels(input$LABEL) > 1) { + lab <- lab[GROUP != 0] } - list(FeatureLevelData = as.data.frame(input), - ProteinLevelData = as.data.frame(rqall), - SummaryMethod = method) - + lab <- lab[, colnames(lab) != "GROUP", with = FALSE] + rqall <- merge(summarized, lab, + by.x = c(merge_col, "Protein"), + by.y = c(merge_col, "PROTEIN") + ) + data.table::setnames(rqall, c("GROUP_ORIGINAL", "SUBJECT_ORIGINAL"), + c("GROUP", "SUBJECT"), + skip_absent = TRUE + ) + + rqall$GROUP <- factor(as.character(rqall$GROUP)) + rqall$Protein <- factor(rqall$Protein) + rqmodelqc <- summarized$ModelQC + } + + if (is.element("RUN", colnames(rqall)) & !is.null(rqall)) { + rqall <- rqall[order(Protein, as.numeric(as.character(RUN))), ] + rownames(rqall) <- NULL + } + output_cols <- intersect(c( + "PROTEIN", "PEPTIDE", "TRANSITION", "FEATURE", + "LABEL", "GROUP", "RUN", "SUBJECT", "FRACTION", + "originalRUN", "censored", "INTENSITY", "ABUNDANCE", + "newABUNDANCE", "predicted", "feature_quality", + "is_outlier", "remove" + ), colnames(input)) + input <- input[, output_cols, with = FALSE] + + if (is.element("remove", colnames(processed))) { + processed <- processed[(remove), + intersect( + output_cols, + colnames(processed) + ), + with = FALSE + ] + input <- rbind(input, processed, fill = TRUE) + } + list( + FeatureLevelData = as.data.frame(input), + ProteinLevelData = as.data.frame(rqall), + SummaryMethod = method + ) } @@ -109,79 +130,85 @@ MSstatsSummarizationOutput = function(input, summarized, processed, #' @param impute if TRUE, censored missing values were imputed #' @param censored_symbol censored missing value indicator #' @keywords internal -.finalizeInput = function(input, summarized, method, impute, censored_symbol) { - if (method == "TMP") { - input = .finalizeTMP(input, censored_symbol, impute, summarized) - } else { - input = .finalizeLinear(input, censored_symbol) - } - input +.finalizeInput <- function(input, summarized, method, impute, censored_symbol) { + if (method == "TMP") { + input <- .finalizeTMP(input, censored_symbol, impute, summarized) + } else { + input <- .finalizeLinear(input, censored_symbol) + } + input } #' Summary statistics for output of 
TMP-based summarization #' @inheritParams .finalizeInput #' @keywords internal -.finalizeTMP = function(input, censored_symbol, impute, summarized) { - NonMissingStats = NumMeasuredFeature = MissingPercentage = LABEL = NULL - total_features = more50missing = nonmissing_orig = censored = NULL - INTENSITY = newABUNDANCE = NumImputedFeature = NULL - - survival_predictions = lapply(summarized, function(x) x[[2]]) - predicted_survival = data.table::rbindlist(survival_predictions) - if (impute) { - cols = intersect(colnames(input), c("newABUNDANCE", - "cen", "RUN", - "FEATURE", "ref")) - input = merge(input[, colnames(input) != "newABUNDANCE", with = FALSE], - predicted_survival, - by = setdiff(cols, "newABUNDANCE"), - all.x = TRUE) +.finalizeTMP <- function(input, censored_symbol, impute, summarized) { + NonMissingStats <- NumMeasuredFeature <- MissingPercentage <- LABEL <- NULL + total_features <- more50missing <- nonmissing_orig <- censored <- NULL + INTENSITY <- newABUNDANCE <- NumImputedFeature <- NULL + + survival_predictions <- lapply(summarized, function(x) x[[2]]) + predicted_survival <- data.table::rbindlist(survival_predictions) + if (impute) { + cols <- intersect(colnames(input), c( + "newABUNDANCE", + "cen", "RUN", + "FEATURE", "ref" + )) + input <- merge(input[, colnames(input) != "newABUNDANCE", with = FALSE], + predicted_survival, + by = setdiff(cols, "newABUNDANCE"), + all.x = TRUE + ) + } + input[, NonMissingStats := .getNonMissingFilterStats(.SD, censored_symbol)] + input[, NumMeasuredFeature := sum(NonMissingStats), + by = c("PROTEIN", "RUN") + ] + input[, MissingPercentage := 1 - (NumMeasuredFeature / total_features)] + input[, more50missing := MissingPercentage >= 0.5] + if (!is.null(censored_symbol)) { + if (is.element("censored", colnames(input))) { + input[, nonmissing_orig := LABEL == "L" & !censored] + } else { + input[, nonmissing_orig := LABEL == "L" & !is.na(INTENSITY)] } - input[, NonMissingStats := .getNonMissingFilterStats(.SD, censored_symbol)] - input[, NumMeasuredFeature := sum(NonMissingStats), - by = c("PROTEIN", "RUN")] - input[, MissingPercentage := 1 - (NumMeasuredFeature / total_features)] - input[, more50missing := MissingPercentage >= 0.5] - if (!is.null(censored_symbol)) { - if (is.element("censored", colnames(input))) { - input[, nonmissing_orig := LABEL == "L" & !censored] - } else { - input[, nonmissing_orig := LABEL == "L" & !is.na(INTENSITY)] - } - input[, nonmissing_orig := ifelse(is.na(newABUNDANCE), TRUE, nonmissing_orig)] - if (impute) { - input[, NumImputedFeature := sum(LABEL == "L" & !nonmissing_orig), - by = c("PROTEIN", "RUN")] - } else { - input[, NumImputedFeature := 0] - } + input[, nonmissing_orig := ifelse(is.na(newABUNDANCE), TRUE, nonmissing_orig)] + if (impute) { + input[, NumImputedFeature := sum(LABEL == "L" & !nonmissing_orig), + by = c("PROTEIN", "RUN") + ] + } else { + input[, NumImputedFeature := 0] } - input + } + input } #' Summary statistics for linear model-based summarization #' @inheritParams .finalizeInput #' @keywords internal -.finalizeLinear = function(input, censored_symbol) { - NonMissingStats = NumMeasuredFeature = MissingPercentage = NULL - total_features = more50missing = nonmissing_orig = LABEL = NULL - censored = INTENSITY = newABUNDANCE = NumImputedFeature = NULL - - input[, NonMissingStats := .getNonMissingFilterStats(.SD, censored_symbol)] - input[, NumMeasuredFeature := sum(NonMissingStats), - by = c("PROTEIN", "RUN")] - input[, MissingPercentage := 1 - (NumMeasuredFeature / total_features)] - input[, 
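+  # Flag protein-run combinations where at least half of the protein's features are
+  # missing; this more50missing flag is carried into the dataProcess output downstream.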
more50missing := MissingPercentage >= 0.5] - if (!is.null(censored_symbol)) { - if (is.element("censored", colnames(input))) { - input[, nonmissing_orig := LABEL == "L" & !censored] - } else { - input[, nonmissing_orig := LABEL == "L" & !is.na(INTENSITY)] - } - input[, nonmissing_orig := ifelse(is.na(newABUNDANCE), TRUE, nonmissing_orig)] - input[, NumImputedFeature := 0] +.finalizeLinear <- function(input, censored_symbol) { + NonMissingStats <- NumMeasuredFeature <- MissingPercentage <- NULL + total_features <- more50missing <- nonmissing_orig <- LABEL <- NULL + censored <- INTENSITY <- newABUNDANCE <- NumImputedFeature <- NULL + + input[, NonMissingStats := .getNonMissingFilterStats(.SD, censored_symbol)] + input[, NumMeasuredFeature := sum(NonMissingStats), + by = c("PROTEIN", "RUN") + ] + input[, MissingPercentage := 1 - (NumMeasuredFeature / total_features)] + input[, more50missing := MissingPercentage >= 0.5] + if (!is.null(censored_symbol)) { + if (is.element("censored", colnames(input))) { + input[, nonmissing_orig := LABEL == "L" & !censored] + } else { + input[, nonmissing_orig := LABEL == "L" & !is.na(INTENSITY)] } - input + input[, nonmissing_orig := ifelse(is.na(newABUNDANCE), TRUE, nonmissing_orig)] + input[, NumImputedFeature := 0] + } + input } diff --git a/R/utils_plots_common.R b/R/utils_plots_common.R index 68876574..c90e8701 100644 --- a/R/utils_plots_common.R +++ b/R/utils_plots_common.R @@ -1,5 +1,5 @@ #' Theme for MSstats plots -#' +#' #' @param type type of a plot #' @param x.axis.size size of text on the x axis #' @param y.axis.size size of text on the y axis @@ -12,121 +12,128 @@ #' @param text_hjust hjust parameter for x axis text (for condition and comparison plots) #' @param text_vjust vjust parameter for x axis text (for condition and comparison plots) #' @param ... additional parameters passed on to ggplot2::theme() -#' +#' #' @import ggplot2 #' @export -#' -theme_msstats = function( - type, x.axis.size = 10, y.axis.size = 10, legend_size = 13, +#' +theme_msstats <- function( + type, x.axis.size = 10, y.axis.size = 10, legend_size = 13, strip_background = element_rect(fill = "gray95"), strip_text_x = element_text(colour = c("black"), size = 14), - legend_position = "top", legend_box = "vertical", text_angle = 0, text_hjust = NULL, text_vjust = NULL, - ... -) { - if (type %in% c("CONDITIONPLOT", "COMPARISONPLOT")) { - ggplot2::theme( - panel.background = element_rect(fill = 'white', colour = "black"), - axis.title.x = element_text(size = x.axis.size + 5, vjust = -0.4), - axis.title.y = element_text(size = y.axis.size + 5, vjust = 0.3), - axis.ticks = element_line(colour = "black"), - title = element_text(size = x.axis.size + 8, vjust = 1.5), - panel.grid.major.y = element_line(colour = "grey95"), - panel.grid.minor.y = element_blank(), - axis.text.y = element_text(size = y.axis.size, colour = "black"), - axis.text.x = element_text(size = x.axis.size, colour = "black", - angle = text_angle, hjust = text_hjust, - vjust = text_vjust), - ... 
- ) - } else { - ggplot2::theme( - panel.background = element_rect(fill = 'white', colour = "black"), - legend.key = element_rect(fill = 'white', colour = 'white'), - panel.grid.minor = element_blank(), - strip.background = strip_background, - axis.text.x = element_text(size = x.axis.size, colour = "black"), - axis.text.y = element_text(size = y.axis.size, colour = "black"), - axis.title.x = element_text(size = x.axis.size + 5, vjust = -0.4), - axis.title.y = element_text(size = y.axis.size + 5, vjust = 0.3), - axis.ticks = element_line(colour = "black"), - title = element_text(size = x.axis.size + 8, vjust = 1.5), - strip.text.x = strip_text_x, - legend.position = legend_position, - legend.box = legend_box, - legend.text = element_text(size = legend_size), - ... - ) - } + legend_position = "top", legend_box = "vertical", text_angle = 0, text_hjust = NULL, text_vjust = NULL, + ...) { + if (type %in% c("CONDITIONPLOT", "COMPARISONPLOT")) { + ggplot2::theme( + panel.background = element_rect(fill = "white", colour = "black"), + axis.title.x = element_text(size = x.axis.size + 5, vjust = -0.4), + axis.title.y = element_text(size = y.axis.size + 5, vjust = 0.3), + axis.ticks = element_line(colour = "black"), + title = element_text(size = x.axis.size + 8, vjust = 1.5), + panel.grid.major.y = element_line(colour = "grey95"), + panel.grid.minor.y = element_blank(), + axis.text.y = element_text(size = y.axis.size, colour = "black"), + axis.text.x = element_text( + size = x.axis.size, colour = "black", + angle = text_angle, hjust = text_hjust, + vjust = text_vjust + ), + ... + ) + } else { + ggplot2::theme( + panel.background = element_rect(fill = "white", colour = "black"), + legend.key = element_rect(fill = "white", colour = "white"), + panel.grid.minor = element_blank(), + strip.background = strip_background, + axis.text.x = element_text(size = x.axis.size, colour = "black"), + axis.text.y = element_text(size = y.axis.size, colour = "black"), + axis.title.x = element_text(size = x.axis.size + 5, vjust = -0.4), + axis.title.y = element_text(size = y.axis.size + 5, vjust = 0.3), + axis.ticks = element_line(colour = "black"), + title = element_text(size = x.axis.size + 8, vjust = 1.5), + strip.text.x = strip_text_x, + legend.position = legend_position, + legend.box = legend_box, + legend.text = element_text(size = legend_size), + ... + ) + } } #' Get proteins based on names or integer IDs -#' +#' #' @param chosen_proteins protein names or integers IDs #' @param all_proteins all unique proteins -#' +#' #' @return character -#' +#' #' @export -getSelectedProteins = function(chosen_proteins, all_proteins) { - if (is.character(chosen_proteins)) { - selected_proteins = chosen_proteins - missing_proteins = setdiff(selected_proteins, all_proteins) - if (length(missing_proteins) > 0) { - stop(paste("Please check protein name. Dataset does not have this protein. -", - toString(missing_proteins), sep = " ")) - } +getSelectedProteins <- function(chosen_proteins, all_proteins) { + if (is.character(chosen_proteins)) { + selected_proteins <- chosen_proteins + missing_proteins <- setdiff(selected_proteins, all_proteins) + if (length(missing_proteins) > 0) { + stop(paste("Please check protein name. Dataset does not have this protein. -", + toString(missing_proteins), + sep = " " + )) } - if (is.numeric(chosen_proteins)) { - selected_proteins <- all_proteins[chosen_proteins] - if (length(all_proteins) < max(chosen_proteins)) { - stop(paste("Please check your selection of proteins. 
There are ", - length(all_proteins)," proteins in this dataset.")) - } + } + if (is.numeric(chosen_proteins)) { + selected_proteins <- all_proteins[chosen_proteins] + if (length(all_proteins) < max(chosen_proteins)) { + stop(paste( + "Please check your selection of proteins. There are ", + length(all_proteins), " proteins in this dataset." + )) } - selected_proteins + } + selected_proteins } #' Save a plot to pdf file -#' +#' #' @inheritParams .saveTable #' @param width width of a plot #' @param height height of a plot -#' +#' #' @return NULL -#' +#' #' @export -#' -savePlot = function(name_base, file_name, width, height) { - if (name_base != FALSE) { - width_inches = .convertPixelsToInches(width) - height_inches = .convertPixelsToInches(height) - file_path = getFileName(name_base, file_name, - width, height) - file_path = paste0(file_path,".pdf") - pdf(file_path, width = width_inches, height = height_inches) - } - NULL +#' +savePlot <- function(name_base, file_name, width, height) { + if (name_base != FALSE) { + width_inches <- .convertPixelsToInches(width) + height_inches <- .convertPixelsToInches(height) + file_path <- getFileName( + name_base, file_name, + width, height + ) + file_path <- paste0(file_path, ".pdf") + pdf(file_path, width = width_inches, height = height_inches) + } + NULL } -.convertPixelsToInches = function(pixels) { - # Convert pixels to inches (using standard 72 DPI) - inches = pixels / 72 - return(inches) +.convertPixelsToInches <- function(pixels) { + # Convert pixels to inches (using standard 72 DPI) + inches <- pixels / 72 + return(inches) } -getFileName = function(name_base, file_name, width, height) { - all_files = list.files(".") - if(file_name == 'ProfilePlot'){ - num_same_name = sum(grepl(paste0("^", name_base, file_name, "_[0-9]?"), all_files)) - } else { - num_same_name = sum(grepl(paste0("^", name_base, file_name, "[0-9]?"), all_files)) - } - if (num_same_name > 0) { - file_name = paste(file_name, num_same_name + 1, sep = "_") - } - file_path = paste0(name_base, file_name) - return(file_path) +getFileName <- function(name_base, file_name, width, height) { + all_files <- list.files(".") + if (file_name == "ProfilePlot") { + num_same_name <- sum(grepl(paste0("^", name_base, file_name, "_[0-9]?"), all_files)) + } else { + num_same_name <- sum(grepl(paste0("^", name_base, file_name, "[0-9]?"), all_files)) + } + if (num_same_name > 0) { + file_name <- paste(file_name, num_same_name + 1, sep = "_") + } + file_path <- paste0(name_base, file_name) + return(file_path) } @@ -137,15 +144,15 @@ getFileName = function(name_base, file_name, width, height) { #' an integer will be appended to this name #' @return NULL #' @keywords internal -.saveTable = function(input, name_base, file_name) { - if (name_base != FALSE) { - all_files = list.files(".") - num_same_name = sum(grepl(paste0("^", file_name, "_[0-9]?"), all_files)) - if (num_same_name > 0) { - file_name = paste(file_name, num_same_name + 1, sep = "_") - } - file_path = paste0(paste0(name_base, "/"), file_name, ".pdf") - data.table::fwrite(input, file = file_path) +.saveTable <- function(input, name_base, file_name) { + if (name_base != FALSE) { + all_files <- list.files(".") + num_same_name <- sum(grepl(paste0("^", file_name, "_[0-9]?"), all_files)) + if (num_same_name > 0) { + file_name <- paste(file_name, num_same_name + 1, sep = "_") } - NULL + file_path <- paste0(paste0(name_base, "/"), file_name, ".pdf") + data.table::fwrite(input, file = file_path) + } + NULL } diff --git a/R/utils_statistics.R 
b/R/utils_statistics.R index 24319426..fe52e81b 100644 --- a/R/utils_statistics.R +++ b/R/utils_statistics.R @@ -2,11 +2,11 @@ #' @param input data.table #' @return TRUE invisibly after successful logging #' @keywords internal -.logDatasetInformation = function(input) { - .logSummaryStatistics(input) - .checkSingleLabelProteins(input) - .logMissingness(input) - invisible(TRUE) +.logDatasetInformation <- function(input) { + .logSummaryStatistics(input) + .checkSingleLabelProteins(input) + .logMissingness(input) + invisible(TRUE) } @@ -14,81 +14,103 @@ #' @param input data.table #' @return TRUE invisibly #' @keywords internal -.logSummaryStatistics = function(input) { - RUN = SUBJECT_ORIGINAL = FRACTION = GROUP_ORIGINAL = NumRuns = NULL - NumBioReplicates = NumFractions = NULL - - PEPTIDE = FEATURE = PROTEIN = feature_count = NULL - - num_proteins = data.table::uniqueN(input$PROTEIN) - peptides_per_protein = input[, list(peptide_count = data.table::uniqueN(PEPTIDE)), - by = "PROTEIN"] - unique_peptide_feature_pairs = unique(input[, .(PEPTIDE, FEATURE)]) - features_per_peptide = unique_peptide_feature_pairs[, .(feature_count = .N), - by = PEPTIDE] - features_per_protein = input[, list(feature_count = data.table::uniqueN(FEATURE)), - by = "PROTEIN"] - features_per_protein = features_per_protein[feature_count == 1L, PROTEIN] - - pep_per_prot = range(peptides_per_protein$peptide_count) - feat_per_pept = range(features_per_peptide$feature_count) - counts_msg = paste("", paste("# proteins:", num_proteins), - paste("# peptides per protein:", - paste(pep_per_prot, sep = "-", collapse = "-")), - paste("# features per peptide:", - paste(feat_per_pept, sep = "-", collapse = "-")), - sep = "\n ", collapse = "\n ") - getOption("MSstatsMsg")("INFO", counts_msg) - getOption("MSstatsLog")("INFO", counts_msg) - - if (length(features_per_protein) > 0) { - single_features = unique(as.character(features_per_protein)) - n_feat = min(length(single_features), 5) - msg = paste("Some proteins have only one feature:", "\n", - paste(unique(as.character(features_per_protein))[1:n_feat], - sep = ",\n ", collapse = ",\n "), - "...") - getOption("MSstatsLog")("INFO", msg) - getOption("MSstatsMsg")("INFO", msg) - } - - samples_info = input[, list(NumRuns = data.table::uniqueN(RUN), - NumBioReplicates = data.table::uniqueN(SUBJECT_ORIGINAL), - NumFractions = data.table::uniqueN(FRACTION)), - by = "GROUP_ORIGINAL"] - samples_info = samples_info[ - , - list(GROUP_ORIGINAL, NumRuns, NumBioReplicates, - NumTechReplicates = as.integer( - round(NumRuns / (NumBioReplicates * NumFractions) - )))] - samples_info = data.table::dcast(data.table::melt(samples_info, - id.vars = "GROUP_ORIGINAL"), - variable ~ GROUP_ORIGINAL) - colnames(samples_info)[1] = "" - samples_info[, 1] = c("# runs", "# bioreplicates", "# tech. 
replicates") - - samples_info = rbind(data.table::data.table(t(colnames(samples_info))), - samples_info, use.names = FALSE) - samples_msg = apply(samples_info, 2, .nicePrint) - samples_msg = apply(samples_msg, 1, function(x) paste0(x, collapse = "")) - samples_msg = paste("", samples_msg, sep = "\n") - getOption("MSstatsLog")("INFO", samples_msg) - getOption("MSstatsMsg")("INFO", samples_msg) - invisible(TRUE) +.logSummaryStatistics <- function(input) { + RUN <- SUBJECT_ORIGINAL <- FRACTION <- GROUP_ORIGINAL <- NumRuns <- NULL + NumBioReplicates <- NumFractions <- NULL + + PEPTIDE <- FEATURE <- PROTEIN <- feature_count <- NULL + + num_proteins <- data.table::uniqueN(input$PROTEIN) + peptides_per_protein <- input[, list(peptide_count = data.table::uniqueN(PEPTIDE)), + by = "PROTEIN" + ] + unique_peptide_feature_pairs <- unique(input[, .(PEPTIDE, FEATURE)]) + features_per_peptide <- unique_peptide_feature_pairs[, .(feature_count = .N), + by = PEPTIDE + ] + features_per_protein <- input[, list(feature_count = data.table::uniqueN(FEATURE)), + by = "PROTEIN" + ] + features_per_protein <- features_per_protein[feature_count == 1L, PROTEIN] + + pep_per_prot <- range(peptides_per_protein$peptide_count) + feat_per_pept <- range(features_per_peptide$feature_count) + counts_msg <- paste("", paste("# proteins:", num_proteins), + paste( + "# peptides per protein:", + paste(pep_per_prot, sep = "-", collapse = "-") + ), + paste( + "# features per peptide:", + paste(feat_per_pept, sep = "-", collapse = "-") + ), + sep = "\n ", collapse = "\n " + ) + getOption("MSstatsMsg")("INFO", counts_msg) + getOption("MSstatsLog")("INFO", counts_msg) + + if (length(features_per_protein) > 0) { + single_features <- unique(as.character(features_per_protein)) + n_feat <- min(length(single_features), 5) + msg <- paste( + "Some proteins have only one feature:", "\n", + paste(unique(as.character(features_per_protein))[1:n_feat], + sep = ",\n ", collapse = ",\n " + ), + "..." + ) + getOption("MSstatsLog")("INFO", msg) + getOption("MSstatsMsg")("INFO", msg) + } + + samples_info <- input[, list( + NumRuns = data.table::uniqueN(RUN), + NumBioReplicates = data.table::uniqueN(SUBJECT_ORIGINAL), + NumFractions = data.table::uniqueN(FRACTION) + ), + by = "GROUP_ORIGINAL" + ] + samples_info <- samples_info[ + , + list(GROUP_ORIGINAL, NumRuns, NumBioReplicates, + NumTechReplicates = as.integer( + round(NumRuns / (NumBioReplicates * NumFractions)) + ) + ) + ] + samples_info <- data.table::dcast( + data.table::melt(samples_info, + id.vars = "GROUP_ORIGINAL" + ), + variable ~ GROUP_ORIGINAL + ) + colnames(samples_info)[1] <- "" + samples_info[, 1] <- c("# runs", "# bioreplicates", "# tech. 
replicates") + + samples_info <- rbind(data.table::data.table(t(colnames(samples_info))), + samples_info, + use.names = FALSE + ) + samples_msg <- apply(samples_info, 2, .nicePrint) + samples_msg <- apply(samples_msg, 1, function(x) paste0(x, collapse = "")) + samples_msg <- paste("", samples_msg, sep = "\n") + getOption("MSstatsLog")("INFO", samples_msg) + getOption("MSstatsMsg")("INFO", samples_msg) + invisible(TRUE) } -#' Print a table nicely +#' Print a table nicely #' @param string_vector character #' @return character #' @keywords internal -.nicePrint = function(string_vector) { - max_chars = max(nchar(string_vector)) - string_vector = sapply(string_vector, - function(x) paste(paste0(rep(" ", max_chars - nchar(x)), collapse = ""), x), - USE.NAMES = FALSE) -} +.nicePrint <- function(string_vector) { + max_chars <- max(nchar(string_vector)) + string_vector <- sapply(string_vector, + function(x) paste(paste0(rep(" ", max_chars - nchar(x)), collapse = ""), x), + USE.NAMES = FALSE + ) +} #' Print proteins with a single label to the log file @@ -96,22 +118,26 @@ #' @param label label ("L" or "H") #' @return TRUE invisibly #' @keywords internal -.logSingleLabeledProteins = function(input, label) { - LABEL = PROTEIN = NULL - - name = ifelse(label == "L", "endogeneous", "reference") - proteins = unique(input[LABEL == label, as.character(PROTEIN)]) - if (length(proteins) > 0) { - n_prot = min(length(proteins), 5) - msg = paste(paste("Some proteins only have", name, - "intensities in label-based experiment", - "Please check or remove these proteins:"), - paste(proteins[1:n_prot], sep = ", \n ", collapse = ", \n "), - "... (see the log file for a full list)") - getOption("MSstatsMsg")("WARN", msg) - getOption("MsstatsLog")("WARN", msg) - } - invisible(TRUE) +.logSingleLabeledProteins <- function(input, label) { + LABEL <- PROTEIN <- NULL + + name <- ifelse(label == "L", "endogeneous", "reference") + proteins <- unique(input[LABEL == label, as.character(PROTEIN)]) + if (length(proteins) > 0) { + n_prot <- min(length(proteins), 5) + msg <- paste( + paste( + "Some proteins only have", name, + "intensities in label-based experiment", + "Please check or remove these proteins:" + ), + paste(proteins[1:n_prot], sep = ", \n ", collapse = ", \n "), + "... 
(see the log file for a full list)"
+    )
+    getOption("MSstatsMsg")("WARN", msg)
+    getOption("MSstatsLog")("WARN", msg)
+  }
+  invisible(TRUE)
 }


@@ -119,20 +145,22 @@
 #' @param input data.table
 #' @return TRUE invisibly
 #' @keywords internal
-.checkSingleLabelProteins = function(input) {
-    LABEL = label_count = NULL
-    
-    if (data.table::uniqueN(input$LABEL) == 2) {
-        labels_by_protein = unique(input[,
-                                         list(LABEL,
-                                              label_count = data.table::uniqueN(LABEL)),
-                                         by = "PROTEIN"])
-        labels_by_protein = labels_by_protein[label_count == 1L, ]
-        
-        .logSingleLabeledProteins(labels_by_protein, "L")
-        .logSingleLabeledProteins(labels_by_protein, "H")
-    }
-    invisible(TRUE)
+.checkSingleLabelProteins <- function(input) {
+  LABEL <- label_count <- NULL
+
+  if (data.table::uniqueN(input$LABEL) == 2) {
+    labels_by_protein <- unique(input[,
+      list(LABEL,
+        label_count = data.table::uniqueN(LABEL)
+      ),
+      by = "PROTEIN"
+    ])
+    labels_by_protein <- labels_by_protein[label_count == 1L, ]
+
+    .logSingleLabeledProteins(labels_by_protein, "L")
+    .logSingleLabeledProteins(labels_by_protein, "H")
+  }
+  invisible(TRUE)
 }


@@ -140,47 +168,56 @@
 #' @param input data.table
 #' @return TRUE invisibly
 #' @keywords internal
-.logMissingness = function(input) {
-    ABUNDANCE = AllMissing = NumMissing = NumTotal = AnyAllMissing = NULL
-    FEATURE = FractionMissing = RUN = NULL
-    
-    input[, is_na_abundance := is.na(ABUNDANCE)]
-    missing = input[, .(
-        NumMissing = sum(is_na_abundance),
-        NumTotal = .N
-    ), by = .(LABEL, GROUP, FEATURE)]
-    missing[, AllMissing := (NumMissing == NumTotal)]
-    any_all_missing = missing[AllMissing == TRUE,
-                              .(AnyAllMissing = TRUE),
-                              by = .(LABEL, FEATURE)]
-    missing = merge(missing, any_all_missing,
-                    by = c("LABEL", "FEATURE"), all.x = TRUE)
-    missing[is.na(AnyAllMissing), AnyAllMissing := FALSE]
-    missing_in_any = as.character(missing[(AnyAllMissing), FEATURE])
-    missing_by_run = input[, .(
-        NumMissing = sum(is_na_abundance),
-        NumTotal = .N
-    ), by = "RUN"]
-    missing_by_run[, FractionMissing := NumMissing / NumTotal]
-    missing_by_run = as.character(missing_by_run[FractionMissing > 0.75,
-                                                 unique(RUN)])
-    
-    if (length(missing_in_any) > 0) {
-        num_features = min(length(missing_in_any), 5)
-        msg = paste("Some features are completely",
-                    "missing in at least one condition: ", "\n",
-                    paste(unique(as.character(missing_in_any))[1:num_features],
-                          sep = ",\n ",
-                          collapse = ",\n "), "...")
-        getOption("MSstatsLog")("INFO", msg)
-        getOption("MSstatsMsg")("INFO", msg)
-    }
-    
-    if (length(missing_by_run) > 0) {
-        msg = paste("The following runs have more than 75% missing values:",
-                    paste(missing_by_run, sep = ",\n ", collapse = ",\n "))
-        getOption("MSstatsLog")("INFO", msg)
-        getOption("MSstatsMsg")("INFO", msg)
-    }
-    invisible(TRUE)
+.logMissingness <- function(input) {
+  ABUNDANCE <- AllMissing <- NumMissing <- NumTotal <- AnyAllMissing <- NULL
+  FEATURE <- FractionMissing <- RUN <- NULL
+
+  input[, is_na_abundance := is.na(ABUNDANCE)]
+  missing <- input[, .(
+    NumMissing = sum(is_na_abundance),
+    NumTotal = .N
+  ), by = .(LABEL, GROUP, FEATURE)]
+  missing[, AllMissing := (NumMissing == NumTotal)]
+  any_all_missing <- missing[AllMissing == TRUE,
+    .(AnyAllMissing = TRUE),
+    by = .(LABEL, FEATURE)
+  ]
+  missing <- merge(missing, any_all_missing,
+    by = c("LABEL", "FEATURE"), all.x = TRUE
+  )
+  missing[is.na(AnyAllMissing), AnyAllMissing := FALSE]
+  missing_in_any <- as.character(missing[(AnyAllMissing), FEATURE])
+  missing_by_run <- input[, .(
+    NumMissing = sum(is_na_abundance),
+    NumTotal = .N
+  ), by =
"RUN"] + missing_by_run[, FractionMissing := NumMissing / NumTotal] + missing_by_run <- as.character(missing_by_run[ + FractionMissing > 0.75, + unique(RUN) + ]) + + if (length(missing_in_any) > 0) { + num_features <- min(length(missing_in_any), 5) + msg <- paste( + "Some features are completely", + "missing in at least one condition: ", "\n", + paste(unique(as.character(missing_in_any))[1:num_features], + sep = ",\n ", + collapse = ",\n " + ), "..." + ) + getOption("MSstatsLog")("INFO", msg) + getOption("MSstatsMsg")("INFO", msg) + } + + if (length(missing_by_run) > 0) { + msg <- paste( + "The following runs have more than 75% missing values:", + paste(missing_by_run, sep = ",\n ", collapse = ",\n ") + ) + getOption("MSstatsLog")("INFO", msg) + getOption("MSstatsMsg")("INFO", msg) + } + invisible(TRUE) } diff --git a/R/utils_summarization.R b/R/utils_summarization.R index ec7433d9..82b8b090 100644 --- a/R/utils_summarization.R +++ b/R/utils_summarization.R @@ -3,60 +3,72 @@ #' @param remove50missing if TRUE, proteins with more than 50% missing values #' in all runs will not be summarized #' @return data.table -#' @keywords internal -.isSummarizable = function(input, remove50missing) { - n_obs_run = RUN = NULL - - if (all(is.na(input$newABUNDANCE) | input$newABUNDANCE == 0)) { - msg = paste("Can't summarize for protein", unique(input$PROTEIN), - "because all measurements are missing or censored.") - getOption("MSstatsMsg")("INFO", msg) - getOption("MSstatsLog")("INFO", msg) - return(NULL) - } - - if (all(is.na(input$n_obs) | input$n_obs == 0)) { - msg = paste("Can't summarize for protein", unique(input$PROTEIN), - "because all measurements are missing or censored.") - getOption("MSstatsMsg")("INFO", msg) - getOption("MSstatsLog")("INFO", msg) - return(NULL) - } - - if (all(input$n_obs == 1 | is.na(input$n_obs))) { - msg = paste("Can't summarize for protein", unique(input$PROTEIN), - "because features have only one measurement across MS runs.") - getOption("MSstatsMsg")("INFO", msg) - getOption("MSstatsLog")("INFO", msg) - return(NULL) - } - - if (all(is.na(input$newABUNDANCE) | input$newABUNDANCE == 0) | nrow(input) == 0) { - msg = paste("After removing features which has only 1 measurement,", - "Can't summarize for protein", unique(input$PROTEIN), - "because all measurements are missing or censored.") - getOption("MSstatsMsg")("INFO", msg) - getOption("MSstatsLog")("INFO", msg) - return(NULL) - } - - missing_runs = setdiff(unique(input$RUN), - unique(input[n_obs_run == 0 | is.na(n_obs_run), RUN])) - if (length(missing_runs) > 0 & length(intersect(missing_runs, as.character(unique(input$RUN))))) { - input = input[n_obs_run > 0 & !is.na(n_obs_run), ] - } - - if (remove50missing) { - if (all(input$prop_features <= 0.5 | is.na(input$prop_features))) { - msg = paste("Can't summarize for protein", unique(input$PROTEIN), - "because all runs have more than 50% missing values and", - "are removed with the option, remove50missing=TRUE.") - getOption("MSstatsMsg")("INFO", msg) - getOption("MSstatsLog")("INFO", msg) - return(NULL) - } +#' @keywords internal +.isSummarizable <- function(input, remove50missing) { + n_obs_run <- RUN <- NULL + + if (all(is.na(input$newABUNDANCE) | input$newABUNDANCE == 0)) { + msg <- paste( + "Can't summarize for protein", unique(input$PROTEIN), + "because all measurements are missing or censored." 
+ ) + getOption("MSstatsMsg")("INFO", msg) + getOption("MSstatsLog")("INFO", msg) + return(NULL) + } + + if (all(is.na(input$n_obs) | input$n_obs == 0)) { + msg <- paste( + "Can't summarize for protein", unique(input$PROTEIN), + "because all measurements are missing or censored." + ) + getOption("MSstatsMsg")("INFO", msg) + getOption("MSstatsLog")("INFO", msg) + return(NULL) + } + + if (all(input$n_obs == 1 | is.na(input$n_obs))) { + msg <- paste( + "Can't summarize for protein", unique(input$PROTEIN), + "because features have only one measurement across MS runs." + ) + getOption("MSstatsMsg")("INFO", msg) + getOption("MSstatsLog")("INFO", msg) + return(NULL) + } + + if (all(is.na(input$newABUNDANCE) | input$newABUNDANCE == 0) | nrow(input) == 0) { + msg <- paste( + "After removing features which has only 1 measurement,", + "Can't summarize for protein", unique(input$PROTEIN), + "because all measurements are missing or censored." + ) + getOption("MSstatsMsg")("INFO", msg) + getOption("MSstatsLog")("INFO", msg) + return(NULL) + } + + missing_runs <- setdiff( + unique(input$RUN), + unique(input[n_obs_run == 0 | is.na(n_obs_run), RUN]) + ) + if (length(missing_runs) > 0 & length(intersect(missing_runs, as.character(unique(input$RUN))))) { + input <- input[n_obs_run > 0 & !is.na(n_obs_run), ] + } + + if (remove50missing) { + if (all(input$prop_features <= 0.5 | is.na(input$prop_features))) { + msg <- paste( + "Can't summarize for protein", unique(input$PROTEIN), + "because all runs have more than 50% missing values and", + "are removed with the option, remove50missing=TRUE." + ) + getOption("MSstatsMsg")("INFO", msg) + getOption("MSstatsLog")("INFO", msg) + return(NULL) } - input + } + input } @@ -66,21 +78,23 @@ #' @inheritParams MSstatsSummarizeWithSingleCore #' @return data.table #' @keywords internal -.runTukey = function(input, is_labeled, censored_symbol, remove50missing) { - Protein = RUN = newABUNDANCE = NULL - - if (nlevels(input$FEATURE) > 1) { - tmp_result = .fitTukey(input) - } else { - if (is_labeled) { - tmp_result = .adjustLRuns(input, TRUE) - } else { - tmp_result = input[input$LABEL == "L", - list(RUN, LogIntensities = newABUNDANCE)] - } +.runTukey <- function(input, is_labeled, censored_symbol, remove50missing) { + Protein <- RUN <- newABUNDANCE <- NULL + + if (nlevels(input$FEATURE) > 1) { + tmp_result <- .fitTukey(input) + } else { + if (is_labeled) { + tmp_result <- .adjustLRuns(input, TRUE) + } else { + tmp_result <- input[ + input$LABEL == "L", + list(RUN, LogIntensities = newABUNDANCE) + ] } - tmp_result[, Protein := unique(input$PROTEIN)] - tmp_result + } + tmp_result[, Protein := unique(input$PROTEIN)] + tmp_result } @@ -88,20 +102,22 @@ #' @inheritParams .runTukey #' @return data.table #' @keywords internal -.fitTukey = function(input) { - LABEL = RUN = newABUNDANCE = NULL - - features = as.character(unique(input$FEATURE)) - wide = data.table::dcast(LABEL + RUN ~ FEATURE, data = input, - value.var = "newABUNDANCE", keep = TRUE) - tmp_fitted = median_polish_summary(as.matrix(wide[, features, with = FALSE])) - wide[, newABUNDANCE := tmp_fitted] - tmp_result = wide[, list(LABEL, RUN, newABUNDANCE)] - - if (data.table::uniqueN(input$LABEL) == 2) { - tmp_result = .adjustLRuns(tmp_result) - } - tmp_result[, list(RUN, LogIntensities = newABUNDANCE)] +.fitTukey <- function(input) { + LABEL <- RUN <- newABUNDANCE <- NULL + + features <- as.character(unique(input$FEATURE)) + wide <- data.table::dcast(LABEL + RUN ~ FEATURE, + data = input, + value.var = "newABUNDANCE", keep = 
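+    # (wide reshape: one row per LABEL/RUN, one column per feature, so the matrix can be
+    # handed to median_polish_summary() for Tukey's median polish)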
TRUE + ) + tmp_fitted <- median_polish_summary(as.matrix(wide[, features, with = FALSE])) + wide[, newABUNDANCE := tmp_fitted] + tmp_result <- wide[, list(LABEL, RUN, newABUNDANCE)] + + if (data.table::uniqueN(input$LABEL) == 2) { + tmp_result <- .adjustLRuns(tmp_result) + } + tmp_result[, list(RUN, LogIntensities = newABUNDANCE)] } @@ -111,19 +127,19 @@ #' @return data.table #' @importFrom stats median #' @keywords internal -.adjustLRuns = function(input, rename = FALSE) { - LABEL = newABUNDANCE = RUN = newABUNDANCE.h = NULL - - h_runs = input[LABEL == "H", list(RUN, newABUNDANCE)] - h_median = median(input[LABEL == "H", newABUNDANCE], na.rm = TRUE) - input = input[LABEL == "L"] - input = merge(input[, list(RUN, newABUNDANCE)], h_runs, by = "RUN", suffixes = c("", ".h")) - input[, newABUNDANCE := newABUNDANCE - newABUNDANCE.h + h_median] - if (rename) { - input[, list(RUN, LogIntensities = newABUNDANCE)] - } else { - input[, list(RUN, newABUNDANCE)] - } +.adjustLRuns <- function(input, rename = FALSE) { + LABEL <- newABUNDANCE <- RUN <- newABUNDANCE.h <- NULL + + h_runs <- input[LABEL == "H", list(RUN, newABUNDANCE)] + h_median <- median(input[LABEL == "H", newABUNDANCE], na.rm = TRUE) + input <- input[LABEL == "L"] + input <- merge(input[, list(RUN, newABUNDANCE)], h_runs, by = "RUN", suffixes = c("", ".h")) + input[, newABUNDANCE := newABUNDANCE - newABUNDANCE.h + h_median] + if (rename) { + input[, list(RUN, LogIntensities = newABUNDANCE)] + } else { + input[, list(RUN, newABUNDANCE)] + } } @@ -131,18 +147,18 @@ #' @inheritParams .runTukey #' @return data.table #' @keywords internal -.getNonMissingFilterStats = function(input, censored_symbol) { - if (!is.null(censored_symbol)) { - if (censored_symbol == "NA") { - nonmissing_filter = input$LABEL == "L" & !is.na(input$newABUNDANCE) & !input$censored - } else { - nonmissing_filter = input$LABEL == "L" & !is.na(input$newABUNDANCE) & !input$censored - } +.getNonMissingFilterStats <- function(input, censored_symbol) { + if (!is.null(censored_symbol)) { + if (censored_symbol == "NA") { + nonmissing_filter <- input$LABEL == "L" & !is.na(input$newABUNDANCE) & !input$censored } else { - nonmissing_filter = input$LABEL == "L" & !is.na(input$INTENSITY) + nonmissing_filter <- input$LABEL == "L" & !is.na(input$newABUNDANCE) & !input$censored } - nonmissing_filter = nonmissing_filter & input$n_obs_run > 0 & input$n_obs > 1 - nonmissing_filter + } else { + nonmissing_filter <- input$LABEL == "L" & !is.na(input$INTENSITY) + } + nonmissing_filter <- nonmissing_filter & input$n_obs_run > 0 & input$n_obs > 1 + nonmissing_filter } @@ -154,27 +170,29 @@ #' @return lm or merMod #' @importFrom stats lm #' @keywords internal -.fitLinearModel = function(input, is_single_feature, is_labeled, - equal_variances) { - if (!is_labeled) { - if (is_single_feature) { - linear_model = lm(ABUNDANCE ~ RUN , data = input) - } else { - linear_model = lm(ABUNDANCE ~ FEATURE + RUN , data = input) - } +.fitLinearModel <- function(input, is_single_feature, is_labeled, + equal_variances) { + if (!is_labeled) { + if (is_single_feature) { + linear_model <- lm(ABUNDANCE ~ RUN, data = input) } else { - if (is_single_feature) { - linear_model = lm(ABUNDANCE ~ RUN + ref , data = input) - } else { - linear_model = lm(ABUNDANCE ~ FEATURE + RUN + ref, data = input) - } + linear_model <- lm(ABUNDANCE ~ FEATURE + RUN, data = input) } - if (!equal_variances) { - linear_model = .updateUnequalVariances(input = input, - fit = linear_model, - num_iter = 1) + } else { + if (is_single_feature) { 
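+      # label-based experiment with a single feature: run effects plus the reference
+      # channel term (ref)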
+ linear_model <- lm(ABUNDANCE ~ RUN + ref, data = input) + } else { + linear_model <- lm(ABUNDANCE ~ FEATURE + RUN + ref, data = input) } - linear_model + } + if (!equal_variances) { + linear_model <- .updateUnequalVariances( + input = input, + fit = linear_model, + num_iter = 1 + ) + } + linear_model } @@ -186,30 +204,31 @@ #' @importFrom stats loess resid lm formula #' @return merMod #' @keywords internal -.updateUnequalVariances = function(input, fit, num_iter) { - weight = NULL - - for (i in seq_len(num_iter)) { - if (i == 1) { - abs.resids = data.frame(abs.resids = abs(fit$residuals)) - fitted = data.frame(fitted = fit$fitted.values) - input = data.frame(input, - "abs.resids" = abs.resids, - "fitted" = fitted) - } - fit.loess = loess(abs.resids ~ fitted, data = input) - loess.fitted = data.frame(loess.fitted = fitted(fit.loess)) - input = data.frame(input, "loess.fitted" = loess.fitted) - ## loess fitted valuaes are predicted sd - input$weight = 1 / (input$loess.fitted ^ 2) - input = input[, !(colnames(input) %in% "abs.resids")] - ## re-fit using weight - wls.fit = lm(formula(fit), data = input, weights = weight) - abs.resids = data.frame(abs.resids = abs(wls.fit$residuals)) - input = data.frame(input, "abs.resids" = abs.resids) - input = input[, -which(colnames(input) %in% c("loess.fitted", "weight"))] +.updateUnequalVariances <- function(input, fit, num_iter) { + weight <- NULL + + for (i in seq_len(num_iter)) { + if (i == 1) { + abs.resids <- data.frame(abs.resids = abs(fit$residuals)) + fitted <- data.frame(fitted = fit$fitted.values) + input <- data.frame(input, + "abs.resids" = abs.resids, + "fitted" = fitted + ) } - wls.fit + fit.loess <- loess(abs.resids ~ fitted, data = input) + loess.fitted <- data.frame(loess.fitted = fitted(fit.loess)) + input <- data.frame(input, "loess.fitted" = loess.fitted) + ## loess fitted valuaes are predicted sd + input$weight <- 1 / (input$loess.fitted^2) + input <- input[, !(colnames(input) %in% "abs.resids")] + ## re-fit using weight + wls.fit <- lm(formula(fit), data = input, weights = weight) + abs.resids <- data.frame(abs.resids = abs(wls.fit$residuals)) + input <- data.frame(input, "abs.resids" = abs.resids) + input <- input[, -which(colnames(input) %in% c("loess.fitted", "weight"))] + } + wls.fit } @@ -217,6 +236,6 @@ #' @param input data.table #' @return logical #' @keywords internal -.checkSingleFeature = function(input) { - data.table::uniqueN(input$FEATURE) < 2 +.checkSingleFeature <- function(input) { + data.table::uniqueN(input$FEATURE) < 2 } diff --git a/R/utils_summarization_prepare.R b/R/utils_summarization_prepare.R index 251055da..718dc6d9 100644 --- a/R/utils_summarization_prepare.R +++ b/R/utils_summarization_prepare.R @@ -1,97 +1,100 @@ #' Prepare feature-level data for protein-level summarization -#' +#' #' @param input feature-level data processed by dataProcess subfunctions #' @param method summarization method - `summaryMethod` parameter of the dataProcess function #' @param impute if TRUE, censored missing values will be imputed - `MBimpute` #' parameter of the dataProcess function -#' @param censored_symbol censored missing value indicator - `censoredInt` +#' @param censored_symbol censored missing value indicator - `censoredInt` #' parameter of the dataProcess function -#' @param remove_uninformative_feature_outlier if TRUE, features labeled as -#' outlier of uninformative by the MSstatsSelectFeatures function will not be +#' @param remove_uninformative_feature_outlier if TRUE, features labeled as +#' outlier of 
uninformative by the MSstatsSelectFeatures function will not be #' used in summarization -#' +#' #' @return data.table -#' +#' #' @export -#' -#' @examples -#' raw = DDARawData -#' method = "TMP" -#' cens = "NA" -#' impute = TRUE +#' +#' @examples +#' raw <- DDARawData +#' method <- "TMP" +#' cens <- "NA" +#' impute <- TRUE #' MSstatsConvert::MSstatsLogsSettings(FALSE) -#' input = MSstatsPrepareForDataProcess(raw, 2, NULL) +#' input <- MSstatsPrepareForDataProcess(raw, 2, NULL) #' head(input) -#' -MSstatsPrepareForSummarization = function(input, method, impute, censored_symbol, - remove_uninformative_feature_outlier) { - ABUNDANCE = feature_quality = is_outlier = PROTEIN = NULL - - label = data.table::uniqueN(input$LABEL) == 2 - if (label) { - input[, ref := factor(ifelse(LABEL == "L", RUN, 0))] - } - - if (is.element("remove", colnames(input))) { - input = input[!(remove)] - } - - if (remove_uninformative_feature_outlier & - is.element("feature_quality", colnames(input))) { - input[, ABUNDANCE := ifelse(feature_quality == "Uninformative", - NA, ABUNDANCE)] - input[, ABUNDANCE := ifelse(is_outlier, NA, ABUNDANCE)] - msg = "** Filtered out uninformative features and outliers." - getOption("MSstatsLog")("INFO", msg) - getOption("MSstatsMsg")("INFO", msg) - } - - input = .prepareSummary(input, method, impute, censored_symbol) - input[, PROTEIN := factor(PROTEIN)] - input +#' +MSstatsPrepareForSummarization <- function(input, method, impute, censored_symbol, + remove_uninformative_feature_outlier) { + ABUNDANCE <- feature_quality <- is_outlier <- PROTEIN <- NULL + + label <- data.table::uniqueN(input$LABEL) == 2 + if (label) { + input[, ref := factor(ifelse(LABEL == "L", RUN, 0))] + } + + if (is.element("remove", colnames(input))) { + input <- input[!(remove)] + } + + if (remove_uninformative_feature_outlier & + is.element("feature_quality", colnames(input))) { + input[, ABUNDANCE := ifelse(feature_quality == "Uninformative", + NA, ABUNDANCE + )] + input[, ABUNDANCE := ifelse(is_outlier, NA, ABUNDANCE)] + msg <- "** Filtered out uninformative features and outliers." 
+ getOption("MSstatsLog")("INFO", msg) + getOption("MSstatsMsg")("INFO", msg) + } + + input <- .prepareSummary(input, method, impute, censored_symbol) + input[, PROTEIN := factor(PROTEIN)] + input } #' Get feature-level data to be used in the MSstatsSummarizationOutput function -#' +#' #' @param input data.table processed by dataProcess subfunctions -#' +#' #' @return data.table processed by dataProcess subfunctions -#' +#' #' @export -#' -#' @examples -#' raw = DDARawData -#' method = "TMP" -#' cens = "NA" -#' impute = TRUE +#' +#' @examples +#' raw <- DDARawData +#' method <- "TMP" +#' cens <- "NA" +#' impute <- TRUE #' MSstatsConvert::MSstatsLogsSettings(FALSE) -#' input = MSstatsPrepareForDataProcess(raw, 2, NULL) -#' input = MSstatsNormalize(input, "EQUALIZEMEDIANS") -#' input = MSstatsMergeFractions(input) -#' input = MSstatsHandleMissing(input, "TMP", TRUE, "NA", 0.999) -#' input_all = MSstatsSelectFeatures(input, "all") # all features -#' input_5 = MSstatsSelectFeatures(data.table::copy(input), -#' "topN", top_n = 5) # top 5 features -#' -#' proc1 = getProcessed(input_all) -#' proc2 = getProcessed(input_5) -#' +#' input <- MSstatsPrepareForDataProcess(raw, 2, NULL) +#' input <- MSstatsNormalize(input, "EQUALIZEMEDIANS") +#' input <- MSstatsMergeFractions(input) +#' input <- MSstatsHandleMissing(input, "TMP", TRUE, "NA", 0.999) +#' input_all <- MSstatsSelectFeatures(input, "all") # all features +#' input_5 <- MSstatsSelectFeatures(data.table::copy(input), +#' "topN", +#' top_n = 5 +#' ) # top 5 features +#' +#' proc1 <- getProcessed(input_all) +#' proc2 <- getProcessed(input_5) +#' #' proc1 #' proc2 -#' -getProcessed = function(input) { - remove = NULL - - if (is.element("remove", colnames(input))) { - if (all(!(input$remove))) { - NULL - } else { - input[(remove)] - } +#' +getProcessed <- function(input) { + remove <- NULL + + if (is.element("remove", colnames(input))) { + if (all(!(input$remove))) { + NULL } else { - NULL + input[(remove)] } + } else { + NULL + } } @@ -102,13 +105,13 @@ getProcessed = function(input) { #' @param censored_symbol "0"/"NA" #' @return data.table #' @keywords internal -.prepareSummary = function(input, method, impute, censored_symbol) { - if (method == "TMP") { - input = .prepareTMP(input, impute, censored_symbol) - } else { - input = .prepareLinear(input, FALSE, censored_symbol) - } - input +.prepareSummary <- function(input, method, impute, censored_symbol) { + if (method == "TMP") { + input <- .prepareTMP(input, impute, censored_symbol) + } else { + input <- .prepareLinear(input, FALSE, censored_symbol) + } + input } @@ -116,21 +119,22 @@ getProcessed = function(input) { #' @inheritParams .prepareSummary #' @return data.table #' @keywords internal -.prepareLinear = function(input, impute, censored_symbol) { - newABUNDANCE = ABUNDANCE = nonmissing = n_obs = n_obs_run = NULL - total_features = FEATURE = prop_features = NULL - - input[, newABUNDANCE := ABUNDANCE] - input[, nonmissing := .getNonMissingFilter(.SD, impute, censored_symbol)] - input[, n_obs := sum(nonmissing), by = c("PROTEIN", "FEATURE")] - # remove feature with 1 measurement - input[, nonmissing := ifelse(n_obs <= 1, FALSE, nonmissing)] - input[, n_obs_run := sum(nonmissing), by = c("PROTEIN", "RUN")] - - input[, total_features := uniqueN(FEATURE), by = "PROTEIN"] - input[, prop_features := sum(nonmissing) / total_features, - by = c("PROTEIN", "RUN")] - input +.prepareLinear <- function(input, impute, censored_symbol) { + newABUNDANCE <- ABUNDANCE <- nonmissing <- n_obs <- n_obs_run <- 
NULL + total_features <- FEATURE <- prop_features <- NULL + + input[, newABUNDANCE := ABUNDANCE] + input[, nonmissing := .getNonMissingFilter(.SD, impute, censored_symbol)] + input[, n_obs := sum(nonmissing), by = c("PROTEIN", "FEATURE")] + # remove feature with 1 measurement + input[, nonmissing := ifelse(n_obs <= 1, FALSE, nonmissing)] + input[, n_obs_run := sum(nonmissing), by = c("PROTEIN", "RUN")] + + input[, total_features := uniqueN(FEATURE), by = "PROTEIN"] + input[, prop_features := sum(nonmissing) / total_features, + by = c("PROTEIN", "RUN") + ] + input } @@ -138,40 +142,42 @@ getProcessed = function(input) { #' @inheritParams .prepareSummary #' @return data.table #' @keywords internal -.prepareTMP = function(input, impute, censored_symbol) { - censored = feature_quality = newABUNDANCE = cen = nonmissing = n_obs = NULL - n_obs_run = total_features = FEATURE = prop_features = NULL - remove50missing = ABUNDANCE = NULL - - if (impute & !is.null(censored_symbol)) { - if (is.element("feature_quality", colnames(input))) { - input[, censored := ifelse(feature_quality == "Informative", - censored, FALSE)] - } - if (censored_symbol == "0") { - input[, newABUNDANCE := ifelse(censored, 0, ABUNDANCE)] - } else if (censored_symbol == "NA") { - input[, newABUNDANCE := ifelse(censored, NA, ABUNDANCE)] - } - input[, cen := ifelse(censored, 0, 1)] - } else { - input[, newABUNDANCE := ABUNDANCE] +.prepareTMP <- function(input, impute, censored_symbol) { + censored <- feature_quality <- newABUNDANCE <- cen <- nonmissing <- n_obs <- NULL + n_obs_run <- total_features <- FEATURE <- prop_features <- NULL + remove50missing <- ABUNDANCE <- NULL + + if (impute & !is.null(censored_symbol)) { + if (is.element("feature_quality", colnames(input))) { + input[, censored := ifelse(feature_quality == "Informative", + censored, FALSE + )] } - - input[, nonmissing := .getNonMissingFilter(input, impute, censored_symbol)] - input[, n_obs := sum(nonmissing), by = c("PROTEIN", "FEATURE")] - input[, nonmissing := ifelse(n_obs <= 1, FALSE, nonmissing)] - input[, n_obs_run := sum(nonmissing), by = c("PROTEIN", "RUN")] - - input[, total_features := uniqueN(FEATURE), by = "PROTEIN"] - input[, prop_features := sum(nonmissing) / total_features, - by = c("PROTEIN", "RUN")] - - if (is.element("cen", colnames(input))) { - if (any(input[["cen"]] == 0)) { - .setCensoredByThreshold(input, censored_symbol, remove50missing) - } + if (censored_symbol == "0") { + input[, newABUNDANCE := ifelse(censored, 0, ABUNDANCE)] + } else if (censored_symbol == "NA") { + input[, newABUNDANCE := ifelse(censored, NA, ABUNDANCE)] + } + input[, cen := ifelse(censored, 0, 1)] + } else { + input[, newABUNDANCE := ABUNDANCE] + } + + input[, nonmissing := .getNonMissingFilter(input, impute, censored_symbol)] + input[, n_obs := sum(nonmissing), by = c("PROTEIN", "FEATURE")] + input[, nonmissing := ifelse(n_obs <= 1, FALSE, nonmissing)] + input[, n_obs_run := sum(nonmissing), by = c("PROTEIN", "RUN")] + + input[, total_features := uniqueN(FEATURE), by = "PROTEIN"] + input[, prop_features := sum(nonmissing) / total_features, + by = c("PROTEIN", "RUN") + ] + + if (is.element("cen", colnames(input))) { + if (any(input[["cen"]] == 0)) { + .setCensoredByThreshold(input, censored_symbol, remove50missing) } + } - input + input } diff --git a/man/MSstatsContrastMatrix.Rd b/man/MSstatsContrastMatrix.Rd index faf30fe3..a29e648c 100644 --- a/man/MSstatsContrastMatrix.Rd +++ b/man/MSstatsContrastMatrix.Rd @@ -8,7 +8,7 @@ MSstatsContrastMatrix(contrasts, conditions, 
labels = NULL) } \arguments{ \item{contrasts}{One of the following: -i) list of lists. Each sub-list consists of two vectors that name +i) list of lists. Each sub-list consists of two vectors that name conditions that will be compared. See the details section for more information ii) matrix. In this case, it's correctness will be checked iii) "pairwise". In this case, pairwise comparison matrix will be generated diff --git a/man/MSstatsGroupComparison.Rd b/man/MSstatsGroupComparison.Rd index 0b48d455..e8048d4a 100644 --- a/man/MSstatsGroupComparison.Rd +++ b/man/MSstatsGroupComparison.Rd @@ -24,8 +24,8 @@ MSstatsGroupComparison( \item{samples_info}{data.table, output of getSamplesInfo function} -\item{numberOfCores}{Number of cores for parallel processing. When > 1, -a logfile named `MSstats_groupComparison_log_progress.log` is created to +\item{numberOfCores}{Number of cores for parallel processing. When > 1, +a logfile named `MSstats_groupComparison_log_progress.log` is created to track progress. Only works for Linux & Mac OS.} } \description{ @@ -33,16 +33,18 @@ Group comparison } \examples{ QuantData <- dataProcess(SRMRawData, use_log_file = FALSE) -group_comparison_input = MSstatsPrepareForGroupComparison(QuantData) +group_comparison_input <- MSstatsPrepareForGroupComparison(QuantData) levels(QuantData$ProteinLevelData$GROUP) -comparison <- matrix(c(-1,0,0,0,0,0,1,0,0,0),nrow=1) +comparison <- matrix(c(-1, 0, 0, 0, 0, 0, 1, 0, 0, 0), nrow = 1) row.names(comparison) <- "T7-T1" -groups = levels(QuantData$ProteinLevelData$GROUP) +groups <- levels(QuantData$ProteinLevelData$GROUP) colnames(comparison) <- groups[order(as.numeric(groups))] -samples_info = getSamplesInfo(QuantData) -repeated = checkRepeatedDesign(QuantData) -group_comparison = MSstatsGroupComparison(group_comparison_input, comparison, - FALSE, repeated, samples_info) +samples_info <- getSamplesInfo(QuantData) +repeated <- checkRepeatedDesign(QuantData) +group_comparison <- MSstatsGroupComparison( + group_comparison_input, comparison, + FALSE, repeated, samples_info +) length(group_comparison) # list of length equal to number of proteins group_comparison[[1]][[1]] # data used to fit linear model group_comparison[[1]][[2]] # comparison result diff --git a/man/MSstatsGroupComparisonOutput.Rd b/man/MSstatsGroupComparisonOutput.Rd index 31d2a5af..bdede0d7 100644 --- a/man/MSstatsGroupComparisonOutput.Rd +++ b/man/MSstatsGroupComparisonOutput.Rd @@ -21,18 +21,22 @@ Create output of group comparison based on results for individual proteins } \examples{ QuantData <- dataProcess(SRMRawData, use_log_file = FALSE) -group_comparison_input = MSstatsPrepareForGroupComparison(QuantData) +group_comparison_input <- MSstatsPrepareForGroupComparison(QuantData) levels(QuantData$ProteinLevelData$GROUP) -comparison <- matrix(c(-1,0,0,0,0,0,1,0,0,0),nrow=1) +comparison <- matrix(c(-1, 0, 0, 0, 0, 0, 1, 0, 0, 0), nrow = 1) row.names(comparison) <- "T7-T1" -groups = levels(QuantData$ProteinLevelData$GROUP) +groups <- levels(QuantData$ProteinLevelData$GROUP) colnames(comparison) <- groups[order(as.numeric(groups))] -samples_info = getSamplesInfo(QuantData) -repeated = checkRepeatedDesign(QuantData) -group_comparison = MSstatsGroupComparison(group_comparison_input, comparison, - FALSE, repeated, samples_info) -group_comparison_final = MSstatsGroupComparisonOutput(group_comparison, - QuantData) -group_comparison_final[["ComparisonResult"]] - +samples_info <- getSamplesInfo(QuantData) +repeated <- checkRepeatedDesign(QuantData) +group_comparison <- 
MSstatsGroupComparison( + group_comparison_input, comparison, + FALSE, repeated, samples_info +) +group_comparison_final <- MSstatsGroupComparisonOutput( + group_comparison, + QuantData +) +group_comparison_final[["ComparisonResult"]] + } diff --git a/man/MSstatsGroupComparisonSingleProtein.Rd b/man/MSstatsGroupComparisonSingleProtein.Rd index fa279ba1..b8de2d52 100644 --- a/man/MSstatsGroupComparisonSingleProtein.Rd +++ b/man/MSstatsGroupComparisonSingleProtein.Rd @@ -37,15 +37,16 @@ Group comparison for a single protein QuantData <- dataProcess(SRMRawData, use_log_file = FALSE) group_comparison_input <- MSstatsPrepareForGroupComparison(QuantData) levels(QuantData$ProteinLevelData$GROUP) -comparison <- matrix(c(-1,0,0,0,0,0,1,0,0,0),nrow=1) +comparison <- matrix(c(-1, 0, 0, 0, 0, 0, 1, 0, 0, 0), nrow = 1) row.names(comparison) <- "T7-T1" -groups = levels(QuantData$ProteinLevelData$GROUP) +groups <- levels(QuantData$ProteinLevelData$GROUP) colnames(comparison) <- groups[order(as.numeric(groups))] samples_info <- getSamplesInfo(QuantData) repeated <- checkRepeatedDesign(QuantData) single_output <- MSstatsGroupComparisonSingleProtein( group_comparison_input[[1]], comparison, repeated, groups, samples_info, - FALSE, TRUE) + FALSE, TRUE +) single_output # same as a single element of MSstatsGroupComparison output } diff --git a/man/MSstatsHandleMissing.Rd b/man/MSstatsHandleMissing.Rd index 8fc72da2..65f9ef79 100644 --- a/man/MSstatsHandleMissing.Rd +++ b/man/MSstatsHandleMissing.Rd @@ -17,7 +17,7 @@ MSstatsHandleMissing( \item{summary_method}{summarization method (`summaryMethod` parameter to `dataProcess`)} -\item{impute}{if TRUE, missing values are supposed to be imputed +\item{impute}{if TRUE, missing values are supposed to be imputed (`MBimpute` parameter to `dataProcess`)} \item{missing_symbol}{`censoredInt` parameter to `dataProcess`} @@ -31,15 +31,15 @@ data.table Handle censored missing values } \examples{ -raw = DDARawData -method = "TMP" -cens = "NA" -impute = TRUE +raw <- DDARawData +method <- "TMP" +cens <- "NA" +impute <- TRUE MSstatsConvert::MSstatsLogsSettings(FALSE) -input = MSstatsPrepareForDataProcess(raw, 2, NULL) -input = MSstatsNormalize(input, "EQUALIZEMEDIANS") -input = MSstatsMergeFractions(input) -input = MSstatsHandleMissing(input, "TMP", TRUE, "NA", 0.999) +input <- MSstatsPrepareForDataProcess(raw, 2, NULL) +input <- MSstatsNormalize(input, "EQUALIZEMEDIANS") +input <- MSstatsMergeFractions(input) +input <- MSstatsHandleMissing(input, "TMP", TRUE, "NA", 0.999) head(input) - + } diff --git a/man/MSstatsMergeFractions.Rd b/man/MSstatsMergeFractions.Rd index 2ee7e9f5..d5ab051b 100644 --- a/man/MSstatsMergeFractions.Rd +++ b/man/MSstatsMergeFractions.Rd @@ -16,14 +16,14 @@ data.table Re-format the data before feature selection } \examples{ -raw = DDARawData -method = "TMP" -cens = "NA" -impute = TRUE +raw <- DDARawData +method <- "TMP" +cens <- "NA" +impute <- TRUE MSstatsConvert::MSstatsLogsSettings(FALSE) -input = MSstatsPrepareForDataProcess(raw, 2, NULL) -input = MSstatsNormalize(input, "EQUALIZEMEDIANS") -input = MSstatsMergeFractions(input) +input <- MSstatsPrepareForDataProcess(raw, 2, NULL) +input <- MSstatsNormalize(input, "EQUALIZEMEDIANS") +input <- MSstatsMergeFractions(input) head(input) } diff --git a/man/MSstatsNormalize.Rd b/man/MSstatsNormalize.Rd index 1da3e808..4d481fa9 100644 --- a/man/MSstatsNormalize.Rd +++ b/man/MSstatsNormalize.Rd @@ -19,10 +19,10 @@ MSstatsNormalize( "QUANTILE" normalization for quantile normalization from `preprocessCore` 
package, "GLOBALSTANDARDS" for normalization based on selected peptides or proteins.} -\item{peptides_dict}{`data.table` of names of peptides and their corresponding +\item{peptides_dict}{`data.table` of names of peptides and their corresponding features.} -\item{standards}{character vector with names of standards, required if +\item{standards}{character vector with names of standards, required if "GLOBALSTANDARDS" method was selected.} } \value{ @@ -32,13 +32,13 @@ data.table Normalize MS data } \examples{ -raw = DDARawData -method = "TMP" -cens = "NA" -impute = TRUE +raw <- DDARawData +method <- "TMP" +cens <- "NA" +impute <- TRUE MSstatsConvert::MSstatsLogsSettings(FALSE) -input = MSstatsPrepareForDataProcess(raw, 2, NULL) -input = MSstatsNormalize(input, "EQUALIZEMEDIANS") # median normalization +input <- MSstatsPrepareForDataProcess(raw, 2, NULL) +input <- MSstatsNormalize(input, "EQUALIZEMEDIANS") # median normalization head(input) } diff --git a/man/MSstatsPrepareForDataProcess.Rd b/man/MSstatsPrepareForDataProcess.Rd index 364ec889..e6d3bb8f 100644 --- a/man/MSstatsPrepareForDataProcess.Rd +++ b/man/MSstatsPrepareForDataProcess.Rd @@ -23,12 +23,12 @@ data.table Prepare data for processing by `dataProcess` function } \examples{ -raw = DDARawData -method = "TMP" -cens = "NA" -impute = TRUE +raw <- DDARawData +method <- "TMP" +cens <- "NA" +impute <- TRUE MSstatsConvert::MSstatsLogsSettings(FALSE) -input = MSstatsPrepareForDataProcess(raw, 2, NULL) +input <- MSstatsPrepareForDataProcess(raw, 2, NULL) head(input) } diff --git a/man/MSstatsPrepareForGroupComparison.Rd b/man/MSstatsPrepareForGroupComparison.Rd index 630180f4..aa3b1f5c 100644 --- a/man/MSstatsPrepareForGroupComparison.Rd +++ b/man/MSstatsPrepareForGroupComparison.Rd @@ -10,7 +10,7 @@ MSstatsPrepareForGroupComparison(summarization_output) \item{summarization_output}{output of dataProcess} } \value{ -list of run-level data for each protein in the input. +list of run-level data for each protein in the input. This list has a "has_imputed" attribute that indicates if missing values were imputed in the input dataset. 
} @@ -19,7 +19,7 @@ Prepare output for dataProcess for group comparison } \examples{ QuantData <- dataProcess(SRMRawData, use_log_file = FALSE) -group_comparison_input = MSstatsPrepareForGroupComparison(QuantData) +group_comparison_input <- MSstatsPrepareForGroupComparison(QuantData) length(group_comparison_input) # list of length equal to number of proteins # in protein-level data of QuantData head(group_comparison_input[[1]]) diff --git a/man/MSstatsPrepareForSummarization.Rd b/man/MSstatsPrepareForSummarization.Rd index dff9368a..7ca1dddb 100644 --- a/man/MSstatsPrepareForSummarization.Rd +++ b/man/MSstatsPrepareForSummarization.Rd @@ -20,11 +20,11 @@ MSstatsPrepareForSummarization( \item{impute}{if TRUE, censored missing values will be imputed - `MBimpute` parameter of the dataProcess function} -\item{censored_symbol}{censored missing value indicator - `censoredInt` +\item{censored_symbol}{censored missing value indicator - `censoredInt` parameter of the dataProcess function} -\item{remove_uninformative_feature_outlier}{if TRUE, features labeled as -outlier of uninformative by the MSstatsSelectFeatures function will not be +\item{remove_uninformative_feature_outlier}{if TRUE, features labeled as +outlier of uninformative by the MSstatsSelectFeatures function will not be used in summarization} } \value{ @@ -34,12 +34,12 @@ data.table Prepare feature-level data for protein-level summarization } \examples{ -raw = DDARawData -method = "TMP" -cens = "NA" -impute = TRUE +raw <- DDARawData +method <- "TMP" +cens <- "NA" +impute <- TRUE MSstatsConvert::MSstatsLogsSettings(FALSE) -input = MSstatsPrepareForDataProcess(raw, 2, NULL) +input <- MSstatsPrepareForDataProcess(raw, 2, NULL) head(input) } diff --git a/man/MSstatsSelectFeatures.Rd b/man/MSstatsSelectFeatures.Rd index 155f0579..bc9dcc20 100644 --- a/man/MSstatsSelectFeatures.Rd +++ b/man/MSstatsSelectFeatures.Rd @@ -22,18 +22,18 @@ data.table Feature selection before feature-level data summarization } \examples{ -raw = DDARawData -method = "TMP" -cens = "NA" -impute = TRUE +raw <- DDARawData +method <- "TMP" +cens <- "NA" +impute <- TRUE MSstatsConvert::MSstatsLogsSettings(FALSE) -input = MSstatsPrepareForDataProcess(raw, 2, NULL) -input = MSstatsNormalize(input, "EQUALIZEMEDIANS") -input = MSstatsMergeFractions(input) -input = MSstatsHandleMissing(input, "TMP", TRUE, "NA", 0.999) -input_all = MSstatsSelectFeatures(input, "all") # all features -input_5 = MSstatsSelectFeatures(data.table::copy(input), "topN", top_n = 5) # top 5 features -input_informative = MSstatsSelectFeatures(input, "highQuality") # feature selection +input <- MSstatsPrepareForDataProcess(raw, 2, NULL) +input <- MSstatsNormalize(input, "EQUALIZEMEDIANS") +input <- MSstatsMergeFractions(input) +input <- MSstatsHandleMissing(input, "TMP", TRUE, "NA", 0.999) +input_all <- MSstatsSelectFeatures(input, "all") # all features +input_5 <- MSstatsSelectFeatures(data.table::copy(input), "topN", top_n = 5) # top 5 features +input_informative <- MSstatsSelectFeatures(input, "highQuality") # feature selection head(input_all) head(input_5) diff --git a/man/MSstatsSummarizationOutput.Rd b/man/MSstatsSummarizationOutput.Rd index 280087c0..103404c5 100644 --- a/man/MSstatsSummarizationOutput.Rd +++ b/man/MSstatsSummarizationOutput.Rd @@ -26,13 +26,13 @@ MSstatsSummarizationOutput( \item{impute}{if TRUE, censored missing values were imputed (`MBimpute` parameter to `dataProcess`)} -\item{censored_symbol}{censored missing value indicator +\item{censored_symbol}{censored missing value 
indicator (`censoredInt` parameter to `dataProcess`)} } \value{ list that consists of the following elements: \itemize{ -\item{FeatureLevelData}{ - feature-level data after processing} +\item{FeatureLevelData}{ - feature-level data after processing} \item{ProteinLevelData}{ - protein-level (summarized) data} \item{SummaryMethod}{ (string) - name of summarization method that was used} } @@ -41,20 +41,22 @@ list that consists of the following elements: Post-processing output from MSstats summarization } \examples{ -raw = DDARawData -method = "TMP" -cens = "NA" -impute = TRUE +raw <- DDARawData +method <- "TMP" +cens <- "NA" +impute <- TRUE MSstatsConvert::MSstatsLogsSettings(FALSE) -input = MSstatsPrepareForDataProcess(raw, 2, NULL) -input = MSstatsNormalize(input, "EQUALIZEMEDIANS") -input = MSstatsMergeFractions(input) -input = MSstatsHandleMissing(input, "TMP", TRUE, "NA", 0.999) -input = MSstatsSelectFeatures(input, "all") -processed = getProcessed(input) -input = MSstatsPrepareForSummarization(input, method, impute, cens, FALSE) -summarized = MSstatsSummarizeWithSingleCore(input, method, impute, cens, FALSE, TRUE) -output = output = MSstatsSummarizationOutput(input, summarized, processed, -method, impute, cens) +input <- MSstatsPrepareForDataProcess(raw, 2, NULL) +input <- MSstatsNormalize(input, "EQUALIZEMEDIANS") +input <- MSstatsMergeFractions(input) +input <- MSstatsHandleMissing(input, "TMP", TRUE, "NA", 0.999) +input <- MSstatsSelectFeatures(input, "all") +processed <- getProcessed(input) +input <- MSstatsPrepareForSummarization(input, method, impute, cens, FALSE) +summarized <- MSstatsSummarizeWithSingleCore(input, method, impute, cens, FALSE, TRUE) +output <- output <- MSstatsSummarizationOutput( + input, summarized, processed, + method, impute, cens +) } diff --git a/man/MSstatsSummarizeSingleLinear.Rd b/man/MSstatsSummarizeSingleLinear.Rd index 82dde780..700ac2b4 100644 --- a/man/MSstatsSummarizeSingleLinear.Rd +++ b/man/MSstatsSummarizeSingleLinear.Rd @@ -18,20 +18,20 @@ list with protein-level data Linear model-based summarization for a single protein } \examples{ -raw = DDARawData -method = "linear" -cens = NULL -impute = FALSE +raw <- DDARawData +method <- "linear" +cens <- NULL +impute <- FALSE # currently, MSstats only supports MBimpute = FALSE for linear summarization MSstatsConvert::MSstatsLogsSettings(FALSE) -input = MSstatsPrepareForDataProcess(raw, 2, NULL) -input = MSstatsNormalize(input, "EQUALIZEMEDIANS") -input = MSstatsMergeFractions(input) -input = MSstatsHandleMissing(input, "TMP", TRUE, "NA", 0.999) -input = MSstatsSelectFeatures(input, "all") -input = MSstatsPrepareForSummarization(input, method, impute, cens, FALSE) -input_split = split(input, input$PROTEIN) -single_protein_summary = MSstatsSummarizeSingleLinear(input_split[[1]]) +input <- MSstatsPrepareForDataProcess(raw, 2, NULL) +input <- MSstatsNormalize(input, "EQUALIZEMEDIANS") +input <- MSstatsMergeFractions(input) +input <- MSstatsHandleMissing(input, "TMP", TRUE, "NA", 0.999) +input <- MSstatsSelectFeatures(input, "all") +input <- MSstatsPrepareForSummarization(input, method, impute, cens, FALSE) +input_split <- split(input, input$PROTEIN) +single_protein_summary <- MSstatsSummarizeSingleLinear(input_split[[1]]) head(single_protein_summary[[1]]) } diff --git a/man/MSstatsSummarizeSingleTMP.Rd b/man/MSstatsSummarizeSingleTMP.Rd index 9d656887..b592f4b8 100644 --- a/man/MSstatsSummarizeSingleTMP.Rd +++ b/man/MSstatsSummarizeSingleTMP.Rd @@ -14,18 +14,18 @@ MSstatsSummarizeSingleTMP( \arguments{ 
\item{single_protein}{feature-level data for a single protein} -\item{impute}{only for summaryMethod = "TMP" and censoredInt = 'NA' or '0'. -TRUE (default) imputes 'NA' or '0' (depending on censoredInt option) by Accelated failure model. +\item{impute}{only for summaryMethod = "TMP" and censoredInt = 'NA' or '0'. +TRUE (default) imputes 'NA' or '0' (depending on censoredInt option) by Accelated failure model. FALSE uses the values assigned by cutoffCensored} -\item{censored_symbol}{Missing values are censored or at random. -'NA' (default) assumes that all 'NA's in 'Intensity' column are censored. -'0' uses zero intensities as censored intensity. -In this case, NA intensities are missing at random. -The output from Skyline should use '0'. +\item{censored_symbol}{Missing values are censored or at random. +'NA' (default) assumes that all 'NA's in 'Intensity' column are censored. +'0' uses zero intensities as censored intensity. +In this case, NA intensities are missing at random. +The output from Skyline should use '0'. Null assumes that all NA intensites are randomly missing.} -\item{remove50missing}{only for summaryMethod = "TMP". TRUE removes the proteins +\item{remove50missing}{only for summaryMethod = "TMP". TRUE removes the proteins where every run has at least 50\% missing values for each peptide. FALSE is default.} } \value{ @@ -36,21 +36,23 @@ the other with protein-level data Tukey Median Polish summarization for a single protein } \examples{ -raw = DDARawData -method = "TMP" -cens = "NA" -impute = TRUE +raw <- DDARawData +method <- "TMP" +cens <- "NA" +impute <- TRUE # currently, MSstats only supports MBimpute = FALSE for linear summarization MSstatsConvert::MSstatsLogsSettings(FALSE) -input = MSstatsPrepareForDataProcess(raw, 2, NULL) -input = MSstatsNormalize(input, "EQUALIZEMEDIANS") -input = MSstatsMergeFractions(input) -input = MSstatsHandleMissing(input, "TMP", TRUE, "NA", 0.999) -input = MSstatsSelectFeatures(input, "all") -input = MSstatsPrepareForSummarization(input, method, impute, cens, FALSE) -input_split = split(input, input$PROTEIN) -single_protein_summary = MSstatsSummarizeSingleTMP(input_split[[1]], - impute, cens, FALSE) +input <- MSstatsPrepareForDataProcess(raw, 2, NULL) +input <- MSstatsNormalize(input, "EQUALIZEMEDIANS") +input <- MSstatsMergeFractions(input) +input <- MSstatsHandleMissing(input, "TMP", TRUE, "NA", 0.999) +input <- MSstatsSelectFeatures(input, "all") +input <- MSstatsPrepareForSummarization(input, method, impute, cens, FALSE) +input_split <- split(input, input$PROTEIN) +single_protein_summary <- MSstatsSummarizeSingleTMP( + input_split[[1]], + impute, cens, FALSE +) head(single_protein_summary[[1]]) } diff --git a/man/MSstatsSummarizeWithMultipleCores.Rd b/man/MSstatsSummarizeWithMultipleCores.Rd index 7387110e..04363fa7 100644 --- a/man/MSstatsSummarizeWithMultipleCores.Rd +++ b/man/MSstatsSummarizeWithMultipleCores.Rd @@ -19,29 +19,29 @@ MSstatsSummarizeWithMultipleCores( \item{method}{summarization method: "linear" or "TMP"} -\item{impute}{only for summaryMethod = "TMP" and censoredInt = 'NA' or '0'. -TRUE (default) imputes 'NA' or '0' (depending on censoredInt option) by Accelated failure model. +\item{impute}{only for summaryMethod = "TMP" and censoredInt = 'NA' or '0'. +TRUE (default) imputes 'NA' or '0' (depending on censoredInt option) by Accelated failure model. FALSE uses the values assigned by cutoffCensored} -\item{censored_symbol}{Missing values are censored or at random. 
-'NA' (default) assumes that all 'NA's in 'Intensity' column are censored. -'0' uses zero intensities as censored intensity. -In this case, NA intensities are missing at random. -The output from Skyline should use '0'. +\item{censored_symbol}{Missing values are censored or at random. +'NA' (default) assumes that all 'NA's in 'Intensity' column are censored. +'0' uses zero intensities as censored intensity. +In this case, NA intensities are missing at random. +The output from Skyline should use '0'. Null assumes that all NA intensites are randomly missing.} -\item{remove50missing}{only for summaryMethod = "TMP". TRUE removes the proteins +\item{remove50missing}{only for summaryMethod = "TMP". TRUE removes the proteins where every run has at least 50\% missing values for each peptide. FALSE is default.} -\item{equal_variance}{only for summaryMethod = "linear". Default is TRUE. -Logical variable for whether the model should account for heterogeneous variation +\item{equal_variance}{only for summaryMethod = "linear". Default is TRUE. +Logical variable for whether the model should account for heterogeneous variation among intensities from different features. Default is TRUE, which assume equal -variance among intensities from features. FALSE means that we cannot assume +variance among intensities from features. FALSE means that we cannot assume equal variance among intensities from features, then we will account for heterogeneous variation from different features.} -\item{numberOfCores}{Number of cores for parallel processing. When > 1, -a logfile named `MSstats_dataProcess_log_progress.log` is created to +\item{numberOfCores}{Number of cores for parallel processing. When > 1, +a logfile named `MSstats_dataProcess_log_progress.log` is created to track progress. Only works for Linux & Mac OS. Default is 1.} } \value{ diff --git a/man/MSstatsSummarizeWithSingleCore.Rd b/man/MSstatsSummarizeWithSingleCore.Rd index 84b64a8c..d18530a5 100644 --- a/man/MSstatsSummarizeWithSingleCore.Rd +++ b/man/MSstatsSummarizeWithSingleCore.Rd @@ -18,24 +18,24 @@ MSstatsSummarizeWithSingleCore( \item{method}{summarization method: "linear" or "TMP"} -\item{impute}{only for summaryMethod = "TMP" and censoredInt = 'NA' or '0'. -TRUE (default) imputes 'NA' or '0' (depending on censoredInt option) by Accelated failure model. +\item{impute}{only for summaryMethod = "TMP" and censoredInt = 'NA' or '0'. +TRUE (default) imputes 'NA' or '0' (depending on censoredInt option) by Accelated failure model. FALSE uses the values assigned by cutoffCensored} -\item{censored_symbol}{Missing values are censored or at random. -'NA' (default) assumes that all 'NA's in 'Intensity' column are censored. -'0' uses zero intensities as censored intensity. -In this case, NA intensities are missing at random. -The output from Skyline should use '0'. +\item{censored_symbol}{Missing values are censored or at random. +'NA' (default) assumes that all 'NA's in 'Intensity' column are censored. +'0' uses zero intensities as censored intensity. +In this case, NA intensities are missing at random. +The output from Skyline should use '0'. Null assumes that all NA intensites are randomly missing.} -\item{remove50missing}{only for summaryMethod = "TMP". TRUE removes the proteins +\item{remove50missing}{only for summaryMethod = "TMP". TRUE removes the proteins where every run has at least 50\% missing values for each peptide. FALSE is default.} -\item{equal_variance}{only for summaryMethod = "linear". Default is TRUE. 
-Logical variable for whether the model should account for heterogeneous variation 
+\item{equal_variance}{only for summaryMethod = "linear". Default is TRUE.
+Logical variable for whether the model should account for heterogeneous variation
 among intensities from different features. Default is TRUE, which assume equal 
-variance among intensities from features. FALSE means that we cannot assume 
+variance among intensities from features. FALSE means that we cannot assume
 equal variance among intensities from features, then we will account for 
 heterogeneous variation from different features.}
 }
 \value{
@@ -46,19 +46,19 @@ list of length one with run-level data.
 Feature-level data summarization with 1 core
 }
 \examples{
-raw = DDARawData
-method = "TMP"
-cens = "NA"
-impute = TRUE
+raw <- DDARawData
+method <- "TMP"
+cens <- "NA"
+impute <- TRUE
 MSstatsConvert::MSstatsLogsSettings(FALSE)
-input = MSstatsPrepareForDataProcess(raw, 2, NULL)
-input = MSstatsNormalize(input, "EQUALIZEMEDIANS")
-input = MSstatsMergeFractions(input)
-input = MSstatsHandleMissing(input, "TMP", TRUE, "NA", 0.999)
-input = MSstatsSelectFeatures(input, "all")
-processed = getProcessed(input)
-input = MSstatsPrepareForSummarization(input, method, impute, cens, FALSE)
-summarized = MSstatsSummarizeWithSingleCore(input, method, impute, cens, FALSE, TRUE)
+input <- MSstatsPrepareForDataProcess(raw, 2, NULL)
+input <- MSstatsNormalize(input, "EQUALIZEMEDIANS")
+input <- MSstatsMergeFractions(input)
+input <- MSstatsHandleMissing(input, "TMP", TRUE, "NA", 0.999)
+input <- MSstatsSelectFeatures(input, "all")
+processed <- getProcessed(input)
+input <- MSstatsPrepareForSummarization(input, method, impute, cens, FALSE)
+summarized <- MSstatsSummarizeWithSingleCore(input, method, impute, cens, FALSE, TRUE)
 length(summarized) # list of summarization outputs for each protein
 head(summarized[[1]][[1]]) # run-level summary
diff --git a/man/SDRFtoAnnotation.Rd b/man/SDRFtoAnnotation.Rd
index b45f34bb..ab5d407a 100644
--- a/man/SDRFtoAnnotation.Rd
+++ b/man/SDRFtoAnnotation.Rd
@@ -15,35 +15,35 @@ SDRFtoAnnotation(
 \arguments{
 \item{data}{SDRF annotation file}

-\item{run_name}{Column name in SDRF file which contains the name of the MS 
+\item{run_name}{Column name in SDRF file which contains the name of the MS
 run. The information in this column must match exactly with the run names in
 the PSM file}

-\item{condition_name}{Column name in SDRF file which contains information on 
+\item{condition_name}{Column name in SDRF file which contains information on
 the conditions in the data.}

-\item{biological_replicate}{Column name in SDRF file which contains the 
-identifier for the biological replicte. Note MSstats uses this column to 
-determine if the experiment is a repeated measure design. BioReplicte IDs 
+\item{biological_replicate}{Column name in SDRF file which contains the
+identifier for the biological replicate. Note MSstats uses this column to
+determine if the experiment is a repeated measure design. BioReplicate IDs
 should only be reused if the replicate was measured multiple times.}

-\item{fraction}{Column name in SDFT file which contains information on the 
-fractionation in the data. Only required if data contains fractions. Default 
+\item{fraction}{Column name in SDRF file which contains information on the
+fractionation in the data. Only required if data contains fractions. Default
 is `NULL`}
 }
 \description{
 Takes an SDRF file and outputs an MSstats annotation file. 
Note -the information in the SDRF file must be correctly annotated for MSstats so -that MSstats can identify the experimental design. In particular the -biological replicates must be correctly annotated, with group comparison -experiments having a unique ID for each BioReplicate. For more information -on this please see the Supplementary of the most recent +the information in the SDRF file must be correctly annotated for MSstats so +that MSstats can identify the experimental design. In particular the +biological replicates must be correctly annotated, with group comparison +experiments having a unique ID for each BioReplicate. For more information +on this please see the Supplementary of the most recent \href{https://pubs.acs.org/doi/10.1021/acs.jproteome.2c00834}{MSstats paper} } \examples{ head(example_SDRF) -msstats_annotation = SDRFtoAnnotation(example_SDRF) +msstats_annotation <- SDRFtoAnnotation(example_SDRF) head(msstats_annotation) } diff --git a/man/dataProcess.Rd b/man/dataProcess.Rd index bb934741..1f4a38ff 100644 --- a/man/dataProcess.Rd +++ b/man/dataProcess.Rd @@ -32,30 +32,30 @@ dataProcess( \item{logTrans}{base of logarithm transformation: 2 (default) or 10.} -\item{normalization}{normalization to remove systematic bias between MS runs. +\item{normalization}{normalization to remove systematic bias between MS runs. There are three different normalizations supported: -'equalizeMedians' (default) represents constant normalization (equalizing the medians) -based on reference signals is performed. -'quantile' represents quantile normalization based on reference signals -'globalStandards' represents normalization with global standards proteins. -If FALSE, no normalization is performed. See MSstats vignettes for +'equalizeMedians' (default) represents constant normalization (equalizing the medians) +based on reference signals is performed. +'quantile' represents quantile normalization based on reference signals +'globalStandards' represents normalization with global standards proteins. +If FALSE, no normalization is performed. See MSstats vignettes for recommendations on which normalization option to use.} -\item{nameStandards}{optional vector of global standard peptide names. +\item{nameStandards}{optional vector of global standard peptide names. Required only for normalization with global standard peptides.} -\item{featureSubset}{"all" (default) uses all features that the data set has. -"top3" uses top 3 features which have highest average of log-intensity across runs. -"topN" uses top N features which has highest average of log-intensity across runs. -It needs the input for n_top_feature option. -"highQuality" flags uninformative feature and outliers. See MSstats vignettes for +\item{featureSubset}{"all" (default) uses all features that the data set has. +"top3" uses top 3 features which have highest average of log-intensity across runs. +"topN" uses top N features which has highest average of log-intensity across runs. +It needs the input for n_top_feature option. +"highQuality" flags uninformative feature and outliers. See MSstats vignettes for recommendations on which feature selection option to use.} -\item{remove_uninformative_feature_outlier}{optional. Only required if -featureSubset = "highQuality". TRUE allows to remove +\item{remove_uninformative_feature_outlier}{optional. Only required if +featureSubset = "highQuality". 
TRUE allows to remove
 1) noisy features (flagged in the column feature_quality with "Uninformative"),
-2) outliers (flagged in the column, is_outlier with TRUE, 
-before run-level summarization. FALSE (default) uses all features and intensities 
+2) outliers (flagged in the column is_outlier with TRUE),
+before run-level summarization. FALSE (default) uses all features and intensities
 for run-level summarization.}

 \item{min_feature_count}{optional. Only required if featureSubset = "highQuality".
@@ -66,30 +66,30 @@ in the feature selection algorithm.}
 It that case, it specifies number of top features that will be used.
 Default is 3, which means to use top 3 features.}

-\item{summaryMethod}{"TMP" (default) means Tukey's median polish, 
+\item{summaryMethod}{"TMP" (default) means Tukey's median polish,
 which is robust estimation method. "linear" uses linear mixed model.}

-\item{equalFeatureVar}{only for summaryMethod = "linear". default is TRUE. 
-Logical variable for whether the model should account for heterogeneous variation 
-among intensities from different features. Default is TRUE, which assume equal 
-variance among intensities from features. FALSE means that we cannot assume equal 
-variance among intensities from features, then we will account for heterogeneous 
+\item{equalFeatureVar}{only for summaryMethod = "linear". Default is TRUE.
+Logical variable for whether the model should account for heterogeneous variation
+among intensities from different features. Default is TRUE, which assumes equal
+variance among intensities from features. FALSE means that we cannot assume equal
+variance among intensities from features, then we will account for heterogeneous
 variation from different features.}

-\item{censoredInt}{Missing values are censored or at random. 
-'NA' (default) assumes that all 'NA's in 'Intensity' column are censored. 
-'0' uses zero intensities as censored intensity. 
-In this case, NA intensities are missing at random. 
-The output from Skyline should use '0'. 
+\item{censoredInt}{Missing values are censored or at random.
+'NA' (default) assumes that all 'NA's in 'Intensity' column are censored.
+'0' uses zero intensities as censored intensity.
+In this case, NA intensities are missing at random.
+The output from Skyline should use '0'.
 Null assumes that all NA intensites are randomly missing.}

-\item{MBimpute}{only for summaryMethod = "TMP" and censoredInt = 'NA' or '0'. 
-TRUE (default) imputes missing values with 'NA' or '0' (depending on censoredInt option) 
-by Accelerated failure model. If set to FALSE, no missing values are imputed. 
+\item{MBimpute}{only for summaryMethod = "TMP" and censoredInt = 'NA' or '0'.
+TRUE (default) imputes missing values with 'NA' or '0' (depending on censoredInt option)
+by Accelerated failure model. If set to FALSE, no missing values are imputed.
 FALSE is appropriate only when missingness is assumed to be at random. See MSstats
 vignettes for recommendations on which imputation option to use.}

-\item{remove50missing}{only for summaryMethod = "TMP". TRUE removes the proteins 
+\item{remove50missing}{only for summaryMethod = "TMP". TRUE removes the proteins
 where every run has at least 50\% missing values for each peptide. FALSE is default.}

 \item{fix_missing}{Optional, same as the `fix_missing` parameter in MSstatsConvert::MSstatsBalancedDesign function}
@@ -105,13 +105,13 @@ to an existing log file.}
 \item{verbose}{logical. If TRUE, information about data processing wil be printed
 to the console.}

-\item{log_file_path}{character. 
Path to a file to which information about 
-data processing will be saved. 
+\item{log_file_path}{character. Path to a file to which information about
+data processing will be saved.
 If not provided, such a file will be created automatically.
 If `append = TRUE`, has to be a valid path to a file.}

-\item{numberOfCores}{Number of cores for parallel processing. When > 1, 
-a logfile named `MSstats_dataProcess_log_progress.log` is created to 
+\item{numberOfCores}{Number of cores for parallel processing. When > 1,
+a logfile named `MSstats_dataProcess_log_progress.log` is created to
 track progress. Only works for Linux & Mac OS. Default is 1.}
 }
 \value{
@@ -163,13 +163,13 @@ Process MS data: clean, normalize and summarize before differential analysis
 # across time points.
 head(SRMRawData)
 # Log2 transformation and normalization are applied (default)
-QuantData<-dataProcess(SRMRawData, use_log_file = FALSE)
+QuantData <- dataProcess(SRMRawData, use_log_file = FALSE)
 head(QuantData$FeatureLevelData)
 # Log10 transformation and normalization are applied
-QuantData1<-dataProcess(SRMRawData, logTrans=10, use_log_file = FALSE)
+QuantData1 <- dataProcess(SRMRawData, logTrans = 10, use_log_file = FALSE)
 head(QuantData1$FeatureLevelData)
 # Log2 transformation and no normalization are applied
-QuantData2<-dataProcess(SRMRawData,normalization=FALSE, use_log_file = FALSE)
+QuantData2 <- dataProcess(SRMRawData, normalization = FALSE, use_log_file = FALSE)
 head(QuantData2$FeatureLevelData)
 }
diff --git a/man/dataProcessPlots.Rd b/man/dataProcessPlots.Rd
index 2d1f08ca..9f7f4496 100644
--- a/man/dataProcessPlots.Rd
+++ b/man/dataProcessPlots.Rd
@@ -33,43 +33,43 @@ dataProcessPlots(
 \arguments{
 \item{data}{name of the (output of dataProcess function) data set.}

-\item{type}{choice of visualization. "ProfilePlot" represents profile plot of 
-log intensities across MS runs. "QCPlot" represents quality control plot of 
-log intensities across MS runs. "ConditionPlot" represents mean plot of log 
+\item{type}{choice of visualization. "ProfilePlot" represents profile plot of
+log intensities across MS runs. "QCPlot" represents quality control plot of
+log intensities across MS runs. "ConditionPlot" represents mean plot of log
 ratios (Light/Heavy) across conditions.}

-\item{featureName}{for "ProfilePlot" only, "Transition" (default) means 
-printing feature legend in transition-level; "Peptide" means printing feature 
+\item{featureName}{for "ProfilePlot" only, "Transition" (default) means
+printing feature legend in transition-level; "Peptide" means printing feature
 legend in peptide-level; "NA" means no feature legend printing.}

-\item{ylimUp}{upper limit for y-axis in the log scale. FALSE(Default) for 
-Profile Plot and QC Plot use the upper limit as rounded off maximum of 
+\item{ylimUp}{upper limit for y-axis in the log scale. FALSE(Default) for
+Profile Plot and QC Plot uses the upper limit as rounded off maximum of
 log2(intensities) after normalization + 3. 
FALSE(Default) for Condition Plot is maximum of log ratio + SD or CI.} -\item{ylimDown}{lower limit for y-axis in the log scale. FALSE(Default) for +\item{ylimDown}{lower limit for y-axis in the log scale. FALSE(Default) for Profile Plot and QC Plot is 0. FALSE(Default) for Condition Plot is minumum of log ratio - SD or CI.} -\item{scale}{for "ConditionPlot" only, FALSE(default) means each conditional -level is not scaled at x-axis according to its actual value (equal space at +\item{scale}{for "ConditionPlot" only, FALSE(default) means each conditional +level is not scaled at x-axis according to its actual value (equal space at x-axis). TRUE means each conditional level is scaled at x-axis according to its actual value (unequal space at x-axis).} -\item{interval}{for "ConditionPlot" only, "CI"(default) uses confidence -interval with 0.95 significant level for the width of error bar. +\item{interval}{for "ConditionPlot" only, "CI"(default) uses confidence +interval with 0.95 significant level for the width of error bar. "SD" uses standard deviation for the width of error bar.} -\item{x.axis.size}{size of x-axis labeling for "Run" in Profile Plot and +\item{x.axis.size}{size of x-axis labeling for "Run" in Profile Plot and QC Plot, and "Condition" in Condition Plot. Default is 10.} \item{y.axis.size}{size of y-axis labels. Default is 10.} -\item{text.size}{size of labels represented each condition at the top of +\item{text.size}{size of labels represented each condition at the top of graph in Profile Plot and QC plot. Default is 4.} \item{text.angle}{angle of labels represented each condition at the top -of graph in Profile Plot and QC plot or x-axis labeling in Condition plot. +of graph in Profile Plot and QC plot or x-axis labeling in Condition plot. Default is 0.} \item{legend.size}{size of feature legend (transition-level or peptide-level) @@ -85,47 +85,47 @@ above graph in Profile Plot. Default is 7.} \item{which.Protein}{Protein list to draw plots. List can be names of Proteins or order numbers of Proteins from levels(data$FeatureLevelData$PROTEIN). -Default is "all", which generates all plots for each protein. +Default is "all", which generates all plots for each protein. For QC plot, "allonly" will generate one QC plot with all proteins.} \item{originalPlot}{TRUE(default) draws original profile plots.} -\item{summaryPlot}{TRUE(default) draws profile plots with +\item{summaryPlot}{TRUE(default) draws profile plots with summarization for run levels.} -\item{save_condition_plot_result}{TRUE saves the table with values +\item{save_condition_plot_result}{TRUE saves the table with values using condition plots. Default is FALSE.} -\item{remove_uninformative_feature_outlier}{It only works after users used +\item{remove_uninformative_feature_outlier}{It only works after users used featureSubset="highQuality" in dataProcess. TRUE allows to remove -1) the features are flagged in the column, feature_quality="Uninformative" -which are features with bad quality, -2) outliers that are flagged in the column, is_outlier=TRUE in Profile plots. +1) the features are flagged in the column, feature_quality="Uninformative" +which are features with bad quality, +2) outliers that are flagged in the column, is_outlier=TRUE in Profile plots. FALSE (default) shows all features and intensities in profile plots.} \item{address}{prefix for the filename that will store the results.} -\item{isPlotly}{Parameter to use Plotly or ggplot2. If set to TRUE, MSstats +\item{isPlotly}{Parameter to use Plotly or ggplot2. 
If set to TRUE, MSstats will save Plotly plots as HTML files. If set to FALSE MSstats will save ggplot2 plots as PDF files -Default folder is the current working directory. +Default folder is the current working directory. The other assigned folder has to be existed under the current working directory. - An output pdf file is automatically created with the default name of - "ProfilePlot.pdf" or "QCplot.pdf" or "ConditionPlot.pdf" or "ConditionPlot_value.csv". - The command address can help to specify where to store the file as well as - how to modify the beginning of the file name. + An output pdf file is automatically created with the default name of + "ProfilePlot.pdf" or "QCplot.pdf" or "ConditionPlot.pdf" or "ConditionPlot_value.csv". + The command address can help to specify where to store the file as well as + how to modify the beginning of the file name. If address=FALSE, plot will be not saved as pdf file but showed in window.} } \description{ -To illustrate the quantitative data after data-preprocessing and -quality control of MS runs, dataProcessPlots takes the quantitative data from -function (\code{\link{dataProcess}}) as input and automatically generate -three types of figures in pdf files as output : -(1) profile plot (specify "ProfilePlot" in option type), -to identify the potential sources of variation for each protein; -(2) quality control plot (specify "QCPlot" in option type), -to evaluate the systematic bias between MS runs; -(3) mean plot for conditions (specify "ConditionPlot" in option type), +To illustrate the quantitative data after data-preprocessing and +quality control of MS runs, dataProcessPlots takes the quantitative data from +function (\code{\link{dataProcess}}) as input and automatically generate +three types of figures in pdf files as output : +(1) profile plot (specify "ProfilePlot" in option type), +to identify the potential sources of variation for each protein; +(2) quality control plot (specify "QCPlot" in option type), +to evaluate the systematic bias between MS runs; +(3) mean plot for conditions (specify "ConditionPlot" in option type), to illustrate mean and variability of each condition per protein. } \details{ @@ -137,20 +137,20 @@ to illustrate mean and variability of each condition per protein. The input of this function is the quantitative data from function \code{\link{dataProcess}}. } \examples{ -# Consider quantitative data (i.e. QuantData) from a yeast study with ten time points of interests, -# three biological replicates, and no technical replicates which is a time-course experiment. -# The goal is to provide pre-analysis visualization by automatically generate two types of figures -# in two separate pdf files. -# Protein IDHC (gene name IDP2) is differentially expressed in time point 1 and time point 7, +# Consider quantitative data (i.e. QuantData) from a yeast study with ten time points of interests, +# three biological replicates, and no technical replicates which is a time-course experiment. +# The goal is to provide pre-analysis visualization by automatically generate two types of figures +# in two separate pdf files. +# Protein IDHC (gene name IDP2) is differentially expressed in time point 1 and time point 7, # whereas, Protein PMG2 (gene name GPM2) is not. 
-QuantData<-dataProcess(SRMRawData, use_log_file = FALSE) +QuantData <- dataProcess(SRMRawData, use_log_file = FALSE) head(QuantData$FeatureLevelData) # Profile plot -dataProcessPlots(data=QuantData,type="ProfilePlot") -# Quality control plot -dataProcessPlots(data=QuantData,type="QCPlot") +dataProcessPlots(data = QuantData, type = "ProfilePlot") +# Quality control plot +dataProcessPlots(data = QuantData, type = "QCPlot") # Quantification plot for conditions -dataProcessPlots(data=QuantData,type="ConditionPlot") +dataProcessPlots(data = QuantData, type = "ConditionPlot") } diff --git a/man/designSampleSize.Rd b/man/designSampleSize.Rd index 58e2230d..121639af 100644 --- a/man/designSampleSize.Rd +++ b/man/designSampleSize.Rd @@ -19,18 +19,18 @@ designSampleSize( \arguments{ \item{data}{'FittedModel' in testing output from function groupComparison.} -\item{desiredFC}{the range of a desired fold change which includes the lower +\item{desiredFC}{the range of a desired fold change which includes the lower and upper values of the desired fold change.} -\item{FDR}{a pre-specified false discovery ratio (FDR) to control the overall +\item{FDR}{a pre-specified false discovery ratio (FDR) to control the overall false positive rate. Default is 0.05} -\item{numSample}{minimal number of biological replicates per condition. -TRUE represents you require to calculate the sample size for this category, +\item{numSample}{minimal number of biological replicates per condition. +TRUE represents you require to calculate the sample size for this category, else you should input the exact number of biological replicates.} -\item{power}{a pre-specified statistical power which defined as the probability -of detecting a true fold change. TRUE represent you require to calculate the power +\item{power}{a pre-specified statistical power which defined as the probability +of detecting a true fold change. TRUE represent you require to calculate the power for this category, else you should input the average of power you expect. Default is 0.9} \item{use_log_file}{logical. If TRUE, information about data processing @@ -42,8 +42,8 @@ to an existing log file.} \item{verbose}{logical. If TRUE, information about data processing wil be printed to the console.} -\item{log_file_path}{character. Path to a file to which information about -data processing will be saved. +\item{log_file_path}{character. Path to a file to which information about +data processing will be saved. If not provided, such a file will be created automatically. If `append = TRUE`, has to be a valid path to a file.} } @@ -52,17 +52,17 @@ data.frame - sample size calculation results including varibles: desiredFC, numSample, FDR, and power. } \description{ -Calculate sample size for future experiments of a Selected Reaction Monitoring (SRM), -Data-Dependent Acquisition (DDA or shotgun), and Data-Independent Acquisition (DIA or SWATH-MS) experiment -based on intensity-based linear model. Two options of the calculation: -(1) number of biological replicates per condition, +Calculate sample size for future experiments of a Selected Reaction Monitoring (SRM), +Data-Dependent Acquisition (DDA or shotgun), and Data-Independent Acquisition (DIA or SWATH-MS) experiment +based on intensity-based linear model. Two options of the calculation: +(1) number of biological replicates per condition, (2) power. } \details{ -The function fits the model and uses variance components to calculate -sample size. 
The underlying model fitting with intensity-based linear model with +The function fits the model and uses variance components to calculate +sample size. The underlying model fitting with intensity-based linear model with technical MS run replication. Estimated sample size is rounded to 0 decimal. -The function can only obtain either one of the categories of the sample size +The function can only obtain either one of the categories of the sample size calculation (numSample, numPep, numTran, power) at the same time. } \examples{ @@ -71,23 +71,27 @@ calculation (numSample, numPep, numTran, power) at the same time. QuantData <- dataProcess(SRMRawData) head(QuantData$FeatureLevelData) ## based on multiple comparisons (T1 vs T3; T1 vs T7; T1 vs T9) -comparison1<-matrix(c(-1,0,1,0,0,0,0,0,0,0),nrow=1) -comparison2<-matrix(c(-1,0,0,0,0,0,1,0,0,0),nrow=1) -comparison3<-matrix(c(-1,0,0,0,0,0,0,0,1,0),nrow=1) -comparison<-rbind(comparison1,comparison2, comparison3) -row.names(comparison)<-c("T3-T1","T7-T1","T9-T1") -colnames(comparison)<-unique(QuantData$ProteinLevelData$GROUP) +comparison1 <- matrix(c(-1, 0, 1, 0, 0, 0, 0, 0, 0, 0), nrow = 1) +comparison2 <- matrix(c(-1, 0, 0, 0, 0, 0, 1, 0, 0, 0), nrow = 1) +comparison3 <- matrix(c(-1, 0, 0, 0, 0, 0, 0, 0, 1, 0), nrow = 1) +comparison <- rbind(comparison1, comparison2, comparison3) +row.names(comparison) <- c("T3-T1", "T7-T1", "T9-T1") +colnames(comparison) <- unique(QuantData$ProteinLevelData$GROUP) -testResultMultiComparisons<-groupComparison(contrast.matrix=comparison,data=QuantData) +testResultMultiComparisons <- groupComparison(contrast.matrix = comparison, data = QuantData) ## Calculate sample size for future experiments: -#(1) Minimal number of biological replicates per condition -designSampleSize(data=testResultMultiComparisons$FittedModel, numSample=TRUE, - desiredFC=c(1.25,1.75), FDR=0.05, power=0.8) -#(2) Power calculation -designSampleSize(data=testResultMultiComparisons$FittedModel, numSample=2, - desiredFC=c(1.25,1.75), FDR=0.05, power=TRUE) - +# (1) Minimal number of biological replicates per condition +designSampleSize( + data = testResultMultiComparisons$FittedModel, numSample = TRUE, + desiredFC = c(1.25, 1.75), FDR = 0.05, power = 0.8 +) +# (2) Power calculation +designSampleSize( + data = testResultMultiComparisons$FittedModel, numSample = 2, + desiredFC = c(1.25, 1.75), FDR = 0.05, power = TRUE +) + } \author{ Meena Choi, Ching-Yun Chang, Olga Vitek. diff --git a/man/designSampleSizePlots.Rd b/man/designSampleSizePlots.Rd index 743b6717..ad703427 100644 --- a/man/designSampleSizePlots.Rd +++ b/man/designSampleSizePlots.Rd @@ -9,7 +9,7 @@ designSampleSizePlots(data, isPlotly = FALSE) \arguments{ \item{data}{output from function designSampleSize.} -\item{isPlotly}{Parameter to use Plotly or ggplot2. If set to TRUE, MSstats +\item{isPlotly}{Parameter to use Plotly or ggplot2. If set to TRUE, MSstats will save Plotly plots as HTML files. If set to FALSE MSstats will save ggplot2 plots as PDF files} } @@ -17,10 +17,10 @@ as PDF files} Plot for estimated sample size with assigned variable. 
} \description{ -To illustrate the relationship of desired fold change and the calculated -minimal number sample size which are (1) number of biological replicates per condition, -(2) number of peptides per protein, -(3) number of transitions per peptide, and +To illustrate the relationship of desired fold change and the calculated +minimal number sample size which are (1) number of biological replicates per condition, +(2) number of peptides per protein, +(3) number of transitions per peptide, and (4) power. The input is the result from function (\code{\link{designSampleSize}}. } \details{ @@ -28,29 +28,33 @@ Data in the example is based on the results of sample size calculation from func } \examples{ # Based on the results of sample size calculation from function designSampleSize, -# we generate a series of sample size plots for number of biological replicates, or peptides, +# we generate a series of sample size plots for number of biological replicates, or peptides, # or transitions or power plot. -QuantData<-dataProcess(SRMRawData) +QuantData <- dataProcess(SRMRawData) head(QuantData$ProcessedData) ## based on multiple comparisons (T1 vs T3; T1 vs T7; T1 vs T9) -comparison1<-matrix(c(-1,0,1,0,0,0,0,0,0,0),nrow=1) -comparison2<-matrix(c(-1,0,0,0,0,0,1,0,0,0),nrow=1) -comparison3<-matrix(c(-1,0,0,0,0,0,0,0,1,0),nrow=1) -comparison<-rbind(comparison1,comparison2, comparison3) -row.names(comparison)<-c("T3-T1","T7-T1","T9-T1") -colnames(comparison)<-unique(QuantData$ProteinLevelData$GROUP) +comparison1 <- matrix(c(-1, 0, 1, 0, 0, 0, 0, 0, 0, 0), nrow = 1) +comparison2 <- matrix(c(-1, 0, 0, 0, 0, 0, 1, 0, 0, 0), nrow = 1) +comparison3 <- matrix(c(-1, 0, 0, 0, 0, 0, 0, 0, 1, 0), nrow = 1) +comparison <- rbind(comparison1, comparison2, comparison3) +row.names(comparison) <- c("T3-T1", "T7-T1", "T9-T1") +colnames(comparison) <- unique(QuantData$ProteinLevelData$GROUP) -testResultMultiComparisons<-groupComparison(contrast.matrix=comparison, data=QuantData) +testResultMultiComparisons <- groupComparison(contrast.matrix = comparison, data = QuantData) # plot the calculated sample sizes for future experiments: # (1) Minimal number of biological replicates per condition -result.sample<-designSampleSize(data=testResultMultiComparisons$FittedModel, numSample=TRUE, - desiredFC=c(1.25,1.75), FDR=0.05, power=0.8) -designSampleSizePlots(data=result.sample) +result.sample <- designSampleSize( + data = testResultMultiComparisons$FittedModel, numSample = TRUE, + desiredFC = c(1.25, 1.75), FDR = 0.05, power = 0.8 +) +designSampleSizePlots(data = result.sample) # (2) Power -result.power<-designSampleSize(data=testResultMultiComparisons$FittedModel, numSample=2, - desiredFC=c(1.25,1.75), FDR=0.05, power=TRUE) -designSampleSizePlots(data=result.power) +result.power <- designSampleSize( + data = testResultMultiComparisons$FittedModel, numSample = 2, + desiredFC = c(1.25, 1.75), FDR = 0.05, power = TRUE +) +designSampleSizePlots(data = result.power) } \author{ diff --git a/man/dot-calculatePower.Rd b/man/dot-calculatePower.Rd index 61bf5b9f..29771277 100644 --- a/man/dot-calculatePower.Rd +++ b/man/dot-calculatePower.Rd @@ -14,10 +14,10 @@ ) } \arguments{ -\item{desiredFC}{the range of a desired fold change which includes the lower +\item{desiredFC}{the range of a desired fold change which includes the lower and upper values of the desired fold change.} -\item{FDR}{a pre-specified false discovery ratio (FDR) to control the overall +\item{FDR}{a pre-specified false discovery ratio (FDR) to control the overall 
false positive rate. Default is 0.05} \item{delta}{difference between means (?)} @@ -26,8 +26,8 @@ false positive rate. Default is 0.05} \item{median_sigma_subject}{median standard deviation per subject} -\item{numSample}{minimal number of biological replicates per condition. -TRUE represents you require to calculate the sample size for this category, +\item{numSample}{minimal number of biological replicates per condition. +TRUE represents you require to calculate the sample size for this category, else you should input the exact number of biological replicates.} } \description{ diff --git a/man/dot-documentFunction.Rd b/man/dot-documentFunction.Rd index b187044e..7bed2a74 100644 --- a/man/dot-documentFunction.Rd +++ b/man/dot-documentFunction.Rd @@ -9,7 +9,7 @@ \arguments{ \item{removeFewMeasurements}{TRUE (default) will remove the features that have 1 or 2 measurements across runs.} -\item{useUniquePeptide}{TRUE (default) removes peptides that are assigned for more than one proteins. +\item{useUniquePeptide}{TRUE (default) removes peptides that are assigned for more than one proteins. We assume to use unique peptide for each protein.} \item{summaryforMultipleRows}{max(default) or sum - when there are multiple measurements for certain feature and certain run, use highest or sum of multiple intensities.} @@ -31,8 +31,8 @@ to an existing log file.} \item{verbose}{logical. If TRUE, information about data processing wil be printed to the console.} -\item{log_file_path}{character. Path to a file to which information about -data processing will be saved. +\item{log_file_path}{character. Path to a file to which information about +data processing will be saved. If not provided, such a file will be created automatically. If `append = TRUE`, has to be a valid path to a file.} } diff --git a/man/dot-getNonMissingFilterStats.Rd b/man/dot-getNonMissingFilterStats.Rd index e2c59980..8fc86f9d 100644 --- a/man/dot-getNonMissingFilterStats.Rd +++ b/man/dot-getNonMissingFilterStats.Rd @@ -9,11 +9,11 @@ \arguments{ \item{input}{data.table with data for a single protein} -\item{censored_symbol}{Missing values are censored or at random. -'NA' (default) assumes that all 'NA's in 'Intensity' column are censored. -'0' uses zero intensities as censored intensity. -In this case, NA intensities are missing at random. -The output from Skyline should use '0'. +\item{censored_symbol}{Missing values are censored or at random. +'NA' (default) assumes that all 'NA's in 'Intensity' column are censored. +'0' uses zero intensities as censored intensity. +In this case, NA intensities are missing at random. +The output from Skyline should use '0'. Null assumes that all NA intensites are randomly missing.} } \value{ diff --git a/man/dot-getNumSample.Rd b/man/dot-getNumSample.Rd index f294fbdb..b4a9d089 100644 --- a/man/dot-getNumSample.Rd +++ b/man/dot-getNumSample.Rd @@ -14,11 +14,11 @@ ) } \arguments{ -\item{desiredFC}{the range of a desired fold change which includes the lower +\item{desiredFC}{the range of a desired fold change which includes the lower and upper values of the desired fold change.} -\item{power}{a pre-specified statistical power which defined as the probability -of detecting a true fold change. TRUE represent you require to calculate the power +\item{power}{a pre-specified statistical power which defined as the probability +of detecting a true fold change. TRUE represent you require to calculate the power for this category, else you should input the average of power you expect. 
Default is 0.9} \item{alpha}{significance level} diff --git a/man/dot-groupComparisonWithMultipleCores.Rd b/man/dot-groupComparisonWithMultipleCores.Rd index f4ae9ea1..3a3783e7 100644 --- a/man/dot-groupComparisonWithMultipleCores.Rd +++ b/man/dot-groupComparisonWithMultipleCores.Rd @@ -24,8 +24,8 @@ \item{samples_info}{data.table, output of getSamplesInfo function} -\item{numberOfCores}{Number of cores for parallel processing. -A logfile named `MSstats_groupComparison_log_progress.log` is created to +\item{numberOfCores}{Number of cores for parallel processing. +A logfile named `MSstats_groupComparison_log_progress.log` is created to track progress. Only works for Linux & Mac OS.} } \description{ diff --git a/man/dot-makeConditionPlot.Rd b/man/dot-makeConditionPlot.Rd index dbc11a37..9be1dc0c 100644 --- a/man/dot-makeConditionPlot.Rd +++ b/man/dot-makeConditionPlot.Rd @@ -22,23 +22,23 @@ \arguments{ \item{input}{data.table} -\item{scale}{for "ConditionPlot" only, FALSE(default) means each conditional -level is not scaled at x-axis according to its actual value (equal space at +\item{scale}{for "ConditionPlot" only, FALSE(default) means each conditional +level is not scaled at x-axis according to its actual value (equal space at x-axis). TRUE means each conditional level is scaled at x-axis according to its actual value (unequal space at x-axis).} \item{single_protein}{data.table} -\item{x.axis.size}{size of x-axis labeling for "Run" in Profile Plot and +\item{x.axis.size}{size of x-axis labeling for "Run" in Profile Plot and QC Plot, and "Condition" in Condition Plot. Default is 10.} \item{y.axis.size}{size of y-axis labels. Default is 10.} -\item{text.size}{size of labels represented each condition at the top of +\item{text.size}{size of labels represented each condition at the top of graph in Profile Plot and QC plot. Default is 4.} \item{text.angle}{angle of labels represented each condition at the top -of graph in Profile Plot and QC plot or x-axis labeling in Condition plot. +of graph in Profile Plot and QC plot or x-axis labeling in Condition plot. Default is 0.} \item{legend.size}{size of feature legend (transition-level or peptide-level) diff --git a/man/dot-makeProfilePlot.Rd b/man/dot-makeProfilePlot.Rd index c259fa28..ca154c0c 100644 --- a/man/dot-makeProfilePlot.Rd +++ b/man/dot-makeProfilePlot.Rd @@ -30,20 +30,20 @@ \item{is_censored}{TRUE if censored values were imputed} -\item{featureName}{for "ProfilePlot" only, "Transition" (default) means -printing feature legend in transition-level; "Peptide" means printing feature +\item{featureName}{for "ProfilePlot" only, "Transition" (default) means +printing feature legend in transition-level; "Peptide" means printing feature legend in peptide-level; "NA" means no feature legend printing.} -\item{x.axis.size}{size of x-axis labeling for "Run" in Profile Plot and +\item{x.axis.size}{size of x-axis labeling for "Run" in Profile Plot and QC Plot, and "Condition" in Condition Plot. Default is 10.} \item{y.axis.size}{size of y-axis labels. Default is 10.} -\item{text.size}{size of labels represented each condition at the top of +\item{text.size}{size of labels represented each condition at the top of graph in Profile Plot and QC plot. Default is 4.} \item{text.angle}{angle of labels represented each condition at the top -of graph in Profile Plot and QC plot or x-axis labeling in Condition plot. +of graph in Profile Plot and QC plot or x-axis labeling in Condition plot. 
Default is 0.} \item{legend.size}{size of feature legend (transition-level or peptide-level) diff --git a/man/dot-makeQCPlot.Rd b/man/dot-makeQCPlot.Rd index 98b17d6c..d9992846 100644 --- a/man/dot-makeQCPlot.Rd +++ b/man/dot-makeQCPlot.Rd @@ -26,31 +26,31 @@ \item{all_proteins}{character vector of protein names} -\item{x.axis.size}{size of x-axis labeling for "Run" in Profile Plot and +\item{x.axis.size}{size of x-axis labeling for "Run" in Profile Plot and QC Plot, and "Condition" in Condition Plot. Default is 10.} \item{y.axis.size}{size of y-axis labels. Default is 10.} -\item{text.size}{size of labels represented each condition at the top of +\item{text.size}{size of labels represented each condition at the top of graph in Profile Plot and QC plot. Default is 4.} \item{text.angle}{angle of labels represented each condition at the top -of graph in Profile Plot and QC plot or x-axis labeling in Condition plot. +of graph in Profile Plot and QC plot or x-axis labeling in Condition plot. Default is 0.} \item{legend.size}{size of feature legend (transition-level or peptide-level) above graph in Profile Plot. Default is 7.} } \description{ -To illustrate the quantitative data after data-preprocessing and -quality control of MS runs, dataProcessPlots takes the quantitative data from -function (\code{\link{dataProcess}}) as input and automatically generate -three types of figures in pdf files as output : -(1) profile plot (specify "ProfilePlot" in option type), -to identify the potential sources of variation for each protein; -(2) quality control plot (specify "QCPlot" in option type), -to evaluate the systematic bias between MS runs; -(3) mean plot for conditions (specify "ConditionPlot" in option type), +To illustrate the quantitative data after data-preprocessing and +quality control of MS runs, dataProcessPlots takes the quantitative data from +function (\code{\link{dataProcess}}) as input and automatically generate +three types of figures in pdf files as output : +(1) profile plot (specify "ProfilePlot" in option type), +to identify the potential sources of variation for each protein; +(2) quality control plot (specify "QCPlot" in option type), +to evaluate the systematic bias between MS runs; +(3) mean plot for conditions (specify "ConditionPlot" in option type), to illustrate mean and variability of each condition per protein. } \details{ @@ -62,21 +62,21 @@ to illustrate mean and variability of each condition per protein. The input of this function is the quantitative data from function \code{\link{dataProcess}}. } \examples{ -# Consider quantitative data (i.e. QuantData) from a yeast study with ten time points of interests, -# three biological replicates, and no technical replicates which is a time-course experiment. -# The goal is to provide pre-analysis visualization by automatically generate two types of figures -# in two separate pdf files. -# Protein IDHC (gene name IDP2) is differentially expressed in time point 1 and time point 7, +# Consider quantitative data (i.e. QuantData) from a yeast study with ten time points of interests, +# three biological replicates, and no technical replicates which is a time-course experiment. +# The goal is to provide pre-analysis visualization by automatically generate two types of figures +# in two separate pdf files. +# Protein IDHC (gene name IDP2) is differentially expressed in time point 1 and time point 7, # whereas, Protein PMG2 (gene name GPM2) is not. 
-QuantData<-dataProcess(SRMRawData, use_log_file = FALSE) +QuantData <- dataProcess(SRMRawData, use_log_file = FALSE) head(QuantData$FeatureLevelData) # Profile plot -dataProcessPlots(data=QuantData,type="ProfilePlot") -# Quality control plot -dataProcessPlots(data=QuantData,type="QCPlot") +dataProcessPlots(data = QuantData, type = "ProfilePlot") +# Quality control plot +dataProcessPlots(data = QuantData, type = "QCPlot") # Quantification plot for conditions -dataProcessPlots(data=QuantData,type="ConditionPlot") +dataProcessPlots(data = QuantData, type = "ConditionPlot") } \keyword{internal} diff --git a/man/dot-makeSummaryProfilePlot.Rd b/man/dot-makeSummaryProfilePlot.Rd index 94a2f935..1811d9a7 100644 --- a/man/dot-makeSummaryProfilePlot.Rd +++ b/man/dot-makeSummaryProfilePlot.Rd @@ -26,16 +26,16 @@ \item{is_censored}{TRUE if censored values were imputed} -\item{x.axis.size}{size of x-axis labeling for "Run" in Profile Plot and +\item{x.axis.size}{size of x-axis labeling for "Run" in Profile Plot and QC Plot, and "Condition" in Condition Plot. Default is 10.} \item{y.axis.size}{size of y-axis labels. Default is 10.} -\item{text.size}{size of labels represented each condition at the top of +\item{text.size}{size of labels represented each condition at the top of graph in Profile Plot and QC plot. Default is 4.} \item{text.angle}{angle of labels represented each condition at the top -of graph in Profile Plot and QC plot or x-axis labeling in Condition plot. +of graph in Profile Plot and QC plot or x-axis labeling in Condition plot. Default is 0.} \item{legend.size}{size of feature legend (transition-level or peptide-level) diff --git a/man/dot-normalizeGlobalStandards.Rd b/man/dot-normalizeGlobalStandards.Rd index fc356f47..d3012239 100644 --- a/man/dot-normalizeGlobalStandards.Rd +++ b/man/dot-normalizeGlobalStandards.Rd @@ -9,10 +9,10 @@ \arguments{ \item{input}{data.table in MSstats format} -\item{peptides_dict}{`data.table` of names of peptides and their corresponding +\item{peptides_dict}{`data.table` of names of peptides and their corresponding features.} -\item{standards}{character vector with names of standards, required if +\item{standards}{character vector with names of standards, required if "GLOBALSTANDARDS" method was selected.} } \description{ diff --git a/man/dot-plotComparison.Rd b/man/dot-plotComparison.Rd index 9e4683cd..fe8df435 100644 --- a/man/dot-plotComparison.Rd +++ b/man/dot-plotComparison.Rd @@ -46,8 +46,8 @@ \item{log_base_FC}{log base for log-fold changes - 2 or 10} -\item{isPlotly}{This parameter is for MSstatsShiny application for plotly -render, this cannot be used for saving PDF files as plotly do not have +\item{isPlotly}{This parameter is for MSstatsShiny application for plotly +render, this cannot be used for saving PDF files as plotly do not have suppprt for PDFs currently. address and isPlotly cannot be set as TRUE at the same time.} } diff --git a/man/dot-plotHeatmap.Rd b/man/dot-plotHeatmap.Rd index cdc66ac7..9f9a6d9e 100644 --- a/man/dot-plotHeatmap.Rd +++ b/man/dot-plotHeatmap.Rd @@ -52,8 +52,8 @@ For Plotly: use this parameter to adjust the number of proteins to be displayed \item{address}{the name of folder that will store the results. Default folder is the current working directory. The other assigned folder has to be existed under the current working directory. An output pdf file is automatically created with the default name of "VolcanoPlot.pdf" or "Heatmap.pdf" or "ComparisonPlot.pdf". 
The command address can help to specify where to store the file as well as how to modify the beginning of the file name. If address=FALSE, plot will be not saved as pdf file but showed in window.} -\item{isPlotly}{This parameter is for MSstatsShiny application for plotly -render, this cannot be used for saving PDF files as plotly do not have +\item{isPlotly}{This parameter is for MSstatsShiny application for plotly +render, this cannot be used for saving PDF files as plotly do not have suppprt for PDFs currently. address and isPlotly cannot be set as TRUE at the same time.} } diff --git a/man/dot-plotVolcano.Rd b/man/dot-plotVolcano.Rd index 1e532688..ca5c5dc3 100644 --- a/man/dot-plotVolcano.Rd +++ b/man/dot-plotVolcano.Rd @@ -57,8 +57,8 @@ \item{y.axis.size}{size of axes labels, e.g. name of targeted proteins in heatmap. Default is 10.} -\item{isPlotly}{This parameter is for MSstatsShiny application for plotly -render, this cannot be used for saving PDF files as plotly do not have +\item{isPlotly}{This parameter is for MSstatsShiny application for plotly +render, this cannot be used for saving PDF files as plotly do not have suppprt for PDFs currently. address and isPlotly cannot be set as TRUE at the same time.} } diff --git a/man/dot-runTukey.Rd b/man/dot-runTukey.Rd index 038d2911..d9d07d09 100644 --- a/man/dot-runTukey.Rd +++ b/man/dot-runTukey.Rd @@ -11,14 +11,14 @@ \item{is_labeled}{logical, if TRUE, data is coming from an SRM experiment} -\item{censored_symbol}{Missing values are censored or at random. -'NA' (default) assumes that all 'NA's in 'Intensity' column are censored. -'0' uses zero intensities as censored intensity. -In this case, NA intensities are missing at random. -The output from Skyline should use '0'. +\item{censored_symbol}{Missing values are censored or at random. +'NA' (default) assumes that all 'NA's in 'Intensity' column are censored. +'0' uses zero intensities as censored intensity. +In this case, NA intensities are missing at random. +The output from Skyline should use '0'. Null assumes that all NA intensites are randomly missing.} -\item{remove50missing}{only for summaryMethod = "TMP". TRUE removes the proteins +\item{remove50missing}{only for summaryMethod = "TMP". TRUE removes the proteins where every run has at least 50\% missing values for each peptide. FALSE is default.} } \value{ diff --git a/man/dot-setCensoredByThreshold.Rd b/man/dot-setCensoredByThreshold.Rd index b3ca6e63..5efc6de0 100644 --- a/man/dot-setCensoredByThreshold.Rd +++ b/man/dot-setCensoredByThreshold.Rd @@ -3,7 +3,7 @@ \name{.setCensoredByThreshold} \alias{.setCensoredByThreshold} \title{Set censored values based on minimum in run/feature/run or feature. -This is used to initialize the AFT imputation model by supplying the maximum +This is used to initialize the AFT imputation model by supplying the maximum possible values for left-censored data as the `time` input to the Surv function.} \usage{ .setCensoredByThreshold(input, censored_symbol, remove50missing) @@ -18,7 +18,7 @@ will be removed} } \description{ Set censored values based on minimum in run/feature/run or feature. -This is used to initialize the AFT imputation model by supplying the maximum +This is used to initialize the AFT imputation model by supplying the maximum possible values for left-censored data as the `time` input to the Surv function. 
} \keyword{internal} diff --git a/man/extractSDRF.Rd b/man/extractSDRF.Rd index c8fd7c5a..b159eeb7 100644 --- a/man/extractSDRF.Rd +++ b/man/extractSDRF.Rd @@ -14,7 +14,7 @@ extractSDRF( ) } \arguments{ -\item{data}{MSstats formatted data that is the output of a dedicated +\item{data}{MSstats formatted data that is the output of a dedicated converter, such as `MaxQtoMSstatsFormat`, `SkylinetoMSstatsFormat`, ect.} \item{run_name}{Run column name in SDRF data} @@ -26,23 +26,26 @@ converter, such as `MaxQtoMSstatsFormat`, `SkylinetoMSstatsFormat`, ect.} \item{fraction}{Fraction column name in SDRF data (if applicable). Default is `NULL`. If there are no fractions keep `NULL`.} -\item{meta_data}{A data.frame including any additional meta data for the SDRF -file that is not included in MSstats. This meta data will be added into the -final SDRF file. Please ensure the run names in the meta data matches the +\item{meta_data}{A data.frame including any additional meta data for the SDRF +file that is not included in MSstats. This meta data will be added into the +final SDRF file. Please ensure the run names in the meta data matches the run names in the MSstats data.} } \description{ Extract experimental design from MSstats format into SDRF format } \examples{ -mq_ev = data.table::fread(system.file("tinytest/raw_data/MaxQuant/mq_ev.csv", - package = "MSstatsConvert")) -mq_pg = data.table::fread(system.file("tinytest/raw_data/MaxQuant/mq_pg.csv", - package = "MSstatsConvert")) -annot = data.table::fread(system.file("tinytest/raw_data/MaxQuant/annotation.csv", - package = "MSstatsConvert")) -maxq_imported = MaxQtoMSstatsFormat(mq_ev, annot, mq_pg, use_log_file = FALSE) +mq_ev <- data.table::fread(system.file("tinytest/raw_data/MaxQuant/mq_ev.csv", + package = "MSstatsConvert" +)) +mq_pg <- data.table::fread(system.file("tinytest/raw_data/MaxQuant/mq_pg.csv", + package = "MSstatsConvert" +)) +annot <- data.table::fread(system.file("tinytest/raw_data/MaxQuant/annotation.csv", + package = "MSstatsConvert" +)) +maxq_imported <- MaxQtoMSstatsFormat(mq_ev, annot, mq_pg, use_log_file = FALSE) head(maxq_imported) -SDRF_file = extractSDRF(maxq_imported) +SDRF_file <- extractSDRF(maxq_imported) } diff --git a/man/getProcessed.Rd b/man/getProcessed.Rd index 0dd21210..750cebcc 100644 --- a/man/getProcessed.Rd +++ b/man/getProcessed.Rd @@ -16,21 +16,23 @@ data.table processed by dataProcess subfunctions Get feature-level data to be used in the MSstatsSummarizationOutput function } \examples{ -raw = DDARawData -method = "TMP" -cens = "NA" -impute = TRUE +raw <- DDARawData +method <- "TMP" +cens <- "NA" +impute <- TRUE MSstatsConvert::MSstatsLogsSettings(FALSE) -input = MSstatsPrepareForDataProcess(raw, 2, NULL) -input = MSstatsNormalize(input, "EQUALIZEMEDIANS") -input = MSstatsMergeFractions(input) -input = MSstatsHandleMissing(input, "TMP", TRUE, "NA", 0.999) -input_all = MSstatsSelectFeatures(input, "all") # all features -input_5 = MSstatsSelectFeatures(data.table::copy(input), -"topN", top_n = 5) # top 5 features +input <- MSstatsPrepareForDataProcess(raw, 2, NULL) +input <- MSstatsNormalize(input, "EQUALIZEMEDIANS") +input <- MSstatsMergeFractions(input) +input <- MSstatsHandleMissing(input, "TMP", TRUE, "NA", 0.999) +input_all <- MSstatsSelectFeatures(input, "all") # all features +input_5 <- MSstatsSelectFeatures(data.table::copy(input), + "topN", + top_n = 5 +) # top 5 features -proc1 = getProcessed(input_all) -proc2 = getProcessed(input_5) +proc1 <- getProcessed(input_all) +proc2 <- getProcessed(input_5) 
proc1 proc2 diff --git a/man/groupComparison.Rd b/man/groupComparison.Rd index 40860d8c..33f499b9 100644 --- a/man/groupComparison.Rd +++ b/man/groupComparison.Rd @@ -35,13 +35,13 @@ to an existing log file.} \item{verbose}{logical. If TRUE, information about data processing wil be printed to the console.} -\item{log_file_path}{character. Path to a file to which information about -data processing will be saved. +\item{log_file_path}{character. Path to a file to which information about +data processing will be saved. If not provided, such a file will be created automatically. If `append = TRUE`, has to be a valid path to a file.} -\item{numberOfCores}{Number of cores for parallel processing. When > 1, -a logfile named `MSstats_groupComparison_log_progress.log` is created to +\item{numberOfCores}{Number of cores for parallel processing. When > 1, +a logfile named `MSstats_groupComparison_log_progress.log` is created to track progress. Only works for Linux & Mac OS. Default is 1.} } \value{ @@ -96,23 +96,25 @@ The underlying model fitting functions are lm and lmer for the fixed effects mod The input of this function is the quantitative data from function (dataProcess). } \examples{ -# Consider quantitative data (i.e. QuantData) from yeast study with ten time points of interests, -# three biological replicates, and no technical replicates. +# Consider quantitative data (i.e. QuantData) from yeast study with ten time points of interests, +# three biological replicates, and no technical replicates. # It is a time-course experiment and we attempt to compare differential abundance -# between time 1 and 7 in a set of targeted proteins. -# In this label-based SRM experiment, MSstats uses the fitted model with expanded scope of -# Biological replication. +# between time 1 and 7 in a set of targeted proteins. +# In this label-based SRM experiment, MSstats uses the fitted model with expanded scope of +# Biological replication. QuantData <- dataProcess(SRMRawData, use_log_file = FALSE) head(QuantData$FeatureLevelData) levels(QuantData$ProteinLevelData$GROUP) -comparison <- matrix(c(-1,0,0,0,0,0,1,0,0,0),nrow=1) +comparison <- matrix(c(-1, 0, 0, 0, 0, 0, 1, 0, 0, 0), nrow = 1) row.names(comparison) <- "T7-T1" -groups = levels(QuantData$ProteinLevelData$GROUP) +groups <- levels(QuantData$ProteinLevelData$GROUP) colnames(comparison) <- groups[order(as.numeric(groups))] # Tests for differentially abundant proteins with models: # label-based SRM experiment with expanded scope of biological replication. -testResultOneComparison <- groupComparison(contrast.matrix=comparison, data=QuantData, - use_log_file = FALSE) +testResultOneComparison <- groupComparison( + contrast.matrix = comparison, data = QuantData, + use_log_file = FALSE +) # table for result testResultOneComparison$ComparisonResult diff --git a/man/groupComparisonPlots.Rd b/man/groupComparisonPlots.Rd index 6b621ded..97ee951e 100644 --- a/man/groupComparisonPlots.Rd +++ b/man/groupComparisonPlots.Rd @@ -79,17 +79,17 @@ For Plotly: use this parameter to adjust the number of proteins to be displayed \item{address}{the name of folder that will store the results. Default folder is the current working directory. The other assigned folder has to be existed under the current working directory. An output pdf file is automatically created with the default name of "VolcanoPlot.pdf" or "Heatmap.pdf" or "ComparisonPlot.pdf". The command address can help to specify where to store the file as well as how to modify the beginning of the file name. 
If address=FALSE, plot will be not saved as pdf file but showed in window.} -\item{isPlotly}{This parameter is for MSstatsShiny application for plotly -render, this cannot be used for saving PDF files as plotly do not have +\item{isPlotly}{This parameter is for MSstatsShiny application for plotly +render, this cannot be used for saving PDF files as plotly do not have suppprt for PDFs currently. address and isPlotly cannot be set as TRUE at the same time.} } \description{ -To summarize the results of log-fold changes and adjusted p-values for differentially abundant proteins, -groupComparisonPlots takes testing results from function (\code{\link{groupComparison}}) as input and -automatically generate three types of figures in pdf files as output : -(1) volcano plot (specify "VolcanoPlot" in option type) for each comparison separately; -(2) heatmap (specify "Heatmap" in option type) for multiple comparisons ; +To summarize the results of log-fold changes and adjusted p-values for differentially abundant proteins, +groupComparisonPlots takes testing results from function (\code{\link{groupComparison}}) as input and +automatically generate three types of figures in pdf files as output : +(1) volcano plot (specify "VolcanoPlot" in option type) for each comparison separately; +(2) heatmap (specify "Heatmap" in option type) for multiple comparisons ; (3) comparison plot (specify "ComparisonPlot" in option type) for multiple comparisons per protein. } \details{ @@ -100,40 +100,54 @@ automatically generate three types of figures in pdf files as output : } } \examples{ -QuantData<-dataProcess(SRMRawData, use_log_file = FALSE) +QuantData <- dataProcess(SRMRawData, use_log_file = FALSE) head(QuantData$FeatureLevelData) ## based on multiple comparisons (T1 vs T3; T1 vs T7; T1 vs T9) -comparison1<-matrix(c(-1,0,1,0,0,0,0,0,0,0),nrow=1) -comparison2<-matrix(c(-1,0,0,0,0,0,1,0,0,0),nrow=1) -comparison3<-matrix(c(-1,0,0,0,0,0,0,0,1,0),nrow=1) -comparison<-rbind(comparison1,comparison2, comparison3) -row.names(comparison)<-c("T3-T1","T7-T1","T9-T1") -groups = levels(QuantData$ProteinLevelData$GROUP) +comparison1 <- matrix(c(-1, 0, 1, 0, 0, 0, 0, 0, 0, 0), nrow = 1) +comparison2 <- matrix(c(-1, 0, 0, 0, 0, 0, 1, 0, 0, 0), nrow = 1) +comparison3 <- matrix(c(-1, 0, 0, 0, 0, 0, 0, 0, 1, 0), nrow = 1) +comparison <- rbind(comparison1, comparison2, comparison3) +row.names(comparison) <- c("T3-T1", "T7-T1", "T9-T1") +groups <- levels(QuantData$ProteinLevelData$GROUP) colnames(comparison) <- groups[order(as.numeric(groups))] -testResultMultiComparisons<-groupComparison(contrast.matrix=comparison, -data=QuantData, -use_log_file = FALSE) +testResultMultiComparisons <- groupComparison( + contrast.matrix = comparison, + data = QuantData, + use_log_file = FALSE +) testResultMultiComparisons$ComparisonResult # Volcano plot with FDR cutoff = 0.05 and no FC cutoff -groupComparisonPlots(data=testResultMultiComparisons$ComparisonResult, type="VolcanoPlot", -logBase.pvalue=2, address="Ex1_") -# Volcano plot with FDR cutoff = 0.05, FC cutoff = 70, upper y-axis limit = 100, +groupComparisonPlots( + data = testResultMultiComparisons$ComparisonResult, type = "VolcanoPlot", + logBase.pvalue = 2, address = "Ex1_" +) +# Volcano plot with FDR cutoff = 0.05, FC cutoff = 70, upper y-axis limit = 100, # and no protein name displayed # FCcutoff=70 is for demonstration purpose -groupComparisonPlots(data=testResultMultiComparisons$ComparisonResult, type="VolcanoPlot", -FCcutoff=70, logBase.pvalue=2, ylimUp=100, 
ProteinName=FALSE,address="Ex2_") +groupComparisonPlots( + data = testResultMultiComparisons$ComparisonResult, type = "VolcanoPlot", + FCcutoff = 70, logBase.pvalue = 2, ylimUp = 100, ProteinName = FALSE, address = "Ex2_" +) # Heatmap with FDR cutoff = 0.05 -groupComparisonPlots(data=testResultMultiComparisons$ComparisonResult, type="Heatmap", -logBase.pvalue=2, address="Ex1_") +groupComparisonPlots( + data = testResultMultiComparisons$ComparisonResult, type = "Heatmap", + logBase.pvalue = 2, address = "Ex1_" +) # Heatmap with FDR cutoff = 0.05 and FC cutoff = 70 # FCcutoff=70 is for demonstration purpose -groupComparisonPlots(data=testResultMultiComparisons$ComparisonResult, type="Heatmap", -FCcutoff=70, logBase.pvalue=2, address="Ex2_") +groupComparisonPlots( + data = testResultMultiComparisons$ComparisonResult, type = "Heatmap", + FCcutoff = 70, logBase.pvalue = 2, address = "Ex2_" +) # Comparison Plot -groupComparisonPlots(data=testResultMultiComparisons$ComparisonResult, type="ComparisonPlot", -address="Ex1_") +groupComparisonPlots( + data = testResultMultiComparisons$ComparisonResult, type = "ComparisonPlot", + address = "Ex1_" +) # Comparison Plot -groupComparisonPlots(data=testResultMultiComparisons$ComparisonResult, type="ComparisonPlot", -ylimUp=8, ylimDown=-1, address="Ex2_") +groupComparisonPlots( + data = testResultMultiComparisons$ComparisonResult, type = "ComparisonPlot", + ylimUp = 8, ylimDown = -1, address = "Ex2_" +) } diff --git a/man/groupComparisonQCPlots.Rd b/man/groupComparisonQCPlots.Rd index 66ebd1b0..2a08b602 100644 --- a/man/groupComparisonQCPlots.Rd +++ b/man/groupComparisonQCPlots.Rd @@ -18,7 +18,7 @@ groupComparisonQCPlots( \arguments{ \item{data}{output from function groupComparison.} -\item{type}{choice of visualization. "QQPlots" represents normal quantile-quantile +\item{type}{choice of visualization. "QQPlots" represents normal quantile-quantile plot for each protein after fitting models. "ResidualPlots" represents a plot of residuals versus fitted values for each protein in the dataset.} @@ -30,8 +30,8 @@ of residuals versus fitted values for each protein in the dataset.} \item{height}{height of the saved file. Default is 10.} -\item{which.Protein}{Protein list to draw plots. List can be names of Proteins -or order numbers of Proteins from levels(testResultOneComparison$ComparisonResult$Protein). +\item{which.Protein}{Protein list to draw plots. List can be names of Proteins +or order numbers of Proteins from levels(testResultOneComparison$ComparisonResult$Protein). Default is "all", which generates all plots for each protein.} \item{address}{name that will serve as a prefix to the name of output file.} @@ -40,18 +40,18 @@ Default is "all", which generates all plots for each protein.} produce a pdf file } \description{ -To check the assumption of linear model for whole plot inference, -groupComparisonQCPlots takes the results after fitting models from function -(\code{\link{groupComparison}}) as input and automatically generate two types -of figures in pdf files as output: +To check the assumption of linear model for whole plot inference, +groupComparisonQCPlots takes the results after fitting models from function +(\code{\link{groupComparison}}) as input and automatically generate two types +of figures in pdf files as output: (1) normal quantile-quantile plot (specify "QQPlot" in option type) for checking -normally distributed errors.; +normally distributed errors.; (2) residual plot (specify "ResidualPlot" in option type). 
} \details{ -Results based on statistical models for whole plot level inference are -accurate as long as the assumptions of the model are met. The model assumes that -the measurement errors are normally distributed with mean 0 and constant variance. +Results based on statistical models for whole plot level inference are +accurate as long as the assumptions of the model are met. The model assumes that +the measurement errors are normally distributed with mean 0 and constant variance. The assumption of a constant variance can be checked by examining the residuals from the model. \itemize{ \item{QQPlots : a normal quantile-quantile plot for each protein is generated in order to check whether the errors are well approximated by a normal distribution. If points fall approximately along a straight line, then the assumption is appropriate for that protein. Only large deviations from the line are problematic.} @@ -62,16 +62,18 @@ The assumption of a constant variance can be checked by examining the residuals QuantData <- dataProcess(SRMRawData, use_log_file = FALSE) head(QuantData$FeatureLevelData) levels(QuantData$FeatureLevelData$GROUP) -comparison <- matrix(c(-1,0,0,0,0,0,1,0,0,0),nrow=1) +comparison <- matrix(c(-1, 0, 0, 0, 0, 0, 1, 0, 0, 0), nrow = 1) row.names(comparison) <- "T7-T1" colnames(comparison) <- unique(QuantData$ProteinLevelData$GROUP) # Tests for differentially abundant proteins with models: # label-based SRM experiment with expanded scope of biological replication. -testResultOneComparison <- groupComparison(contrast.matrix=comparison, data=QuantData, -use_log_file = FALSE) +testResultOneComparison <- groupComparison( + contrast.matrix = comparison, data = QuantData, + use_log_file = FALSE +) # normal quantile-quantile plots -groupComparisonQCPlots(data=testResultOneComparison, type="QQPlots", address="") +groupComparisonQCPlots(data = testResultOneComparison, type = "QQPlots", address = "") # residual plots -groupComparisonQCPlots(data=testResultOneComparison, type="ResidualPlots", address="") +groupComparisonQCPlots(data = testResultOneComparison, type = "ResidualPlots", address = "") } diff --git a/man/makePeptidesDictionary.Rd b/man/makePeptidesDictionary.Rd index 0a5131fd..5e34b314 100644 --- a/man/makePeptidesDictionary.Rd +++ b/man/makePeptidesDictionary.Rd @@ -20,8 +20,8 @@ with global standards. It is useful for running the summarization workflow outside of the dataProcess function. } \examples{ -input = data.table::as.data.table(DDARawData) -peptides_dict = makePeptidesDictionary(input, "GLOBALSTANDARDS") +input <- data.table::as.data.table(DDARawData) +peptides_dict <- makePeptidesDictionary(input, "GLOBALSTANDARDS") head(peptides_dict) # ready to be passed to the MSstatsNormalize function } diff --git a/man/modelBasedQCPlots.Rd b/man/modelBasedQCPlots.Rd index 8538ab97..b50bcd47 100644 --- a/man/modelBasedQCPlots.Rd +++ b/man/modelBasedQCPlots.Rd @@ -19,7 +19,7 @@ modelBasedQCPlots( \arguments{ \item{data}{output from function groupComparison.} -\item{type}{choice of visualization. "QQPlots" represents normal quantile-quantile +\item{type}{choice of visualization. "QQPlots" represents normal quantile-quantile plot for each protein after fitting models. "ResidualPlots" represents a plot of residuals versus fitted values for each protein in the dataset.} @@ -31,8 +31,8 @@ of residuals versus fitted values for each protein in the dataset.} \item{height}{height of the saved file. Default is 10.} -\item{which.Protein}{Protein list to draw plots. 
List can be names of Proteins -or order numbers of Proteins from levels(testResultOneComparison$ComparisonResult$Protein). +\item{which.Protein}{Protein list to draw plots. List can be names of Proteins +or order numbers of Proteins from levels(testResultOneComparison$ComparisonResult$Protein). Default is "all", which generates all plots for each protein.} \item{address}{name that will serve as a prefix to the name of output file.} @@ -41,18 +41,18 @@ Default is "all", which generates all plots for each protein.} produce a pdf file } \description{ -To check the assumption of linear model for whole plot inference, -modelBasedQCPlots takes the results after fitting models from function -(\code{\link{groupComparison}}) as input and automatically generate two types -of figures in pdf files as output: +To check the assumption of linear model for whole plot inference, +modelBasedQCPlots takes the results after fitting models from function +(\code{\link{groupComparison}}) as input and automatically generate two types +of figures in pdf files as output: (1) normal quantile-quantile plot (specify "QQPlot" in option type) for checking -normally distributed errors.; +normally distributed errors.; (2) residual plot (specify "ResidualPlot" in option type). } \details{ -Results based on statistical models for whole plot level inference are -accurate as long as the assumptions of the model are met. The model assumes that -the measurement errors are normally distributed with mean 0 and constant variance. +Results based on statistical models for whole plot level inference are +accurate as long as the assumptions of the model are met. The model assumes that +the measurement errors are normally distributed with mean 0 and constant variance. The assumption of a constant variance can be checked by examining the residuals from the model. \itemize{ \item{QQPlots : a normal quantile-quantile plot for each protein is generated in order to check whether the errors are well approximated by a normal distribution. If points fall approximately along a straight line, then the assumption is appropriate for that protein. Only large deviations from the line are problematic.} @@ -63,16 +63,18 @@ The assumption of a constant variance can be checked by examining the residuals QuantData <- dataProcess(SRMRawData, use_log_file = FALSE) head(QuantData$FeatureLevelData) levels(QuantData$FeatureLevelData$GROUP) -comparison <- matrix(c(-1,0,0,0,0,0,1,0,0,0),nrow=1) +comparison <- matrix(c(-1, 0, 0, 0, 0, 0, 1, 0, 0, 0), nrow = 1) row.names(comparison) <- "T7-T1" colnames(comparison) <- unique(QuantData$ProteinLevelData$GROUP) # Tests for differentially abundant proteins with models: # label-based SRM experiment with expanded scope of biological replication. 
-testResultOneComparison <- groupComparison(contrast.matrix=comparison, data=QuantData, -use_log_file = FALSE) +testResultOneComparison <- groupComparison( + contrast.matrix = comparison, data = QuantData, + use_log_file = FALSE +) # normal quantile-quantile plots -modelBasedQCPlots(data=testResultOneComparison, type="QQPlots", address="") +modelBasedQCPlots(data = testResultOneComparison, type = "QQPlots", address = "") # residual plots -modelBasedQCPlots(data=testResultOneComparison, type="ResidualPlots", address="") +modelBasedQCPlots(data = testResultOneComparison, type = "ResidualPlots", address = "") } diff --git a/man/quantification.Rd b/man/quantification.Rd index 110c5331..d83cfab1 100644 --- a/man/quantification.Rd +++ b/man/quantification.Rd @@ -38,8 +38,8 @@ to an existing log file.} \item{verbose}{logical. If TRUE, information about data processing wil be printed to the console.} -\item{log_file_path}{character. Path to a file to which information about -data processing will be saved. +\item{log_file_path}{character. Path to a file to which information about +data processing will be saved. If not provided, such a file will be created automatically. If `append = TRUE`, has to be a valid path to a file.} } @@ -68,13 +68,13 @@ results (data.frame) in a long or matrix format. # Sample quantification shows model-based estimation of protein abundance in each biological # replicate within each time point. # Group quantification shows model-based estimation of protein abundance in each time point. -QuantData<-dataProcess(SRMRawData, use_log_file = FALSE) +QuantData <- dataProcess(SRMRawData, use_log_file = FALSE) head(QuantData$FeatureLevelData) # Sample quantification -sampleQuant<-quantification(QuantData, use_log_file = FALSE) +sampleQuant <- quantification(QuantData, use_log_file = FALSE) head(sampleQuant) # Group quantification -groupQuant<-quantification(QuantData, type="Group", use_log_file = FALSE) +groupQuant <- quantification(QuantData, type = "Group", use_log_file = FALSE) head(groupQuant) } diff --git a/tests/tinytest.R b/tests/tinytest.R index b9681980..1bd81213 100644 --- a/tests/tinytest.R +++ b/tests/tinytest.R @@ -1,3 +1,3 @@ -if ( requireNamespace("tinytest", quietly=TRUE) ){ +if (requireNamespace("tinytest", quietly = TRUE)) { tinytest::test_package("MSstats") } diff --git a/vignettes/MSstats.Rmd b/vignettes/MSstats.Rmd index 9ba8c8b9..a89ca4ee 100644 --- a/vignettes/MSstats.Rmd +++ b/vignettes/MSstats.Rmd @@ -4,8 +4,8 @@ BiocStyle::markdown() ``` ```{r global_options, include=FALSE} -knitr::opts_chunk$set(fig.width=10, fig.height=7, warning=FALSE, message=FALSE) -options(width=110) +knitr::opts_chunk$set(fig.width = 10, fig.height = 7, warning = FALSE, message = FALSE) +options(width = 110) ```
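The manual pages touched above each document one step of the same MSstats workflow. For reference, the sketch below strings those documented calls together into a single pass over the bundled SRMRawData example. It is a minimal outline, not part of the patch itself: it assumes the default settings shown in the pages above, reuses the object names from those examples, and keeps a single contrast (T7 vs T1) for brevity.

library(MSstats)

# Process the SRM example data: log-transform, normalize, and summarize
# feature-level intensities into protein-level run summaries.
QuantData <- dataProcess(SRMRawData, use_log_file = FALSE)

# Pre-analysis visualization; each call writes a PDF unless address = FALSE.
dataProcessPlots(data = QuantData, type = "QCPlot")
dataProcessPlots(data = QuantData, type = "ProfilePlot")

# One contrast (T7 vs T1); contrast columns follow the ordered GROUP levels.
comparison <- matrix(c(-1, 0, 0, 0, 0, 0, 1, 0, 0, 0), nrow = 1)
row.names(comparison) <- "T7-T1"
groups <- levels(QuantData$ProteinLevelData$GROUP)
colnames(comparison) <- groups[order(as.numeric(groups))]

# numberOfCores > 1 enables parallel model fitting with a progress log file
# (Linux and macOS only, per the documentation above); the default of 1 is
# used here.
testResultOneComparison <- groupComparison(
  contrast.matrix = comparison, data = QuantData,
  use_log_file = FALSE
)

# Plan a follow-up experiment from the fitted models and plot the result.
result.sample <- designSampleSize(
  data = testResultOneComparison$FittedModel, numSample = TRUE,
  desiredFC = c(1.25, 1.75), FDR = 0.05, power = 0.8
)
designSampleSizePlots(data = result.sample)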