}}
+\describe{
+\item{\code{policy_design_id}}{ID of the policy design to run}
\item{\code{...}}{additional parameters passed to model simulation}
}
@@ -182,6 +197,63 @@ Set TRUE when model compilation is needed (like with odin).}
}
}
\if{html}{\out{
}}
+\if{latex}{\out{\hypertarget{method-R6Experiment-checkpoint}{}}}
+\subsection{Method \code{checkpoint()}}{
+Save a checkpoint of the experiment.
+\subsection{Usage}{
+\if{html}{\out{
}}
+\describe{
+\item{\code{checkpoint_file}}{Directory to save the checkpoint.}
+}
+\if{html}{\out{
}}
+\if{latex}{\out{\hypertarget{method-R6Experiment-run_checkpoint_iteration}{}}}
+\subsection{Method \code{run_checkpoint_iteration()}}{
+Run a single checkpoint iteration
+\subsection{Usage}{
+\if{html}{\out{
}}\preformatted{R6Experiment$run_checkpoint_iteration(
+ checkpoint_iteration,
+ checkpoint_frequency,
+ remaining_steps,
+ overall_progress,
+ checkpoint_file,
+ completed_steps,
+ backend,
+ ...
+)}\if{html}{\out{
}}
+\describe{
+\item{\code{checkpoint_iteration}}{Current checkpoint iteration.}
+
+\item{\code{checkpoint_frequency}}{Frequency of checkpoints.}
+
+\item{\code{remaining_steps}}{Number of steps remaining in the experiment.}
+
+\item{\code{overall_progress}}{Overall progressor object for tracking progress.}
+
+\item{\code{checkpoint_file}}{Path to the file where the checkpoint will be saved.}
+
+\item{\code{completed_steps}}{Number of steps already completed in the experiment.}
+
+\item{\code{backend}}{Backend to use for parallelization. Options are "future.apply" (default) or "foreach".}
+
+\item{\code{...}}{Additional parameters passed to model simulation.}
+}
+\if{html}{\out{
}}
\if{latex}{\out{\hypertarget{method-R6Experiment-clone}{}}}
\subsection{Method \code{clone()}}{
diff --git a/man/R6Experiment_run.Rd b/man/R6Experiment_run.Rd
deleted file mode 100644
index 4e77ecf..0000000
--- a/man/R6Experiment_run.Rd
+++ /dev/null
@@ -1,40 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/R6Experiment_run.R
-\name{R6Experiment_run}
-\alias{R6Experiment_run}
-\title{Runs R6Experiment in parallel}
-\usage{
-R6Experiment_run(
- self,
- n_cores,
- parallel,
- cluster_eval_script = NULL,
- model_from_cluster_eval,
- cluster_type = "PSOCK",
- packages = NULL,
- ...
-)
-}
-\arguments{
-\item{self}{experiment object}
-
-\item{n_cores}{number of cores to use}
-
-\item{parallel}{whether to evaluate run in parallel}
-
-\item{cluster_eval_script}{path to script that instantiates necessary functions. this will often mean sourcing functions external to the package and loading dependencies for the model. needed if parallel = T}
-
-\item{model_from_cluster_eval}{T if model is instantiated in the cluter eval scripts, F otherwise. Use T if using models that need compilation (like odin) and F otherwise.}
-
-\item{cluster_type}{either "FORK" or "PSOCK".}
-
-\item{packages}{character vector of packages to be loaded before running the model in paralle.}
-
-\item{...}{additional parameters to be passed to the model simulation function.}
-}
-\value{
-results data.frame from all simulations in parallel
-}
-\description{
-This function is most useful to simulate the posterior distribution for a single model in parallel in one machine. This function is not used when calibrating the model and not useful for parallelization across multiple nodes.
-}
diff --git a/man/R6Sim.Rd b/man/R6Sim.Rd
index 1169b65..986992e 100644
--- a/man/R6Sim.Rd
+++ b/man/R6Sim.Rd
@@ -13,39 +13,6 @@ The R6Sim class includes functionality for:
* JSON serialization
* Parallel execution
* Parameter sampling
-}
-\examples{
-# Create simulation model
-MyModel <- R6::R6Class(
- "MyModel",
- inherit = R6Sim,
- public = list(
- initialize = function(name) {
- super$initialize(name)
- self$set_input("population", 1000)
- self$set_input("growth_rate", 0.05)
- },
- simulate = function(...) {
- pop <- self$inputs$population
- growth <- self$inputs$growth_rate
- results <- pop * (1 + growth)^(1:10)
- return(data.frame(year = 1:10, population = results))
- }
- )
-)
-
-model <- MyModel$new("pop_model")
-results <- model$simulate()
-
-
-## ------------------------------------------------
-## Method `R6Sim$set_input`
-## ------------------------------------------------
-
-model$set_input("population", 1000, type = "parameter")
-model$set_input("growth_rates", c(0.01, 0.02), type = "scenario")
-model$set_input("settings", list(iterations = 100), type = "config")
-
}
\section{Public fields}{
\if{html}{\out{
}}
@@ -124,17 +91,6 @@ Accepts numeric, character, logical, data.frame and list inputs.
Type tags enable selective JSON export.
}
-\subsection{Examples}{
-\if{html}{\out{
}}
-\preformatted{model$set_input("population", 1000, type = "parameter")
-model$set_input("growth_rates", c(0.01, 0.02), type = "scenario")
-model$set_input("settings", list(iterations = 100), type = "config")
-
-}
-\if{html}{\out{
}}
-
-}
-
}
\if{html}{\out{
}}
\if{html}{\out{
}}
diff --git a/tests/testthat.R b/tests/testthat.R
index 63c13f0..0182e92 100644
--- a/tests/testthat.R
+++ b/tests/testthat.R
@@ -9,5 +9,12 @@
library(testthat)
library(R6Sim)
library(dplyr)
+library(future)
+
+# Set default parallel backend for tests: multicore, multisession or sequential
+options(parallel_backend = "sequential")
+
+# Set up sequential plan before starting tests.
+future::plan(sequential)
test_check("R6Sim")
diff --git a/tests/testthat/helper-R6Sim.R b/tests/testthat/helper-R6Sim.R
new file mode 100644
index 0000000..b90edff
--- /dev/null
+++ b/tests/testthat/helper-R6Sim.R
@@ -0,0 +1,44 @@
+# This helper file contains shared setup code for R6Sim tests.
+library(dplyr)
+library(tidyr)
+
+# Sample model:
+Mymodel <- R6::R6Class(
+ classname = "Mymodel",
+ inherit = R6Sim,
+ public = list(
+ sim_res = NULL,
+
+ # Custom Initialize function
+ initialize = function(name) {
+ super$initialize(name = name)
+ self$set_input("pop.size", 100)$
+ set_input("risk.mean", 0.01)$
+ set_input("risk.sd", 0.001)$
+ set_input(name = "trials", value = 10)
+ },
+
+ # Sample Simulate function
+ simulate = function() {
+
+ # Create a sample population with some health events:
+ sim_res <- data.frame(
+ p.id = 1:self$inputs$pop.size,
+ risk = self$inputs$risk.mean
+      #risk = rnorm(n = self$inputs$pop.size, mean = self$inputs$risk.mean, sd = self$inputs$risk.sd)
+ ) %>%
+ mutate(probability.event = 1 - exp(-risk)) %>%
+ mutate(n.events = self$inputs$pop.size * self$inputs$trials * probability.event) %>%
+ #mutate(n.events = rbinom(n = 1:self$inputs$pop.size, size = self$inputs$trials, prob = probability.event)) %>%
+ group_by() %>%
+ summarise(n.events = sum(n.events))
+
+ invisible(sim_res)
+ }
+ )
+)
+
+# Creating a model object -------------------------------------------------
+
+# Creates a model object and gives it a name.
+model <- Mymodel$new(name = "test")
diff --git a/tests/testthat/test-R6Experiment.R b/tests/testthat/test-R6Experiment.R
new file mode 100644
index 0000000..9758b55
--- /dev/null
+++ b/tests/testthat/test-R6Experiment.R
@@ -0,0 +1,134 @@
+# This file contains tests specifically for the R6Experiment class.
+
+# Ensure all required libraries are loaded
+library(dplyr)
+library(tidyr)
+library(future)
+
+
+# Creating a model object -------------------------------------------------
+
+
+# Test R6Experiment with set_parameter
+
+test_that("R6Experiment works with set_parameter", {
+ experiment <- R6Experiment$new(model)
+
+ experiment$
+ set_parameter(parameter_name = "Test1", experimental_design = "grid", values = c("Colonoscopy", "FIT"))$
+ set_parameter(parameter_name = "abc", experimental_design = "lhs", min = 1, max = 10)$
+ set_design(n_lhs = 2)
+
+ expect_true(is.R6Experiment(experiment))
+})
+
+# Test R6Experiment without set_parameter
+
+test_that("R6Experiment works without set_parameter", {
+ experiment <- R6Experiment$new(model)
+ experiment$set_design()
+ expect_true(is.R6Experiment(experiment))
+})
+
+# Test R6Experiment with convert to grid = T
+
+experiment <- R6Experiment$new(model)
+
+experiment$
+ set_parameter(parameter_name = "Test1", experimental_design = "grid", values = c("Colonoscopy", "FIT"))$
+ set_parameter(parameter_name = "abc", experimental_design = "lhs", min = 1, max = 10)$
+ set_design(n_lhs = 2, convert_lhs_to_grid = T)
+
+test_that("R6Experiment works with convert to grid = T", {
+ expect_true(is.R6Experiment(experiment))
+})
+
+# Test R6Experiment with pre-existing design
+
+test_that("R6Experiment works with pre-existing design", {
+ experiment <- R6Experiment$new(model)
+ # External grid:
+ grid_design <- expand.grid(c(1:10), c(10:13))
+ # Create an experimental design:
+ experiment$set_design(grid_design_df = grid_design)
+ expect_true(is.R6Experiment(experiment))
+})
+
+# Test R6Experiment runs in parallel using future
+
+test_that("R6Experiment runs in parallel using future", {
+ # Ensure all required libraries are loaded for this test
+ experiment <- R6Experiment$new(model)
+
+ experiment$set_parameter(parameter_name = "Test1", experimental_design = "grid", values = c(1, 2))
+ experiment$set_design(n_reps = 3)
+
+ # Assign the parallel backend function
+ backend_fn <- R6Sim:::get_parallel_backend_fn()
+
+ # Use the backend function in the test
+ plan(backend_fn)
+
+ # Run in parallel mode with checkpoint_dir
+ temp_checkpoint_dir <- tempfile()
+ results <- experiment$run(checkpoint_dir = temp_checkpoint_dir)
+
+ expect_equal(length(unique(results$rep.id)), 3)
+ expect_equal(nrow(results), nrow(experiment$policy_design))
+ expect_true(all(c("rep.id", "seed") %in% names(results)))
+
+ # Reset future plan to sequential
+ plan(sequential)
+
+ # Close/delete the temporary checkpoint file
+ unlink(temp_checkpoint_dir, recursive = TRUE)
+})
+
+# Test R6Experiment runs sequentially
+
+test_that("R6Experiment runs sequentially", {
+ experiment <- R6Experiment$new(model)
+
+ experiment$set_parameter(parameter_name = "Test1", experimental_design = "grid", values = c(1, 2))
+ experiment$set_design(n_reps = 3)
+
+ # Run in sequential mode with checkpoint_dir
+ temp_checkpoint_dir <- tempfile()
+ results <- experiment$run(checkpoint_dir = temp_checkpoint_dir)
+
+ expect_equal(length(unique(results$rep.id)), 3)
+ expect_equal(nrow(results), nrow(experiment$policy_design))
+ expect_true(all(c("rep.id", "seed") %in% names(results)))
+
+ # Close/delete the temporary checkpoint file
+ unlink(temp_checkpoint_dir, recursive = TRUE)
+})
+
+# Test R6Experiment produces identical results with the same seed
+
+test_that("R6Experiment produces identical results with the same seed", {
+ # Set the same seed before creating experiments
+ seed_value <- 1234
+ set.seed(seed_value)
+ experiment1 <- R6Experiment$new(model)
+ experiment1$set_parameter(parameter_name = "Test1", experimental_design = "grid", values = c(1, 2))
+ experiment1$set_design(n_reps = 3, set_seed = TRUE)
+
+ set.seed(seed_value)
+ experiment2 <- R6Experiment$new(model)
+ experiment2$set_parameter(parameter_name = "Test1", experimental_design = "grid", values = c(1, 2))
+ experiment2$set_design(n_reps = 3, set_seed = TRUE)
+
+ # Run both experiments with checkpoint_dir
+ temp_checkpoint_dir1 <- tempfile()
+ temp_checkpoint_dir2 <- tempfile()
+ results1 <- experiment1$run(checkpoint_dir = temp_checkpoint_dir1)
+ results2 <- experiment2$run(checkpoint_dir = temp_checkpoint_dir2)
+
+ # Verify that results are identical
+ expect_identical(results1, results2)
+
+ # Close/delete the temporary checkpoint files
+ unlink(temp_checkpoint_dir1, recursive = TRUE)
+ unlink(temp_checkpoint_dir2, recursive = TRUE)
+})
diff --git a/tests/testthat/test-R6Experiment_checkpointing.R b/tests/testthat/test-R6Experiment_checkpointing.R
new file mode 100644
index 0000000..4641d75
--- /dev/null
+++ b/tests/testthat/test-R6Experiment_checkpointing.R
@@ -0,0 +1,56 @@
+# Test for checkpointing functionality in R6Experiment
+library(testthat)
+library(future)
+library(doFuture)
+
+# Register the future framework as the foreach backend (doFuture); the actual plan is set elsewhere
+registerDoFuture()
+
+# Assign the parallel backend function
+backend_fn <- R6Sim:::get_parallel_backend_fn()
+
+# Helper function to create a mock R6Experiment object
+create_mock_experiment <- function() {
+ R6Experiment$new(model)
+}
+
+test_that("Checkpointing saves and resumes correctly", {
+ # Create a mock experiment
+ experiment <- create_mock_experiment()
+
+ full_design <- data.frame(some_param = 1:100)
+
+ # Mock policy design
+ experiment$set_design(grid_design_df = full_design)
+
+ # Temporary directory for checkpoints
+ checkpoint_dir <- tempfile()
+
+  # length(unique(experiment$policy_design$policy.exp.id))  # leftover debug expression; result was unused
+
+ # Run experiment with checkpointing
+ results <- experiment$run(checkpoint_frequency = 10, checkpoint_dir = checkpoint_dir)
+
+ # Check if checkpoint files are created
+  checkpoint_files <- list.files(checkpoint_dir, pattern = "\\.rds$", full.names = TRUE)
+ expect_true(length(checkpoint_files) > 0)
+
+ # Resume from the last checkpoint
+ last_checkpoint <- checkpoint_files[length(checkpoint_files)]
+
+ checkpoint_experiment <- readRDS(last_checkpoint)
+
+ # Update the design to add new runs
+
+ checkpoint_experiment$set_design(grid_design_df = full_design)
+
+ # Here, it should start from where we left
+ resumed_results <- checkpoint_experiment$run(checkpoint_frequency = 2, checkpoint_dir = checkpoint_dir)
+
+ # Ensure results are consistent
+ expect_equal(nrow(resumed_results), nrow(experiment$policy_design))
+
+ # Clean up checkpoint directory
+ unlink(checkpoint_dir, recursive = TRUE)
+ expect_false(dir.exists(checkpoint_dir))
+})
diff --git a/tests/testthat/test-R6Sim_JSON.R b/tests/testthat/test-R6Sim_JSON.R
new file mode 100644
index 0000000..3db1e9e
--- /dev/null
+++ b/tests/testthat/test-R6Sim_JSON.R
@@ -0,0 +1,60 @@
+# This file contains tests for JSON-related functionalities of the R6Sim class.
+
+# json tests --------------------------------------------------------------
+
+json <- model$to_json()
+
+# Ensure `res` is defined by running the simulation on the original model
+set.seed(1234)
+res <- model$simulate()
+
+# Re-creating the model from json:
+new_model <- Mymodel$new(name = "Mymodel")
+new_model$set_inputs_from_json(json = json)
+
+test_that("to_json and set_input_from_json work", {
+ expect_equal(length(new_model$inputs), length(model$inputs))
+})
+
+test_that("results from a json-converted model is identical to original model", {
+ set.seed(1234)
+ new_res <- new_model$simulate()
+
+ expect_identical(res, new_res)
+})
+
+test_that("to_json preserves inputs and model functionality", {
+ # Create a duplicate model with different input values
+ test_model <- Mymodel$new(name = "test_json_model")
+ test_model$set_input("pop.size", 500)
+ test_model$set_input("risk.mean", 0.05)
+
+ # Convert to JSON and back
+ json_model <- test_model$to_json()
+
+ # Create new model from JSON
+ new_model <- Mymodel$new("new_model")
+ new_model$set_inputs_from_json(json_model)
+
+ # Verify input values were preserved
+ expect_equal(new_model$inputs$pop.size, 500)
+ expect_equal(new_model$inputs$risk.mean, 0.05)
+
+ # Verify simulation results match
+ set.seed(123)
+ result1 <- test_model$simulate()
+
+ set.seed(123)
+ result2 <- new_model$simulate()
+
+ expect_equal(result1, result2)
+})
+
+test_that("set_inputs_from_json handles errors", {
+ # Test error handling for parse errors
+ test_model <- Mymodel$new(name = "test")
+ expect_error(test_model$set_inputs_from_json("invalid json string"))
+
+ # Test handling invalid JSON structure
+ expect_error(test_model$set_inputs_from_json('{"not_inputs": {}}'))
+})
\ No newline at end of file
diff --git a/tests/testthat/test-R6Sim_inputs.R b/tests/testthat/test-R6Sim_inputs.R
new file mode 100644
index 0000000..a4e42d6
--- /dev/null
+++ b/tests/testthat/test-R6Sim_inputs.R
@@ -0,0 +1,78 @@
+# This file contains tests for input handling functionalities of the R6Sim class.
+
+# Test get_inputs from yaml and excel -------------------------------------
+
+test_that("get_inputs works with YAML files", {
+ # Create a temporary YAML file
+ yaml_content <- "
+pop.size: 1000
+risk.mean: 0.15
+risk.sd: 0.02
+trials: 10
+"
+ yaml_file <- tempfile(fileext = ".yaml")
+ writeLines(yaml_content, yaml_file)
+
+ # Create model and test get_inputs
+ model <- Mymodel$new(name = "test")
+ model$get_inputs(yaml_file)
+
+ # Test that inputs were set correctly
+ expect_equal(model$inputs$pop.size, 1000)
+ expect_equal(model$inputs$risk.mean, 0.15)
+ expect_equal(model$inputs$risk.sd, 0.02)
+ expect_equal(model$inputs$trials, 10)
+
+ # Cleanup
+ unlink(yaml_file)
+})
+
+test_that("get_inputs works with Excel files", {
+ # Skip if readxl not available
+ skip_if_not_installed("readxl")
+
+ # Create test workbook
+ wb_file <- tempfile(fileext = ".xlsx")
+
+ # Create data for sheets
+ sheet1_data <- data.frame(
+ pop.size = 2000,
+ risk.mean = 0.25
+ )
+
+ sheet2_data <- data.frame(
+ risk.sd = 0.03,
+ trials = 20
+ )
+
+ # Write Excel file with multiple sheets
+ writexl::write_xlsx(list(
+ Sheet1 = sheet1_data,
+ Sheet2 = sheet2_data
+ ), wb_file)
+
+ # Test get_inputs with Excel file
+ model <- Mymodel$new(name = "test")
+ model$get_inputs(wb_file)
+
+ # Verify inputs were set correctly
+ expect_equal(model$inputs$Sheet1$pop.size, 2000)
+ expect_equal(model$inputs$Sheet1$risk.mean, 0.25)
+ expect_equal(model$inputs$Sheet2$risk.sd, 0.03)
+ expect_equal(model$inputs$Sheet2$trials, 20)
+
+ # Cleanup
+ unlink(wb_file)
+})
+
+test_that("get_inputs handles errors appropriately", {
+ # Test non-existent file
+ model <- Mymodel$new(name = "test")
+ expect_error(model$get_inputs("nonexistent.yaml"))
+
+ # Test invalid file type
+ invalid_file <- tempfile(fileext = ".txt")
+ writeLines("some text", invalid_file)
+ expect_error(model$get_inputs(invalid_file))
+ unlink(invalid_file)
+})
\ No newline at end of file
diff --git a/tests/testthat/test-R6Sim_set_input.R b/tests/testthat/test-R6Sim_set_input.R
new file mode 100644
index 0000000..0dd18c2
--- /dev/null
+++ b/tests/testthat/test-R6Sim_set_input.R
@@ -0,0 +1,18 @@
+test_that("R6Sim_set_input handles replacing inputs with different lengths", {
+ sim <- R6Sim$new()
+ sim$inputs <- list(existing_input = c(1, 2, 3))
+ sim$inputs_table <- data.frame(name = "existing_input", type = "numeric")
+
+ expect_warning(
+ sim$set_input("existing_input", c(1, 2), "numeric")
+ )
+})
+
+test_that("R6Sim_set_input warns for unsupported input types", {
+ sim <- R6Sim$new()
+ unsupported_value <- structure(1, class = "unsupported_class")
+
+ expect_warning(
+ sim$set_input("unsupported_input", unsupported_value, "numeric")
+ )
+})
diff --git a/tests/testthat/test-R6Sim_simulate.R b/tests/testthat/test-R6Sim_simulate.R
new file mode 100644
index 0000000..ca734ae
--- /dev/null
+++ b/tests/testthat/test-R6Sim_simulate.R
@@ -0,0 +1,32 @@
+# This file contains tests for the simulation functionalities of the R6Sim class.
+
+# simulate model ----------------------------------------------------------
+
+set.seed(1234)
+
+res <- model$simulate()
+
+test_that("simulate works", {
+ expect_equal(object = nrow(res), expected = 1)
+})
+
+test_that("both setup_run and simulate methods must be implemented", {
+ # Create a model inheriting from R6Sim without overriding methods
+ BasicModel <- R6::R6Class(
+ classname = "BasicModel",
+ inherit = R6Sim,
+ public = list(
+ initialize = function(name) {
+ super$initialize(name = name)
+ }
+ # No simulate or setup_run implementation
+ )
+ )
+
+ # Create instance
+ basic_model <- BasicModel$new("basic")
+
+ # Methods should throw errors because they must be implemented
+ expect_error(basic_model$setup_run(), "Setup_run method must be implemented by your class")
+ expect_error(basic_model$simulate(), "Simulate method must be implemented by your class")
+})
\ No newline at end of file
diff --git a/tests/testthat/test-test_R6Sim.R b/tests/testthat/test-test_R6Sim.R
deleted file mode 100644
index 2e1fc21..0000000
--- a/tests/testthat/test-test_R6Sim.R
+++ /dev/null
@@ -1,587 +0,0 @@
-
-
-# Sample model:
-
-Mymodel <- R6::R6Class(
- classname = "Mymodel",
- inherit = R6Sim,
- public = list(
- sim_res = NULL,
-
- # Custom Initialize function
- initialize = function(name) {
- super$initialize(name = name)
- self$set_input("pop.size", 100)$
- set_input("risk.mean", 0.01)$
- set_input("risk.sd", 0.001)$
- set_input(name = "trials", value = 10)
- },
-
- # Sample Simulate function
- simulate = function() {
-
- # Create a sample population with some health events:
- sim_res <- data.frame(
- p.id = 1:self$inputs$pop.size,
- risk = rnorm(n = self$inputs$pop.size, mean = self$inputs$risk.mean, sd = self$inputs$risk.sd)
- ) %>%
- mutate(probability.event = 1 - exp(-risk)) %>%
- mutate(n.events = rbinom(n = 1:self$inputs$pop.size, size = self$inputs$trials, prob = probability.event)) %>%
- group_by() %>%
- summarise(n.events = sum(n.events))
-
- invisible(sim_res)
- }
- )
-)
-
-# Creating a model object -------------------------------------------------
-
-# Creates a model object and gives it a name.
-model <- Mymodel$new(name = "test")
-
-test_that("R6Sim was created", {
- expect_true(is.R6Sim(model))
-})
-
-
-# Setting model inputs:
-model$
- set_input(name = "pop.size", value = 1000, type = "settings")$
- set_input(name = "risk.mean", value = 0.15, type = "natural_history")$
- set_input(name = "risk.sd", value = 0.02, type = "natural_history")$
- set_input(name = "trials", value = 10, type = "natural_history")$
- set_input(name = "strategy.id", value = 1, type = "policy")$
- set_input(name = "some_date", value = "2020-01-01", type = "policy")$
- set_input(name = "det.ratios", value = seq.default(from = 0, to = 1, length.out = 101), type = "policy")
-
-# Setting an input twice:
-model$set_input(name = "risk.sd", value = 0.02, type = "natural_history")
-
-test_that("input was created", {
- expect_true(model$inputs$pop.size == 1000)
-})
-
-
-# Set posterior -----------------------------------------------------------
-
-# Loading multiple posterior data.frames:
-posterior.a <- data.frame(
- risk.mean = rnorm(n = 1000, mean = 0, sd = 0.5),
- risk.sd = rnorm(n = 1000, mean = 1, sd = 2),
- weights = 1 / 1000
-)
-# Let's suppose a different calibration resulted in a different posterior:
-posterior.b <- data.frame(
- risk.mean = rnorm(n = 1000, mean = 1, sd = 0.5),
- risk.sd = rnorm(n = 1000, mean = 0.5, sd = 2),
- weights = 1 / 1000
-)
-
-# Here we set the posterior of the model using three posterior files:
-model$set_param_dist(
- params_list = list(pa = posterior.a, pb = posterior.b, pc = posterior.b),
- param_dist_weights = "weights", use_average = F, n_sample = 10
-)
-
-test_that("set_posterior works", {
- expect_true(nrow(model$params_df) == 30)
-})
-
-# Set posterior works without sampling:
-
-model$set_param_dist(
- params_list = list(pa = posterior.a, pb = posterior.b, pc = posterior.b),
- param_dist_weights = "weights", use_average = F, resample = F
-)
-
-
-test_that("set_posterior works without resampling", {
- expect_true(nrow(model$params_df) == 3000)
-})
-
-# Here we set the posterior of the model using three posterior files:
-model$set_param_dist(
- params_list = list(pa = posterior.a, pb = posterior.b, pc = posterior.b),
- param_dist_weights = "weights", use_average = T
-)
-
-test_that("set_posterior works with averages", {
- expect_true(nrow(model$params_df) == 3)
-})
-
-
-
-
-# simulate model ----------------------------------------------------------
-
-set.seed(1234)
-
-res <- model$simulate()
-
-test_that("simulate works", {
- expect_equal(object = nrow(res), expected = 1)
-})
-
-test_that("both setup_run and simulate methods must be implemented", {
- # Create a model inheriting from R6Sim without overriding methods
- BasicModel <- R6::R6Class(
- classname = "BasicModel",
- inherit = R6Sim,
- public = list(
- initialize = function(name) {
- super$initialize(name = name)
- }
- # No simulate or setup_run implementation
- )
- )
-
- # Create instance
- basic_model <- BasicModel$new("basic")
-
- # Methods should throw errors because they must be implemented
- expect_error(basic_model$setup_run(), "Setup_run method must be implemented by your class")
- expect_error(basic_model$simulate(), "Simulate method must be implemented by your class")
-})
-
-
-# json tests --------------------------------------------------------------
-
-json <- model$to_json()
-# Re-creating the model from json:
-new_model <- Mymodel$new(name = "Mymodel")
-new_model$set_inputs_from_json(json = json)
-
-test_that("to_json and set_input_from_json work", {
- expect_equal(length(new_model$inputs), length(model$inputs))
-})
-
-
-test_that("results from a json-converted model is identical to original model", {
- set.seed(1234)
- new_res <- new_model$simulate()
-
- expect_identical(res, new_res)
-})
-
-test_that("to_json preserves inputs and model functionality", {
- # Create a duplicate model with different input values
- test_model <- Mymodel$new(name = "test_json_model")
- test_model$set_input("pop.size", 500)
- test_model$set_input("risk.mean", 0.05)
-
- # Convert to JSON and back
- json_model <- test_model$to_json()
-
- # Create new model from JSON
- new_model <- Mymodel$new("new_model")
- new_model$set_inputs_from_json(json_model)
-
- # Verify input values were preserved
- expect_equal(new_model$inputs$pop.size, 500)
- expect_equal(new_model$inputs$risk.mean, 0.05)
-
- # Verify simulation results match
- set.seed(123)
- result1 <- test_model$simulate()
-
- set.seed(123)
- result2 <- new_model$simulate()
-
- expect_equal(result1, result2)
-})
-
-test_that("set_inputs_from_json handles errors", {
- # Test error handling for parse errors
- test_model <- Mymodel$new(name = "test")
- expect_error(test_model$set_inputs_from_json("invalid json string"))
-
- # Test handling invalid JSON structure
- expect_error(test_model$set_inputs_from_json('{"not_inputs": {}}'))
-})
-
-
-
-# Test get_inputs from yaml and excel -------------------------------------
-
-test_that("get_inputs works with YAML files", {
- # Create a temporary YAML file
- yaml_content <- "
-pop.size: 1000
-risk.mean: 0.15
-risk.sd: 0.02
-trials: 10
-"
- yaml_file <- tempfile(fileext = ".yaml")
- writeLines(yaml_content, yaml_file)
-
- # Create model and test get_inputs
- model <- Mymodel$new(name = "test")
- model$get_inputs(yaml_file)
-
- # Test that inputs were set correctly
- expect_equal(model$inputs$pop.size, 1000)
- expect_equal(model$inputs$risk.mean, 0.15)
- expect_equal(model$inputs$risk.sd, 0.02)
- expect_equal(model$inputs$trials, 10)
-
- # Cleanup
- unlink(yaml_file)
-})
-
-
-test_that("get_inputs works with Excel files", {
- # Skip if readxl not available
- skip_if_not_installed("readxl")
-
- # Create test workbook
- wb_file <- tempfile(fileext = ".xlsx")
-
- # Create data for sheets
- sheet1_data <- data.frame(
- pop.size = 2000,
- risk.mean = 0.25
- )
-
- sheet2_data <- data.frame(
- risk.sd = 0.03,
- trials = 20
- )
-
- # Write Excel file with multiple sheets
- writexl::write_xlsx(list(
- Sheet1 = sheet1_data,
- Sheet2 = sheet2_data
- ), wb_file)
-
- # Test get_inputs with Excel file
- model <- Mymodel$new(name = "test")
- model$get_inputs(wb_file)
-
- # Verify inputs were set correctly
- expect_equal(model$inputs$Sheet1$pop.size, 2000)
- expect_equal(model$inputs$Sheet1$risk.mean, 0.25)
- expect_equal(model$inputs$Sheet2$risk.sd, 0.03)
- expect_equal(model$inputs$Sheet2$trials, 20)
-
- # Cleanup
- unlink(wb_file)
-})
-
-test_that("get_inputs handles errors appropriately", {
- # Test non-existent file
- model <- Mymodel$new(name = "test")
- expect_error(model$get_inputs("nonexistent.yaml"))
-
- # Test invalid file type
- invalid_file <- tempfile(fileext = ".txt")
- writeLines("some text", invalid_file)
- expect_error(model$get_inputs(invalid_file))
- unlink(invalid_file)
-})
-
-
-
-# set_input tests ---------------------------------------------------------
-
-test_that("set_inputs handles unusual inputs", {
- model <- Mymodel$new(name = "test")
-
- # Error on missing parameters
- expect_error(model$set_input())
-
- # Set an initial value to be replaced
- model$set_input("test_length", 10)
-
- # Warning on length mismatch (input change)
- expect_warning(
- model$set_input(name = "test_length", value = c(1,2,3)),
- "You are replacing the input test_length which had length 1 with an object of length 3"
- )
-
- # Warning on unsupported classes
- custom_class <- structure(1, class = "custom_class")
- expect_warning(
- model$set_input(name = "custom_obj", value = custom_class),
- "Input custom_obj includes values using classes that we do not recommend"
- )
-
- # The second warning output from weird classes in list is harder to test exactly
- # So we'll just verify that a warning is thrown
- weird_list <- list(a = structure(1, class = "weird"))
- expect_warning(model$set_input(name = "weird_list", value = weird_list))
-})
-
-
-
-# R6Experiment tests -----------------------------------------------------
-
-test_that("R6Experiment works with set_parameter", {
- experiment <- R6Experiment$new(model)
-
- experiment$
- set_parameter(parameter_name = "Test1", experimental_design = "grid", values = c("Colonoscopy", "FIT"))$
- set_parameter(parameter_name = "abc", experimental_design = "lhs", min = 1, max = 10)$
- set_design(n_lhs = 2)
-
- expect_true(is.R6Experiment(experiment))
-})
-
-test_that("R6Experiment works without set_parameter", {
- experiment <- R6Experiment$new(model)
- experiment$set_design()
- expect_true(is.R6Experiment(experiment))
-})
-
-experiment <- R6Experiment$new(model)
-
-experiment$
- set_parameter(parameter_name = "Test1", experimental_design = "grid", values = c("Colonoscopy", "FIT"))$
- set_parameter(parameter_name = "abc", experimental_design = "lhs", min = 1, max = 10)$
- set_design(n_lhs = 2, convert_lhs_to_grid = T)
-
-test_that("R6Experiment works with convert to grid = T", {
- expect_true(is.R6Experiment(experiment))
-})
-
-test_that("R6Experiment works with pre-existing design", {
- experiment <- R6Experiment$new(model)
- # External grid:
- grid_design <- expand.grid(c(1:10), c(10:13))
- # Create an experimental design:
- experiment$set_design(grid_design_df = grid_design)
- expect_true(is.R6Experiment(experiment))
-})
-
-
-test_that("R6Experiment supports stochastic replications with seeds", {
- experiment <- R6Experiment$new(model)
-
- # Test with default n_reps=1 and set_seed=T
- experiment$set_parameter(parameter_name = "Test1", experimental_design = "grid", values = c("A", "B"))
- experiment$set_design(n_lhs = 2, )
-
- # Check experiment has seed column
- expect_true("seed" %in% names(experiment$policy_design))
- expect_equal(length(unique(experiment$policy_design$rep.id)), 1)
- expect_false(any(is.na(experiment$policy_design$seed)))
-
- # Test with n_reps=3 and set_seed=T
- experiment$set_design(n_lhs = 2, n_reps = 3)
- expect_equal(length(unique(experiment$policy_design$rep.id)), 3)
- expect_false(any(is.na(experiment$policy_design$seed)))
- expect_equal(length(unique(experiment$policy_design$seed)), 3)
-
- # Test with set_seed=F
- experiment$set_design(n_lhs = 2, n_reps = 2, set_seed = F)
- expect_true(all(is.na(experiment$policy_design$seed)))
-
- # Test reproducibility with seeds
- set.seed(123)
- experiment$set_design(n_lhs = 2, n_reps = 2)
- seeds1 <- experiment$policy_design$seed
-
- set.seed(123)
- experiment$set_design(n_lhs = 2, n_reps = 2)
- seeds2 <- experiment$policy_design$seed
-
- expect_equal(seeds1, seeds2)
-})
-
-test_that("run_single_experiment sets seeds correctly", {
-
- experiment <- R6Experiment$new(model)
-
- experiment$set_parameter(parameter_name = "Test1", experimental_design = "grid", values = c(1, 2))
-
- experiment$set_design(n_reps = 2, set_seed = T)
-
- # Get results with same policy_design_id but different seeds
- res1 <- experiment$run()
- res2 <- experiment$run()
-
- # Same policy_design_id should give different results due to different seeds
- expect_true(identical(res1, res2))
-
- # Same policy_design_id and seed should give identical results
- experiment$set_design(n_reps = 2, set_seed = F)
- res3 <- experiment$run()
-
- expect_false(identical(res1, res3))
-})
-
-test_that("R6Experiment runs with replications", {
- experiment <- R6Experiment$new(model)
-
- experiment$set_parameter(parameter_name = "Test1", experimental_design = "grid", values = c(1, 2))
- experiment$set_design(n_reps = 3)
-
- # Run in sequential mode
- results <- experiment$run(parallel = FALSE)
-
- expect_equal(length(unique(results$rep.id)), 3)
- expect_equal(nrow(results), nrow(experiment$policy_design))
- expect_true(all(c("rep.id", "seed") %in% names(results)))
-})
-
-test_that("R6Experiment basic run works", {
- experiment <- R6Experiment$new(model)
- experiment$set_parameter(parameter_name = "Test1", experimental_design = "grid", values = c(1, 2))
- experiment$set_design(n_reps = 2)
-
- # Test with default parameters
- results <- experiment$run()
- expect_equal(nrow(results), nrow(experiment$policy_design))
- expect_true("Test1" %in% names(results))
-
- # Check correct number of replications
- expect_equal(length(unique(results$rep.id)), 2)
-})
-
-# Skip parallel tests as they're harder to run consistently
-test_that("R6Experiment parallel tests are skipped", {
- skip("Skipping parallel execution tests")
-
- # Test with PSOCK cluster (default)
- experiment <- R6Experiment$new(model)
- experiment$set_parameter(parameter_name = "Test1", experimental_design = "grid", values = c(1, 2))
- experiment$set_design(n_reps = 2)
-
- # These would be run if not skipped
- results <- experiment$run(n_cores = 2, parallel = TRUE, packages = "dplyr")
-
- # Test model_from_cluster_eval = TRUE
- expect_error(
- experiment$run(n_cores = 2, parallel = TRUE, model_from_cluster_eval = TRUE),
- regexp = "cluster_experiment"
- )
-})
-
-
-test_that("R6Experiment handles models with no parameter distributions", {
- # Create two models
- model1 <- Mymodel$new(name = "test1")
- model2 <- Mymodel$new(name = "test2")
-
- # Create experiment with models having no params_df
- experiment <- R6Experiment$new(model1, model2)
- experiment$set_design(n_lhs = 2)
-
- # Check that params_df was created for both models
- expect_false(is.null(experiment$models[[1]]$params_df))
- expect_false(is.null(experiment$models[[2]]$params_df))
-
- # Check params_df structure
- expect_true(nrow(experiment$models[[1]]$params_df) == 1)
- expect_true("param.id" %in% names(experiment$models[[1]]$params_df))
-})
-
-test_that("R6Experiment errors on inconsistent parameter distributions", {
- # Create two models
- model1 <- Mymodel$new(name = "test1")
- model2 <- Mymodel$new(name = "test2")
-
- # Set params_df for only one model
- model1$set_param_dist(
- params_list = list(default = data.frame(weights = 1)),
- param_dist_weights = "weights",
- use_average = TRUE
- )
-
- # Create experiment with inconsistent models
- experiment <- R6Experiment$new(model1, model2)
-
- # Should error when setting design
- expect_error(
- experiment$set_design(n_lhs = 2),
- regexp = "Inconsistent parameter distributions"
- )
-})
-
-test_that("R6Experiment works with all models having parameter distributions", {
- # Create two models
- model1 <- Mymodel$new(name = "test1")
- model2 <- Mymodel$new(name = "test2")
-
- # Set params_df for both models
- params <- list(default = data.frame(weights = 1))
- model1$set_param_dist(params_list = params, param_dist_weights = "weights", use_average = TRUE)
- model2$set_param_dist(params_list = params, param_dist_weights = "weights", use_average = TRUE)
-
- # Create and set up experiment
- experiment <- R6Experiment$new(model1, model2)
- experiment$set_design(n_lhs = 2)
-
- # Check experiment setup worked
- expect_false(is.null(experiment$params))
- expect_false(is.null(experiment$policy_design))
-})
-
-
-
-# Test writing experiments to disk ----------------------------------------
-
-# Implement tests when this is implemented in the package:
-
-# test_that("write_design works with CSV format", {
-# # Setup test experiment
-# model <- Mymodel$new(name = "test")
-# experiment <- R6Experiment$new(model)
-# experiment$set_parameter(parameter_name = "Test1", experimental_design = "grid", values = c("A", "B"))
-# experiment$set_design(n_lhs = 2)
-#
-# # Create temp directory
-# temp_dir <- file.path(tempdir(), "R6Sim-test")
-# dir.create(temp_dir, showWarnings = FALSE, recursive = TRUE)
-#
-# # Test CSV write
-# experiment$write_design(path = temp_dir, format = "csv")
-# expect_true(file.exists(file.path(temp_dir, "exp_design.txt")))
-# expect_true(file.exists(file.path(temp_dir, "exp_design_col_names.txt")))
-#
-# # Cleanup
-# if (dir.exists(temp_dir)) {
-# unlink(temp_dir, recursive = TRUE)
-# }
-# })
-
-# test_that("write_design works with temp directory", {
-# # Setup test experiment
-# model <- Mymodel$new(name = "test")
-# experiment <- R6Experiment$new(model)
-# experiment$set_parameter(parameter_name = "Test1", experimental_design = "grid", values = c("A", "B"))
-# experiment$set_design(n_lhs = 2)
-#
-# # Create temp directory that works cross-platform
-# temp_dir <- file.path(tempdir(), "R6Sim-test")
-# dir.create(temp_dir, showWarnings = FALSE, recursive = TRUE)
-#
-# # Test JSON write
-# experiment$write_design(path = temp_dir, format = "json")
-# expect_true(file.exists(file.path(temp_dir, "exp_design.txt")))
-#
-# # Test CSV write
-# experiment$write_design(path = temp_dir, format = "csv")
-# expect_true(file.exists(file.path(temp_dir, "exp_design.txt")))
-# expect_true(file.exists(file.path(temp_dir, "exp_design_col_names.txt")))
-#
-# # Cleanup
-# if (dir.exists(temp_dir)) {
-# unlink(temp_dir, recursive = TRUE)
-# }
-# })
-#
-# test_that("write_design returns experimental design when no path provided", {
-# # Setup
-# model <- Mymodel$new(name = "test")
-# experiment <- R6Experiment$new(model)
-# experiment$set_parameter(parameter_name = "Test1", experimental_design = "grid", values = c("A", "B"))
-# experiment$set_design(n_lhs = 2)
-#
-# # Test return value when no path provided
-# result <- experiment$write_design(format = "json")
-# expect_type(result, "character")
-# expect_gt(length(result), 0)
-# })
-
-
diff --git a/vignettes/introduction.Rmd b/vignettes/introduction.Rmd
index d0ac8b1..f0ab4a3 100644
--- a/vignettes/introduction.Rmd
+++ b/vignettes/introduction.Rmd
@@ -102,7 +102,12 @@ experiment$
set_parameter(parameter_name = "risk.mean",experimental_design = "grid",values = seq.default(from = 0.01, to = 0.05, by = 0.01))$
set_design()
-results <- experiment$run(parallel = F, model_from_cluster_eval = F)
+# This package uses future.apply to run experiments, so you can run experiments
+# in parallel by doing
+# library(future)
+# future::plan(multisession, workers = 2)
+
+results <- experiment$run()
# model.id =1 is the first model passed to experiment, and so on:
results %>%