From 99ac25bc1acf1e6411f0e0f610305a09ebe8c404 Mon Sep 17 00:00:00 2001
From: shudson
Date: Thu, 2 Oct 2025 14:34:00 -0500
Subject: [PATCH 1/9] Add APOSMM nlopt example

---
 examples/libe_aposmm_nlopt/analysis_script.py | 224 +++++++++++++++++
 examples/libe_aposmm_nlopt/bunch_utils.py     | 189 ++++++++++++++
 .../libe_aposmm_nlopt/check_map_v_unmapped.py |  49 ++++
 examples/libe_aposmm_nlopt/note.txt           |   1 +
 examples/libe_aposmm_nlopt/run_example.py     | 176 +++++++++++++
 .../template_simulation_script.py             | 234 ++++++++++++++++++
 6 files changed, 873 insertions(+)
 create mode 100755 examples/libe_aposmm_nlopt/analysis_script.py
 create mode 100755 examples/libe_aposmm_nlopt/bunch_utils.py
 create mode 100644 examples/libe_aposmm_nlopt/check_map_v_unmapped.py
 create mode 100644 examples/libe_aposmm_nlopt/note.txt
 create mode 100755 examples/libe_aposmm_nlopt/run_example.py
 create mode 100755 examples/libe_aposmm_nlopt/template_simulation_script.py

diff --git a/examples/libe_aposmm_nlopt/analysis_script.py b/examples/libe_aposmm_nlopt/analysis_script.py
new file mode 100755
index 00000000..96403d04
--- /dev/null
+++ b/examples/libe_aposmm_nlopt/analysis_script.py
@@ -0,0 +1,224 @@
+"""Defines the analysis function that runs after the simulation."""
+
+import os
+
+import numpy as np
+import matplotlib.pyplot as plt
+import visualpic as vp
+from aptools.plotting.quick_diagnostics import (
+    phase_space_overview,
+    slice_analysis,
+)
+
+
+def bin_and_analyze_particles(z, pz, w, num_bins):
+    """
+    Bin particles by longitudinal position and analyze each bin.
+
+    Parameters
+    ----------
+    z : array
+        Array of longitudinal positions of the particles.
+    pz : array
+        Array of longitudinal momenta of the particles.
+    w : array
+        Array of weights of the particles.
+    num_bins : int
+        Number of bins to divide the particles into.
+
+    Returns
+    -------
+    mean_gamma : float
+        Mean gamma across all bins, weighted by particle number.
+    std_gamma : float
+        Weighted standard deviation of the bin gammas.
+    bin_gammas : ndarray
+        Average gamma of each bin.
+    bin_nparts : ndarray
+        Weighted number of particles in each bin.
+    """
+    # Create bins
+    bin_nparts, bin_edges = np.histogram(z, bins=num_bins, weights=w)
+
+    # Find the bin indices for each particle
+    bin_indices = np.digitize(z, bin_edges) - 1
+
+    # Calculate particle gamma from longitudinal momentum (neglecting
+    # transverse momentum, almost irrelevant since ultra-relativistic)
+    gamma = np.sqrt(pz**2 + 1)
+
+    # Initialize list to hold the results
+    bin_gammas = []
+
+    # Analyze each bin
+    for i in range(num_bins):
+        # Select particles in the current bin
+        mask = bin_indices == i
+        bin_gamma = gamma[mask]
+        bin_w = w[mask]
+
+        try:
+            # Compute the weighted average gamma of the bin
+            avg_gamma = np.average(bin_gamma, weights=bin_w)
+        except ZeroDivisionError:
+            avg_gamma = 0  # invalid; all weights in the bin are zero
+
+        # Store the results
+        bin_gammas.append(avg_gamma)
+
+    bin_gammas = np.array(bin_gammas)
+
+    try:
+        # Compute mean gamma across all bins
+        mean_gamma = np.average(bin_gammas, weights=bin_nparts)
+    except ZeroDivisionError:
+        mean_gamma = 0  # invalid; all bin weights are zero
+
+    # Compute standard deviation
+    try:
+        std_gamma = np.sqrt(
+            np.average((bin_gammas - mean_gamma) ** 2, weights=bin_nparts)
+        )
+    except ZeroDivisionError:
+        std_gamma = 9e9  # invalid; all bin weights are zero
+
+    return mean_gamma, std_gamma, bin_gammas, bin_nparts
+
+
+def analyze_simulation(
+    simulation_directory,
+    output_params,
+    ref_bunch_vals={"q_tot_pC": 200, "energy_spread": 5e-3},
+    num_bins=10,
+    make_plots=True,
+    remove_all_diags_but_last=True,
+):
+    """Analyze the output of the simulation."""
+    # Load data.
+    diags_dir = os.path.join(simulation_directory, "diags/hdf5")
+    dc = vp.DataContainer("openpmd", diags_dir)
+    dc.load_data()
+
+    # Get final bunch distribution.
+    bunch = dc.get_species("bunch")
+    ts = bunch.timesteps
+    bunch_data = bunch.get_data(ts[-1])
+    w = bunch_data["w"][0]
+    x = bunch_data["x"][0]
+    y = bunch_data["y"][0]
+    z = bunch_data["z"][0]
+    px = bunch_data["px"][0]
+    py = bunch_data["py"][0]
+    pz = bunch_data["pz"][0]
+    q = bunch_data["q"][0]  # charge is already weighted, apparently
+
+    # Remove particles with pz < 100
+    pz_filter = np.where(pz >= 100)
+    w = w[pz_filter]
+    x = x[pz_filter]
+    y = y[pz_filter]
+    z = z[pz_filter]
+    px = px[pz_filter]
+    py = py[pz_filter]
+    pz = pz[pz_filter]
+    q = q[pz_filter]
+
+    # Bin and analyze the particles
+    mean_gamma, std_gamma, bin_averages, bin_nparts = bin_and_analyze_particles(
+        z, pz, w, num_bins
+    )
+
+    # Calculate relevant parameters.
+    q_tot = np.abs(np.sum(q)) * 1e12  # total charge (pC)
+    q_ref = ref_bunch_vals["q_tot_pC"]  # bunch charge (pC): 200 pC by default
+
+    energy_spread_ref = ref_bunch_vals["energy_spread"]
+    energy_spread = std_gamma / mean_gamma
+
+    # Calculate objective.
+    f = np.log(mean_gamma * q_tot / q_ref / (energy_spread / energy_spread_ref))
+
+    # Store quantities in output
+    output_params["f"] = -f
+    output_params["charge"] = q_tot
+    output_params["mean_gamma"] = mean_gamma
+    output_params["std_gamma"] = std_gamma
+    # Add other parameters as needed
+    for i in range(num_bins):
+        output_params[f"bin_gammas_{i+1}"] = bin_averages[i]
+        output_params[f"bin_nparts_{i+1}"] = bin_nparts[i]
+
+    # Save objective to file (for convenience).
+    np.savetxt("f.txt", np.array([f]))
+
+    # Make plots.
+    if make_plots:
+        try:
+            plt.figure()
+            slice_analysis(x, y, z, px, py, pz, q, show=False)
+            plt.savefig("final_lon_phase_space.png")
+            plt.figure()
+            phase_space_overview(x, y, z, px, py, pz, q, show=False)
+            plt.savefig("final_phase_space.png")
+        except Exception:
+            print("Failed to make plots.")
+
+    if remove_all_diags_but_last:
+        # Remove all diagnostics except the last file.
+        try:
+            for file in sorted(os.listdir(diags_dir))[:-1]:
+                file_path = os.path.join(diags_dir, file)
+                os.remove(file_path)
+        except Exception:
+            print("Could not remove diagnostics.")
+
+    return output_params
+
+
+def weighted_mad(x, w):
+    """Calculate the weighted median and median absolute deviation."""
+    med = weighted_median(x, w)
+    mad = weighted_median(np.abs(x - med), w, quantile=0.5)
+    return med, mad
+
+
+def weighted_median(data, weights, quantile=0.5):
+    """Compute the weighted quantile of a 1D numpy array.
+
+    Parameters
+    ----------
+    data : ndarray
+        Input array (one dimension).
+    weights : ndarray
+        Array with the weights, of the same size as `data`.
+    quantile : float
+        Quantile to compute. It must have a value between 0 and 1.
+
+    Returns
+    -------
+    quantile_1D : float
+        The weighted quantile of `data`.
+    """
+    # Check the data
+    if not isinstance(data, np.matrix):
+        data = np.asarray(data)
+    if not isinstance(weights, np.matrix):
+        weights = np.asarray(weights)
+    nd = data.ndim
+    if nd != 1:
+        raise TypeError("data must be a one dimensional array")
+    ndw = weights.ndim
+    if ndw != 1:
+        raise TypeError("weights must be a one dimensional array")
+    if data.shape != weights.shape:
+        raise TypeError("the length of data and weights must be the same")
+    if (quantile > 1.0) or (quantile < 0.0):
+        raise ValueError("quantile must have a value between 0. and 1.")
+    # Sort the data
+    ind_sorted = np.argsort(data)
+    sorted_data = data[ind_sorted]
+    sorted_weights = weights[ind_sorted]
+    # Compute the auxiliary arrays
+    Sn = np.cumsum(sorted_weights)
+    # TODO: Check that the weights do not sum zero
+    # assert Sn != 0, "The sum of the weights must not be zero"
+    Pn = (Sn - 0.5 * sorted_weights) / Sn[-1]
+    # Get the value of the weighted median
+    return np.interp(quantile, Pn, sorted_data)
diff --git a/examples/libe_aposmm_nlopt/bunch_utils.py b/examples/libe_aposmm_nlopt/bunch_utils.py
new file mode 100755
index 00000000..9a776d05
--- /dev/null
+++ b/examples/libe_aposmm_nlopt/bunch_utils.py
@@ -0,0 +1,189 @@
+"""Defines utilities for generating particle bunches."""
+
+import numpy as np
+from scipy.constants import c
+
+
+def gaussian_bunch(
+    q_tot, n_part, gamma0, s_g, s_z, emit_x, s_x, zf=0.0, tf=0, x_c=0.0
+):
+    """Create a Gaussian particle bunch."""
+    n_part = int(n_part)
+
+    np.random.seed(42)
+    z = zf + s_z * np.random.standard_normal(n_part)
+    x = x_c + s_x * np.random.standard_normal(n_part)
+    y = s_x * np.random.standard_normal(n_part)
+
+    gamma = np.random.normal(gamma0, s_g, n_part)
+
+    s_ux = emit_x / s_x
+    ux = s_ux * np.random.standard_normal(n_part)
+    uy = s_ux * np.random.standard_normal(n_part)
+
+    uz = np.sqrt((gamma**2 - 1) - ux**2 - uy**2)
+
+    if tf != 0.0:
+        x = x - ux * c * tf / gamma
+        y = y - uy * c * tf / gamma
+        z = z - uz * c * tf / gamma
+
+    q = np.ones(n_part) * q_tot / n_part
+
+    return x, y, z, ux, uy, uz, q
+
+
+def flattop_bunch(
+    q_tot,
+    n_part,
+    gamma0,
+    s_g,
+    length,
+    s_z,
+    emit_x,
+    s_x,
+    emit_y,
+    s_y,
+    zf=0.0,
+    tf=0,
+    x_c=0.0,
+    y_c=0,
+):
+    """Create a flat-top particle bunch."""
+    n_part = int(n_part)
+
+    norma = length + np.sqrt(2 * np.pi) * s_z
+    n_plat = int(n_part * length / norma)
+    n_gaus = int(n_part * np.sqrt(2 * np.pi) * s_z / norma)
+
+    # Create flat-top and Gaussian profiles
+    z_plat = np.random.uniform(0.0, length, n_plat)
+    z_gaus = s_z * np.random.standard_normal(n_gaus)
+
+    # Concatenate both profiles
+    z = np.concatenate(
+        (
+            z_gaus[np.where(z_gaus <= 0)],
+            z_plat,
+            z_gaus[np.where(z_gaus > 0)] + length,
+        )
+    )
+
+    z = z - length / 2.0 + zf  # shift to final position
+
+    n_part = len(z)
+    x = x_c + s_x * np.random.standard_normal(n_part)
+    y = y_c + s_y * np.random.standard_normal(n_part)
+
+    gamma = np.random.normal(gamma0, s_g, n_part)
+
+    s_ux = emit_x / s_x
+    ux = s_ux * np.random.standard_normal(n_part)
+
+    s_uy = emit_y / s_y
+    uy = s_uy * np.random.standard_normal(n_part)
+
+    uz = np.sqrt((gamma**2 - 1) - ux**2 - uy**2)
+
+    if tf != 0.0:
+        x = x - ux * c * tf / gamma
+        y = y - uy * c * tf / gamma
+        z = z - uz * c * tf / gamma
+
+    q = np.ones(n_part) * q_tot / n_part
+
+    return x, y, z, ux, uy, uz, q
+
+
+def trapezoidal_bunch(
+    i0,  # Initial current (at the beginning of the trapezoid)
+    i1,  # Final current (at the end of the trapezoid)
+    n_part,  # Number of particles in the bunch
+    gamma0,  # Central value of the relativistic gamma factor
+    s_g,  # Standard deviation of the gamma factor
+    length,  # Length of the trapezoidal bunch
+    s_z,  # Standard deviation of the longitudinal distribution
+    emit_x,  # Normalized emittance in the x-direction
+    s_x,  # Standard deviation of the x-position distribution
+    emit_y,  # Normalized emittance in the y-direction
+    s_y,  # Standard deviation of the y-position distribution
+    zf=0.0,  # Final z position
+    tf=0,  # Final time
+    x_c=0.0,  # x-center position
+    y_c=0.0,  # y-center position
+):
+    """Create a trapezoidal particle bunch."""
+    n_part = int(n_part)  # Ensure the number of particles is an integer
+
+    # Calculate charges for the plateau, triangular, and Gaussian sections of the bunch
+    q_plat = (min(i0, i1) / c) * length
+    q_triag = ((max(i0, i1) - min(i0, i1)) / c) * length / 2.0
+    q_gaus0 = (i0 / c) * np.sqrt(2 * np.pi) * s_z / 2.0
+    q_gaus1 = (i1 / c) * np.sqrt(2 * np.pi) * s_z / 2.0
+    q_tot = q_plat + q_triag + q_gaus0 + q_gaus1  # Total charge
+
+    # Determine the number of particles in each section
+    n_plat = int(n_part * q_plat / q_tot)
+    n_triag = int(n_part * q_triag / q_tot)
+    n_gaus0 = int(n_part * q_gaus0 / q_tot)
+    n_gaus1 = int(n_part * q_gaus1 / q_tot)
+
+    np.random.seed(42)  # Seed for reproducibility
+    z_plat = np.random.uniform(
+        0.0, length, n_plat
+    )  # Uniform distribution for plateau
+    if i0 <= i1:
+        z_triag = np.random.triangular(
+            0.0, length, length, n_triag
+        )  # Triangular distribution (rising)
+    else:
+        z_triag = np.random.triangular(
+            0.0, 0.0, length, n_triag
+        )  # Triangular distribution (falling)
+    z_gaus0 = s_z * np.random.standard_normal(
+        2 * n_gaus0
+    )  # Gaussian distribution for initial current
+    z_gaus1 = s_z * np.random.standard_normal(
+        2 * n_gaus1
+    )  # Gaussian distribution for final current
+
+    # Concatenate distributions and adjust positions
+    z = np.concatenate(
+        (
+            z_gaus0[np.where(z_gaus0 < 0)],
+            z_plat,
+            z_triag,
+            z_gaus1[np.where(z_gaus1 > 0)] + length,
+        )
+    )
+
+    z = z - length / 2.0 + zf  # Shift to final position
+
+    n_part = len(z)  # Recalculate the number of particles
+    x = x_c + s_x * np.random.standard_normal(
+        n_part
+    )  # x-positions with Gaussian spread
+    y = y_c + s_y * np.random.standard_normal(
+        n_part
+    )  # y-positions with Gaussian spread
+
+    gamma = np.random.normal(gamma0, s_g, n_part)  # Gamma distribution
+
+    s_ux = emit_x / s_x  # x-momentum spread
+    ux = s_ux * np.random.standard_normal(n_part)  # x-momenta
+
+    s_uy = emit_y / s_y  # y-momentum spread
+    uy = s_uy * np.random.standard_normal(n_part)  # y-momenta
+
+    uz = np.sqrt(
+        (gamma**2 - 1) - ux**2 - uy**2
+    )  # z-momenta from relativistic relation
+
+    if tf != 0.0:  # Adjust positions and momenta if final time is specified
+        x = x - ux * c * tf / gamma
+        y = y - uy * c * tf / gamma
+        z = z - uz * c * tf / gamma
+
+    q = np.ones(n_part) * q_tot / n_part  # Charge per particle
+
+    return x, y, z, ux, uy, uz, q  # Return particle properties
diff --git a/examples/libe_aposmm_nlopt/check_map_v_unmapped.py b/examples/libe_aposmm_nlopt/check_map_v_unmapped.py
new file mode 100644
index 00000000..664cfc73
--- /dev/null
+++ b/examples/libe_aposmm_nlopt/check_map_v_unmapped.py
@@ -0,0 +1,49 @@
+"""Module for checking consistency between mapped and unmapped data."""
+
+
+def check_mapped_vs_unmapped(H, H_unmapped, print_rows=True):
+    """Check that mapped and unmapped data are consistent."""
+    # Compare mapped vs unmapped for first few rows
+    if print_rows:
+        print("\nMapped vs Unmapped comparison:")
+        for i in range(min(3, len(H))):
+            print(f"\nRow {i}:")
+            print(
+                f"  Mapped - sim_id: {H['sim_id'][i]}, x: {H['x'][i]}, x_on_cube: {H['x_on_cube'][i]}"
+            )
+            print(
+                f"  Unmapped - sim_id: {H_unmapped['sim_id'][i]}, "
+                + f"beam_i_r2: {H_unmapped['beam_i_r2'][i]}, beam_z_i_2: {H_unmapped['beam_z_i_2'][i]}, "
+                + f"beam_length: {H_unmapped['beam_length'][i]}, beam_i_r2_on_cube: {H_unmapped['beam_i_r2_on_cube'][i]}, "
+                + f"beam_z_i_2_on_cube: {H_unmapped['beam_z_i_2_on_cube'][i]}, beam_length_on_cube: {H_unmapped['beam_length_on_cube'][i]}"
+            )
+
+    for i in range(len(H)):
+        # Check sim_id matches
+        assert (
+            H["sim_id"][i] == H_unmapped["sim_id"][i]
+        ), f"sim_id mismatch at row {i}"
+
+        # Check x array matches individual variables
+        assert (
+            H["x"][i][0] == H_unmapped["beam_i_r2"][i]
+        ), f"x[0] != beam_i_r2 at row {i}"
+        assert (
+            H["x"][i][1] == H_unmapped["beam_z_i_2"][i]
+        ), f"x[1] != beam_z_i_2 at row {i}"
+        assert (
+            H["x"][i][2] == H_unmapped["beam_length"][i]
+        ), f"x[2] != beam_length at row {i}"
+
+        # Check x_on_cube array matches individual variables
+        assert (
+            H["x_on_cube"][i][0] == H_unmapped["beam_i_r2_on_cube"][i]
+        ), f"x_on_cube[0] != beam_i_r2_on_cube at row {i}"
+        assert (
+            H["x_on_cube"][i][1] == H_unmapped["beam_z_i_2_on_cube"][i]
+        ), f"x_on_cube[1] != beam_z_i_2_on_cube at row {i}"
+        assert (
+            H["x_on_cube"][i][2] == H_unmapped["beam_length_on_cube"][i]
+        ), f"x_on_cube[2] != beam_length_on_cube at row {i}"
+
+    print("\nMapped and unmapped data are consistent!")
diff --git a/examples/libe_aposmm_nlopt/note.txt b/examples/libe_aposmm_nlopt/note.txt
new file mode 100644
index 00000000..f6482c0b
--- /dev/null
+++ b/examples/libe_aposmm_nlopt/note.txt
@@ -0,0 +1 @@
+From run_aposmm_nlopt_with_export_checks.py - in addition to the ensemble run, this tests different export options. More fitting as a test than an example.
diff --git a/examples/libe_aposmm_nlopt/run_example.py b/examples/libe_aposmm_nlopt/run_example.py
new file mode 100755
index 00000000..025259ad
--- /dev/null
+++ b/examples/libe_aposmm_nlopt/run_example.py
@@ -0,0 +1,176 @@
+"""
+Optimization of an LPA with APOSMM/nlopt and Wake-T.
+
+Export options can also be tested.
+"""
+
+import numpy as np
+
+# from multiprocessing import set_start_method
+
+import libensemble.gen_funcs
+
+libensemble.gen_funcs.rc.aposmm_optimizers = "nlopt"
+
+from libensemble.gen_classes import APOSMM
+from optimas.generators import ExternalGenerator
+from libensemble.tools import add_unique_random_streams
+
+from gest_api.vocs import VOCS
+from optimas.evaluators import TemplateEvaluator
+from optimas.explorations import Exploration
+
+from analysis_script import analyze_simulation
+
+check_map_v_unmapped = True
+if check_map_v_unmapped:
+    from check_map_v_unmapped import check_mapped_vs_unmapped
+
+
+# Number of simulation batches, their size, and the maximum number of simulations
+n_batches = 10  # 8
+batch_size = 4  # 24
+
+initial_sample = batch_size  # *4
+max_evals = n_batches * batch_size  # + initial_sample
+nworkers = batch_size
+
+
+# Create varying parameters and objectives.
+mcr = 1e-2  # minimal current ratio
+# back current ratio: its sum with the front ratio equals 1; both are doubled and multiplied by the average current
+
+# Single source of truth for variable definitions
+vars_std = {
+    "beam_i_r2": [mcr, 1.0 - mcr],
+    "beam_z_i_2": [-20.0, 20.0],  # µm
+    "beam_length": [1.0, 20.0],  # µm
+    "beam_i_r2_on_cube": [0, 1.0],
+    "beam_z_i_2_on_cube": [0, 1.0],
+    "beam_length_on_cube": [0, 1.0],
+}
+n = 3
+
+# Create VOCS object
+# start for bin results
+bin_start = 4
+# number of bins for structure-exploiting optimization (not used here, since this is nlopt)
+nbins = 10
+
+# Build observables set with all parameters
+observables_set = {
+    "mean_gamma",  # arithmetic mean
+    "std_gamma",  # standard deviation
+    "charge",  # Track charge to see if we lost any particles
+}
+
+# Note: This nlopt example does not use these fields, but they test the setup.
+
+# Add bin results to observables
+for i in range(nbins):
+    observables_set.add(f"bin_gammas_{i+1}")  # average gammas per bin
+
+for i in range(nbins):
+    observables_set.add(f"bin_nparts_{i+1}")  # number of particles per bin
+
+vocs = VOCS(
+    variables=vars_std,
+    objectives={"f": "MINIMIZE"},
+    observables=observables_set,
+)
+
+for obs in vocs.observables:
+    print(obs)
+
+
+# Set up APOSMM generator
+persis_info = add_unique_random_streams({}, 5)[
+    1
+]  # SH Don't need the 5. Better to have APOSMM defaults.
+persis_info["nworkers"] = (
+    nworkers  # SH - not taking account of gen_on_manager in APOSMM
+)
+
+
+variables_mapping = {
+    "x": ["beam_i_r2", "beam_z_i_2", "beam_length"],
+    "x_on_cube": [
+        "beam_i_r2_on_cube",
+        "beam_z_i_2_on_cube",
+        "beam_length_on_cube",
+    ],
+}
+
+aposmm = APOSMM(
+    vocs=vocs,
+    variables_mapping=variables_mapping,
+    persis_info=persis_info,
+    initial_sample_size=initial_sample,
+    sample_points=np.atleast_2d(0.1 * (np.arange(n) + 1)),
+    localopt_method="LN_BOBYQA",
+    rk_const=1e-4,  # 0.5 * ((gamma(1 + (n / 2)) * 5) ** (1 / n)) / sqrt(pi),
+    run_max_eval=100 * (n + 1),
+    max_active_runs=batch_size,
+    dist_to_bound_multiple=0.5,
+    ftol_rel=1e-8,
+)
+
+# Create generator.
+gen = ExternalGenerator(
+    ext_gen=aposmm,
+    vocs=vocs,
+    save_model=True,
+)
+
+# Create evaluator.
+ev = TemplateEvaluator(
+    sim_template="template_simulation_script.py",
+    analysis_func=analyze_simulation,
+    sim_files=["bunch_utils.py"],
+)
+
+# Create exploration.
+exp = Exploration(
+    generator=gen,
+    evaluator=ev,
+    max_evals=max_evals,
+    sim_workers=nworkers,
+    run_async=False,  # SH - also try with True
+    exploration_dir_path="./exploration_0",
+)
+
+
+if __name__ == "__main__":
+    # set_start_method("spawn")
+
+    # Test running export when no data (optional test)
+    empty_result = aposmm.export()
+    assert empty_result == (
+        None,
+        None,
+        None,
+    ), f"Expected (None, None, None) but got {empty_result}"
+
+    # Run exploration
+    exp.run()
+
+    if exp.is_manager:
+        aposmm.finalize()
+
+        # Get data in gen format and in user format
+        H, _, _ = aposmm.export()
+        H_unmapped, _, _ = aposmm.export(user_fields=True)
+
+        # H_dicts, _, _ = aposmm.export(as_dicts=True)
+        # H_dicts, _, _ = aposmm.export(as_dicts=True, user_fields=True)
+        # print(f"\n\nH_dicts: {H_dicts}")
+
+        # Check data consistency if enabled
+        if check_map_v_unmapped:
+            check_mapped_vs_unmapped(H, H_unmapped, print_rows=True)
+
+        # Check sampling followed by optimization runs
+        assert not np.any(H_unmapped["local_pt"][:initial_sample])
+        assert np.all(H_unmapped["local_pt"][initial_sample:])
+
+        np.save("H_final.npy", H)
+        np.save("H_final_unmapped.npy", H_unmapped)
diff --git a/examples/libe_aposmm_nlopt/template_simulation_script.py b/examples/libe_aposmm_nlopt/template_simulation_script.py
new file mode 100755
index 00000000..1a06b5bd
--- /dev/null
+++ b/examples/libe_aposmm_nlopt/template_simulation_script.py
@@ -0,0 +1,234 @@
+"""Script for simulating an LPA with external injection with Wake-T.
+
+The beam current, position and length are parameters exposed to the optimizer to
+try to achieve optimal beam loading.
+"""
+
+import numpy as np
+import scipy.constants as sc
+
+from wake_t import (
+    ParticleBunch,
+    GaussianPulse,
+    PlasmaStage,
+    ActivePlasmaLens,
+    Drift,
+    Beamline,
+)
+from wake_t.beamline_elements import FieldElement
+from bunch_utils import trapezoidal_bunch
+from wake_t.diagnostics import analyze_bunch
+
+
+def beta_gamma_from_GeV(m_species, E_kin_GeV):
+    """Calculate beta*gamma from kinetic energy in GeV."""
+    E_rest_GeV = m_species * sc.c**2 / sc.electron_volt / 1e9
+
+    return np.sqrt((E_kin_GeV / E_rest_GeV + 1) ** 2 - 1)
+
+
+def gamma_from_GeV(m_species, E_kin_GeV):
+    """Calculate gamma from kinetic energy in GeV."""
+    E_rest_GeV = m_species * sc.c**2 / sc.electron_volt / 1e9
+
+    return 1 + E_kin_GeV / E_rest_GeV
+
+
+def calculate_critical_density(wavelength):
+    """Calculate the critical density for a given laser wavelength."""
+    omega_L = 2 * np.pi * sc.c / wavelength
+    return sc.m_e * sc.epsilon_0 * omega_L**2 / sc.elementary_charge**2
+
+
+def matched_beta(gamma, lambda_L, n_e):
+    """Matching condition for full blowout."""
+    n_c = calculate_critical_density(lambda_L)
+    return np.sqrt(2 * gamma) * lambda_L * np.sqrt(n_c / n_e) / (2 * np.pi)
+
+
+# General simulation parameters.
+n_p0 = 1.7e23
+q_tot = (
+    196 * 1e-12
+)  # total charge (C); set smaller than the actual charge will be, due to the Gaussian tails (should come out at 200 pC)
+
+# Energy values and corresponding beta function values from calculations and measurements
+# (previous optimization at 1 fC charge)
+energies_GeV = np.array([1, 10, 100, 1e3, 1e4])  # initial beam energy
+betas_fb = np.array(
+    [8.065e-4, 2.550e-3, 8.063e-3, 2.550e-2, 8.063e-2]
+)  # analytical value for full blowout
+betas_opt = np.array(
+    [9.330e-4, 2.775e-3, 7.746e-6, 1.721e-2, 3.644e-2]
+)  # measured in optimization study
+
+
+def simulate_plasma_stage(
+    beam_i_r2=0.8, beam_z_i_2=0.0, beam_length_um=5, diags=True
+):
+    """Simulate plasma stage with given beam parameters."""
+    # Laser parameters
+    # ----------------
+
+    a_0 = 2.36
+    w_0 = 36e-6
+    tau_fwhm = 7.33841e-14 * np.sqrt(2.0 * np.log(2))  # FWHM in intensity
+    z_foc = 0.0  # Focal position of the laser
+
+    # Beam parameters
+    # ---------------
+    i = 0  # chooses the beam energy (1 GeV for i = 0) and beta values from above
+    z_beam = (
+        60 + beam_z_i_2
+    ) * 1e-6  # distance of the beam center to the drive laser pulse
+    l_beam = beam_length_um * 1e-6  # beam length (m)
+
+    # beam currents are coupled due to the total charge
+    beam_i_r1 = 1.0 - beam_i_r2
+
+    # we keep the beam charge almost constant (just not accounting for the Gaussian flanks yet)
+    i_avg = (
+        q_tot / l_beam * sc.c
+    )  # average beam current (for rectangle profile)
+    i1_beam = 2 * beam_i_r1 * i_avg
+    i2_beam = 2 * beam_i_r2 * i_avg
+
+    n_part = 50000
+    gamma_beam = gamma_from_GeV(
+        m_species=sc.electron_mass, E_kin_GeV=energies_GeV[i]
+    )  # calculate beam gamma
+    ene_sp = 0.1  # energy spread in %
+    sz0 = 1e-7  # Gaussian decay
+    n_emitt_x = 1e-6
+    n_emitt_y = 1e-6
+    betax0 = betas_opt[i]
+
+    sx0 = np.sqrt(n_emitt_x * betax0 / gamma_beam)  # matched beam size (rms)
+    sy0 = np.sqrt(n_emitt_y * betax0 / gamma_beam)  # matched beam size (rms)
+
+    # Generate bunch (already controls numpy seed for reproducibility)
+    x, y, z, ux, uy, uz, q = trapezoidal_bunch(
+        i1_beam,
+        i2_beam,
+        n_part=n_part,
+        gamma0=gamma_beam,
+        s_g=ene_sp * gamma_beam / 100,
+        length=l_beam,
+        s_z=sz0,
+        emit_x=n_emitt_x,
+        s_x=sx0,
+        emit_y=n_emitt_y,
+        s_y=sy0,
+        zf=0.0,
+        tf=0.0,
+    )
+
+    z -= (
+        l_beam / 2 + z_beam
+    )  # shift the longitudinal beam position away from the driver
+    w = np.abs(q / sc.e)
+    bunch = ParticleBunch(w, x, y, z, ux, uy, uz, name="bunch")
+
+    # Plasma density profile
+    # ----------------------
+    l_plateau = 28.0e-5  # 28.0e-2 # (m) # SH change for short test
+    l_plasma = l_plateau
+
+    # Determine guiding channel.
+    r_e = sc.e**2 / (4.0 * np.pi * sc.epsilon_0 * sc.m_e * sc.c**2)
+
+    # empirical length scale for guiding the pulse in this partial blowout
+    emp_len_um = 40.0e-6
+    rel_delta_n_over_w2 = 1.0 / (np.pi * r_e * emp_len_um**4 * n_p0)
+
+    # Density function.
+    def density_profile(z):
+        """Define plasma density as a function of ``z``."""
+        # Allocate relative density array.
+        n = np.zeros_like(z)
+        # Add plateau.
+        n = np.where((z >= 0) & (z <= l_plateau), 1, n)
+        # Return absolute density.
+        n_abs = n * n_p0
+        # Add a minimal density value wherever it is zero, so we avoid crashes
+        # Date: 2024-01-10, recommended by Angel Ferran-Pousa
+        n_fixed = np.where(n_abs <= 1e10, 1e10, n_abs)
+        return n_fixed
+
+    # Create plasma stage
+    # -------------------
+    # Simulation box and grid parameters.
+    r_max = w_0 * 4  # Maximum radial extent of the box
+    r_max_plasma = w_0 * 3  # Maximum radial extent of the plasma.
+    l_box = 200e-6  # Length of simulation box
+    # make sure to initialize the bunch at a distance from the driver, or change this number
+    xi_0 = 0  # Center of the drive laser pulse
+    xi_max = xi_0 + 45e-6  # Right edge of the box in the speed-of-light frame
+    xi_min = xi_max - l_box  # Left edge of the box in the speed-of-light frame
+    res_beam_r = 1.0  # default: 5, resolution we want for the beam radius
+    dr = (
+        np.min([sx0, sy0]) / res_beam_r
+    )  # make the radial resolution depend on the RMS beam size to avoid artifacts
+    dz = tau_fwhm / 40 * sc.c
+    nr = int(r_max / dr)
+    nz = int(l_box / dz)
+    laser_evolution = True
+
+    laser = GaussianPulse(
+        xi_c=xi_0, a_0=a_0, w_0=w_0, tau=tau_fwhm, z_foc=z_foc
+    )
+
+    num_snapshots_plateau = (
+        5  # all but the last will be deleted if analysis_script.py is run
+    )
+
+    # Create plasma stages.
+    plasma_plateau = PlasmaStage(
+        length=l_plateau,
+        density=density_profile,
+        wakefield_model="quasistatic_2d",
+        n_out=num_snapshots_plateau,
+        laser=laser,
+        laser_evolution=laser_evolution,
+        r_max=r_max,
+        r_max_plasma=r_max_plasma,
+        xi_min=xi_min,
+        xi_max=xi_max,
+        n_r=nr,
+        n_xi=nz,
+        dz_fields=1 * l_box,
+        ppc=2,
+        parabolic_coefficient=rel_delta_n_over_w2,
+        bunch_pusher="boris",
+        plasma_pusher="ab5",
+        laser_envelope_substeps=4,
+        laser_envelope_nxi=nz * 4,
+        max_gamma=25,
+    )
+
+    # Do tracking
+    # -----------
+    beamline = Beamline(
+        [
+            plasma_plateau,
+        ]
+    )
+    bunch_list = beamline.track(
+        bunch, opmd_diag=diags, show_progress_bar=False
+    )  # SH False show
+    return bunch_list, num_snapshots_plateau
+
+
+if __name__ == "__main__":
+
+    # Parameters exposed to optimizer.
+ beam_i_r2 = {{beam_i_r2}} # rear end current + beam_z_i_2 = {{beam_z_i_2}} # rear end of the beam + beam_length = {{beam_length}} # beam length in microns + + simulate_plasma_stage( + beam_i_r2=beam_i_r2, + beam_z_i_2=beam_z_i_2, + beam_length_um=beam_length, + diags=True, + ) From d6fd1fb39061b2e339db14cd9f4258bcd7af9d43 Mon Sep 17 00:00:00 2001 From: shudson Date: Tue, 21 Oct 2025 11:19:03 -0500 Subject: [PATCH 2/9] Full length wake_t --- examples/libe_aposmm_nlopt/template_simulation_script.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/libe_aposmm_nlopt/template_simulation_script.py b/examples/libe_aposmm_nlopt/template_simulation_script.py index 1a06b5bd..8808b8a8 100755 --- a/examples/libe_aposmm_nlopt/template_simulation_script.py +++ b/examples/libe_aposmm_nlopt/template_simulation_script.py @@ -131,7 +131,7 @@ def simulate_plasma_stage( # Plasma density profile # ---------------------- - l_plateau = 28.0e-5 # 28.0e-2 # (m) # SH change for short test + l_plateau = 28.0e-2 # (m) # SH change for short test l_plasma = l_plateau # Determine guiding channel. From 1d67cd4b3eab15f056975c8d622434f0fee64ff8 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 21 Oct 2025 16:19:36 +0000 Subject: [PATCH 3/9] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- examples/libe_aposmm_nlopt/template_simulation_script.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/libe_aposmm_nlopt/template_simulation_script.py b/examples/libe_aposmm_nlopt/template_simulation_script.py index 8808b8a8..ee7419a3 100755 --- a/examples/libe_aposmm_nlopt/template_simulation_script.py +++ b/examples/libe_aposmm_nlopt/template_simulation_script.py @@ -131,7 +131,7 @@ def simulate_plasma_stage( # Plasma density profile # ---------------------- - l_plateau = 28.0e-2 # (m) # SH change for short test + l_plateau = 28.0e-2 # (m) # SH change for short test l_plasma = l_plateau # Determine guiding channel. From dc1e7e28f33e3ee018cbfe4a3dc383f23310ae57 Mon Sep 17 00:00:00 2001 From: shudson Date: Tue, 21 Oct 2025 16:43:48 -0500 Subject: [PATCH 4/9] Add APOSMM/nlopt test --- tests/run_aposmm_nlopt.py | 148 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 148 insertions(+) create mode 100755 tests/run_aposmm_nlopt.py diff --git a/tests/run_aposmm_nlopt.py b/tests/run_aposmm_nlopt.py new file mode 100755 index 00000000..d32f717e --- /dev/null +++ b/tests/run_aposmm_nlopt.py @@ -0,0 +1,148 @@ +""" Optimization of an LPA with APOSMM/nlopt and Wake-T.""" + +import numpy as np +import pickle +from math import gamma, pi, sqrt +# from multiprocessing import set_start_method + +import libensemble.gen_funcs +libensemble.gen_funcs.rc.aposmm_optimizers = "nlopt" + +from libensemble.gen_classes import APOSMM +from optimas.generators import ExternalGenerator +from libensemble.tools import add_unique_random_streams + +from gest_api.vocs import VOCS +from optimas.evaluators import TemplateEvaluator +from optimas.explorations import Exploration + +from analysis_script import analyze_simulation + + +# Number of simulation batches, their size, and the maximum number of simulations +n_batches = 2 #10 # 8 +batch_size = 4 # 24 + +initial_sample = batch_size # *4 +max_evals = n_batches * batch_size # + initial_sample +nworkers = batch_size + + +# Create varying parameters and objectives. 
+mcr = 1e-2 # minimal current ratio +# back current ratio: sum with front equals to 1 and they get doubled and multiplied with the average current + +# Single source of truth for variable definitions +vars_std = { + "beam_i_r2": [mcr, 1.0-mcr], + "beam_z_i_2": [-20.0, 20.0], # µm + "beam_length": [1.0, 20.0], # µm + "beam_i_r2_on_cube": [0, 1.0], + "beam_z_i_2_on_cube": [0, 1.0], + "beam_length_on_cube": [0, 1.0], +} +n = 3 + +# Create VOCS object +# start for bin results +bin_start = 4 +# number of bins for structure-exploiting optimization (note this is nlopt so not using) +nbins = 10 + +# Build observables set with all parameters +observables_set = { + "mean_gamma", # arithmetic mean + "std_gamma", # standard deviation + "charge", # Track charge to see if we lost any particles +} + +# Note: This nlopt example does not use these fields but this tests the setup. +# Add bin results to observables +for i in range(nbins): + observables_set.add(f"bin_gammas_{i+1}") # average gammas per bin + +for i in range(10): + observables_set.add(f"bin_nparts_{i+1}") # number of particles per bin + +vocs = VOCS( + variables=vars_std, + objectives={"f": "MINIMIZE"}, + observables=observables_set +) + +bounds = np.array(vocs.bounds) +LB = bounds[:n, 0] # Lower bounds +UB = bounds[:n, 1] # Upper bounds + +pts_in_unit_cube = 0.5*np.ones((1,3)) +pts_in_original_domain = pts_in_unit_cube*(UB - LB) + LB + +for obs in vocs.observables: + print(obs) + + +# Set up APOSMM generator +persis_info = add_unique_random_streams({}, 5)[1] # SH Dont need the 5.Better to have APOSMM defaults. +persis_info["nworkers"] = nworkers # SH - not taking account of gen_on_manager in APOSMM + +variables_mapping = { + "x": ["beam_i_r2", "beam_z_i_2", "beam_length"], + "x_on_cube": ["beam_i_r2_on_cube", "beam_z_i_2_on_cube", "beam_length_on_cube"], +} + +aposmm = APOSMM( + vocs=vocs, + variables_mapping=variables_mapping, + persis_info=persis_info, + initial_sample_size=initial_sample, + # sample_points=np.atleast_2d(0.1 * (np.arange(n) + 1)), # Outside of bounds on beam_length + sample_points = pts_in_original_domain, + localopt_method="LN_BOBYQA", + rk_const=1e-4, # 0.5 * ((gamma(1 + (n / 2)) * 5) ** (1 / n)) / sqrt(pi), + run_max_eval=100 * (n + 1), + max_active_runs=batch_size, + dist_to_bound_multiple=0.5, + ftol_rel=1e-8, +) + +# Create generator. +gen = ExternalGenerator( + ext_gen=aposmm, + vocs=vocs, + save_model=True, +) + +# Create evaluator. +ev = TemplateEvaluator( + sim_template="template_simulation_script.py", + analysis_func=analyze_simulation, + sim_files=["bunch_utils.py"], +) + +# Create exploration. +exp = Exploration( + generator=gen, + evaluator=ev, + max_evals=max_evals, + sim_workers=nworkers, + run_async=False, # SH - also try with True + exploration_dir_path='./exploration_0', +) + + +if __name__ == '__main__': + # set_start_method("spawn") + # Run exploration. 
+ exp.run() + + if exp.is_manager: + aposmm.finalize() + + # Get data in gen with user fields + H, persis_info, _ = aposmm.export(user_fields=True) + np.save("H_final.npy", H) + pickle.dump(persis_info, open("persis_info.pickle", "wb")) + + # Check sampling followed by optimization runs + assert not np.any(H['local_pt'][:initial_sample]) + assert np.all(H['local_pt'][initial_sample:]) From 6613f510510cb7a116c86f106abdf643450f4e22 Mon Sep 17 00:00:00 2001 From: shudson Date: Tue, 21 Oct 2025 16:47:17 -0500 Subject: [PATCH 5/9] Add nlopt to test dependencies --- pyproject.toml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 3147632a..33da58b6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -37,10 +37,12 @@ test = [ 'pytest-mpi', 'ax-platform >=0.5.0, <1.0.0', 'matplotlib', + 'nlopt', ] all = [ 'ax-platform >=0.5.0, <1.0.0', - 'matplotlib' + 'matplotlib', + 'nlopt', ] [project.urls] From e8da14ede6486dc493fa13b769f909b35ec53342 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 21 Oct 2025 21:48:25 +0000 Subject: [PATCH 6/9] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- tests/run_aposmm_nlopt.py | 41 ++++++++++++++++++++++++--------------- 1 file changed, 25 insertions(+), 16 deletions(-) diff --git a/tests/run_aposmm_nlopt.py b/tests/run_aposmm_nlopt.py index d32f717e..24d1d52a 100755 --- a/tests/run_aposmm_nlopt.py +++ b/tests/run_aposmm_nlopt.py @@ -1,11 +1,12 @@ -""" Optimization of an LPA with APOSMM/nlopt and Wake-T.""" +"""Optimization of an LPA with APOSMM/nlopt and Wake-T.""" import numpy as np import pickle -from math import gamma, pi, sqrt + # from multiprocessing import set_start_method import libensemble.gen_funcs + libensemble.gen_funcs.rc.aposmm_optimizers = "nlopt" from libensemble.gen_classes import APOSMM @@ -20,7 +21,7 @@ # Number of simulation batches, their size, and the maximum number of simulations -n_batches = 2 #10 # 8 +n_batches = 2 # 10 # 8 batch_size = 4 # 24 initial_sample = batch_size # *4 @@ -34,7 +35,7 @@ # Single source of truth for variable definitions vars_std = { - "beam_i_r2": [mcr, 1.0-mcr], + "beam_i_r2": [mcr, 1.0 - mcr], "beam_z_i_2": [-20.0, 20.0], # µm "beam_length": [1.0, 20.0], # µm "beam_i_r2_on_cube": [0, 1.0], @@ -67,27 +68,35 @@ vocs = VOCS( variables=vars_std, objectives={"f": "MINIMIZE"}, - observables=observables_set + observables=observables_set, ) bounds = np.array(vocs.bounds) LB = bounds[:n, 0] # Lower bounds UB = bounds[:n, 1] # Upper bounds -pts_in_unit_cube = 0.5*np.ones((1,3)) -pts_in_original_domain = pts_in_unit_cube*(UB - LB) + LB +pts_in_unit_cube = 0.5 * np.ones((1, 3)) +pts_in_original_domain = pts_in_unit_cube * (UB - LB) + LB for obs in vocs.observables: print(obs) # Set up APOSMM generator -persis_info = add_unique_random_streams({}, 5)[1] # SH Dont need the 5.Better to have APOSMM defaults. -persis_info["nworkers"] = nworkers # SH - not taking account of gen_on_manager in APOSMM +persis_info = add_unique_random_streams({}, 5)[ + 1 +] # SH Dont need the 5.Better to have APOSMM defaults. 
+persis_info["nworkers"] = ( + nworkers # SH - not taking account of gen_on_manager in APOSMM +) variables_mapping = { "x": ["beam_i_r2", "beam_z_i_2", "beam_length"], - "x_on_cube": ["beam_i_r2_on_cube", "beam_z_i_2_on_cube", "beam_length_on_cube"], + "x_on_cube": [ + "beam_i_r2_on_cube", + "beam_z_i_2_on_cube", + "beam_length_on_cube", + ], } aposmm = APOSMM( @@ -96,9 +105,9 @@ persis_info=persis_info, initial_sample_size=initial_sample, # sample_points=np.atleast_2d(0.1 * (np.arange(n) + 1)), # Outside of bounds on beam_length - sample_points = pts_in_original_domain, + sample_points=pts_in_original_domain, localopt_method="LN_BOBYQA", - rk_const=1e-4, # 0.5 * ((gamma(1 + (n / 2)) * 5) ** (1 / n)) / sqrt(pi), + rk_const=1e-4, # 0.5 * ((gamma(1 + (n / 2)) * 5) ** (1 / n)) / sqrt(pi), run_max_eval=100 * (n + 1), max_active_runs=batch_size, dist_to_bound_multiple=0.5, @@ -126,11 +135,11 @@ max_evals=max_evals, sim_workers=nworkers, run_async=False, # SH - also try with True - exploration_dir_path='./exploration_0', + exploration_dir_path="./exploration_0", ) -if __name__ == '__main__': +if __name__ == "__main__": # set_start_method("spawn") # Run exploration. exp.run() @@ -144,5 +153,5 @@ pickle.dump(persis_info, open("persis_info.pickle", "wb")) # Check sampling followed by optimization runs - assert not np.any(H['local_pt'][:initial_sample]) - assert np.all(H['local_pt'][initial_sample:]) + assert not np.any(H["local_pt"][:initial_sample]) + assert np.all(H["local_pt"][initial_sample:]) From 4a0b8ed73b3093ad076d620a7d9dacc130bb46fe Mon Sep 17 00:00:00 2001 From: shudson Date: Tue, 21 Oct 2025 16:52:59 -0500 Subject: [PATCH 7/9] Replace test --- tests/run_aposmm_nlopt.py | 157 ----------------------------------- tests/test_libEgen_aposmm.py | 136 ++++++++++++++++++++++++++++++ 2 files changed, 136 insertions(+), 157 deletions(-) delete mode 100755 tests/run_aposmm_nlopt.py create mode 100644 tests/test_libEgen_aposmm.py diff --git a/tests/run_aposmm_nlopt.py b/tests/run_aposmm_nlopt.py deleted file mode 100755 index 24d1d52a..00000000 --- a/tests/run_aposmm_nlopt.py +++ /dev/null @@ -1,157 +0,0 @@ -"""Optimization of an LPA with APOSMM/nlopt and Wake-T.""" - -import numpy as np -import pickle - -# from multiprocessing import set_start_method - -import libensemble.gen_funcs - -libensemble.gen_funcs.rc.aposmm_optimizers = "nlopt" - -from libensemble.gen_classes import APOSMM -from optimas.generators import ExternalGenerator -from libensemble.tools import add_unique_random_streams - -from gest_api.vocs import VOCS -from optimas.evaluators import TemplateEvaluator -from optimas.explorations import Exploration - -from analysis_script import analyze_simulation - - -# Number of simulation batches, their size, and the maximum number of simulations -n_batches = 2 # 10 # 8 -batch_size = 4 # 24 - -initial_sample = batch_size # *4 -max_evals = n_batches * batch_size # + initial_sample -nworkers = batch_size - - -# Create varying parameters and objectives. 
-mcr = 1e-2 # minimal current ratio -# back current ratio: sum with front equals to 1 and they get doubled and multiplied with the average current - -# Single source of truth for variable definitions -vars_std = { - "beam_i_r2": [mcr, 1.0 - mcr], - "beam_z_i_2": [-20.0, 20.0], # µm - "beam_length": [1.0, 20.0], # µm - "beam_i_r2_on_cube": [0, 1.0], - "beam_z_i_2_on_cube": [0, 1.0], - "beam_length_on_cube": [0, 1.0], -} -n = 3 - -# Create VOCS object -# start for bin results -bin_start = 4 -# number of bins for structure-exploiting optimization (note this is nlopt so not using) -nbins = 10 - -# Build observables set with all parameters -observables_set = { - "mean_gamma", # arithmetic mean - "std_gamma", # standard deviation - "charge", # Track charge to see if we lost any particles -} - -# Note: This nlopt example does not use these fields but this tests the setup. -# Add bin results to observables -for i in range(nbins): - observables_set.add(f"bin_gammas_{i+1}") # average gammas per bin - -for i in range(10): - observables_set.add(f"bin_nparts_{i+1}") # number of particles per bin - -vocs = VOCS( - variables=vars_std, - objectives={"f": "MINIMIZE"}, - observables=observables_set, -) - -bounds = np.array(vocs.bounds) -LB = bounds[:n, 0] # Lower bounds -UB = bounds[:n, 1] # Upper bounds - -pts_in_unit_cube = 0.5 * np.ones((1, 3)) -pts_in_original_domain = pts_in_unit_cube * (UB - LB) + LB - -for obs in vocs.observables: - print(obs) - - -# Set up APOSMM generator -persis_info = add_unique_random_streams({}, 5)[ - 1 -] # SH Dont need the 5.Better to have APOSMM defaults. -persis_info["nworkers"] = ( - nworkers # SH - not taking account of gen_on_manager in APOSMM -) - -variables_mapping = { - "x": ["beam_i_r2", "beam_z_i_2", "beam_length"], - "x_on_cube": [ - "beam_i_r2_on_cube", - "beam_z_i_2_on_cube", - "beam_length_on_cube", - ], -} - -aposmm = APOSMM( - vocs=vocs, - variables_mapping=variables_mapping, - persis_info=persis_info, - initial_sample_size=initial_sample, - # sample_points=np.atleast_2d(0.1 * (np.arange(n) + 1)), # Outside of bounds on beam_length - sample_points=pts_in_original_domain, - localopt_method="LN_BOBYQA", - rk_const=1e-4, # 0.5 * ((gamma(1 + (n / 2)) * 5) ** (1 / n)) / sqrt(pi), - run_max_eval=100 * (n + 1), - max_active_runs=batch_size, - dist_to_bound_multiple=0.5, - ftol_rel=1e-8, -) - -# Create generator. -gen = ExternalGenerator( - ext_gen=aposmm, - vocs=vocs, - save_model=True, -) - -# Create evaluator. -ev = TemplateEvaluator( - sim_template="template_simulation_script.py", - analysis_func=analyze_simulation, - sim_files=["bunch_utils.py"], -) - -# Create exploration. -exp = Exploration( - generator=gen, - evaluator=ev, - max_evals=max_evals, - sim_workers=nworkers, - run_async=False, # SH - also try with True - exploration_dir_path="./exploration_0", -) - - -if __name__ == "__main__": - # set_start_method("spawn") - # Run exploration. - exp.run() - - if exp.is_manager: - aposmm.finalize() - - # Get data in gen with user fields - H, persis_info, _ = aposmm.export(user_fields=True) - np.save("H_final.npy", H) - pickle.dump(persis_info, open("persis_info.pickle", "wb")) - - # Check sampling followed by optimization runs - assert not np.any(H["local_pt"][:initial_sample]) - assert np.all(H["local_pt"][initial_sample:]) diff --git a/tests/test_libEgen_aposmm.py b/tests/test_libEgen_aposmm.py new file mode 100644 index 00000000..746985a9 --- /dev/null +++ b/tests/test_libEgen_aposmm.py @@ -0,0 +1,136 @@ +""" +Optimization of a 2D function with APOSMM/nlopt. 
+ +Export options can also be tested. +""" + +import os +from math import gamma, pi, sqrt +import numpy as np +import libensemble.gen_funcs +libensemble.gen_funcs.rc.aposmm_optimizers = "nlopt" + +from libensemble.gen_classes import APOSMM +from optimas.generators import ExternalGenerator +from libensemble.tools import add_unique_random_streams + +from gest_api.vocs import VOCS +from optimas.evaluators import TemplateEvaluator +from optimas.explorations import Exploration + + +def analyze_simulation(simulation_directory, output_params): + """ + Analyze the simulation output. + This method analyzes the output generated by the simulation to + obtain the value of the optimization objective and other analyzed + parameters, if specified. The value of these parameters has to be + given to the `output_params` dictionary. + + Parameters + ---------- + + simulation_directory : str + Path to the simulation folder where the output was generated. + output_params : dict + Dictionary where the value of the objectives and analyzed parameters + will be stored. There is one entry per parameter, where the key + is the name of the parameter given by the user. + + Returns + ------- + + dict + The `output_params` dictionary with the results from the analysis. + """ + # Read back result from file + with open("result.txt") as f: + result = float(f.read()) + # Fill in output parameters. + output_params["f"] = result + return output_params + + +def test_aposmm_nlopt(): + """Test APOSMM/nlopt with a 2D function.""" + initial_sample_size = 8 + max_evals = 40 + n = 2 # dimensions + + # Create varying parameters and objectives. + vocs = VOCS( + variables= { + "x0": [-3.0, 3.0], + "x1": [-2.0, 2.0], + "x0_on_cube": [0, 1.0], + "x1_on_cube": [0, 1.0], + }, + objectives={ + "f": "MINIMIZE" + }, + ) + + # How APOSMM will convert the variables to internal arrays. + variables_mapping = { + "x": ["x0", "x1"], + "x_on_cube": [ + "x0_on_cube", + "x1_on_cube", + ], + } + + aposmm = APOSMM( + vocs=vocs, + variables_mapping=variables_mapping, + initial_sample_size=initial_sample_size, + localopt_method="LN_BOBYQA", + rk_const=0.5 * ((gamma(1 + (n / 2)) * 5) ** (1 / n)) / sqrt(pi), + xtol_abs=1e-6, + ftol_abs=1e-6, + dist_to_bound_multiple=0.5, + max_active_runs=4, # refers to APOSMM's simul local optimization runs + ) + + # Create generator. + gen = ExternalGenerator( + ext_gen=aposmm, + vocs=vocs, + save_model=True, + ) + + # Create evaluator. + ev = TemplateEvaluator( + sim_template=os.path.join( + os.path.abspath(os.path.dirname(__file__)), + "resources", + "template_simulation_script.py", + ), + analysis_func=analyze_simulation, + ) + + # Create exploration. 
+ exp = Exploration( + generator=gen, + evaluator=ev, + max_evals=max_evals, + sim_workers=4, + run_async=True, + exploration_dir_path="./tests_output/test_libEgen_aposmm", + ) + + # Run exploration + exp.run() + + if exp.is_manager: + aposmm.finalize() + + # Get data in gen format and in user format + H, persis_info, _ = aposmm.export() + + # Check sampling followed by optimization runs + assert not np.any(H["local_pt"][:initial_sample_size]) + assert np.all(H["local_pt"][initial_sample_size:]) + + +if __name__ == "__main__": + test_aposmm_nlopt() From 0d7430b2613cb2d9bd5a87ef4ce1a3a672f395d6 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 21 Oct 2025 21:53:14 +0000 Subject: [PATCH 8/9] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- tests/test_libEgen_aposmm.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tests/test_libEgen_aposmm.py b/tests/test_libEgen_aposmm.py index 746985a9..1046e3f8 100644 --- a/tests/test_libEgen_aposmm.py +++ b/tests/test_libEgen_aposmm.py @@ -8,6 +8,7 @@ from math import gamma, pi, sqrt import numpy as np import libensemble.gen_funcs + libensemble.gen_funcs.rc.aposmm_optimizers = "nlopt" from libensemble.gen_classes import APOSMM @@ -59,15 +60,13 @@ def test_aposmm_nlopt(): # Create varying parameters and objectives. vocs = VOCS( - variables= { + variables={ "x0": [-3.0, 3.0], "x1": [-2.0, 2.0], "x0_on_cube": [0, 1.0], "x1_on_cube": [0, 1.0], }, - objectives={ - "f": "MINIMIZE" - }, + objectives={"f": "MINIMIZE"}, ) # How APOSMM will convert the variables to internal arrays. From e616244012ca56972c42f0643cac5f51680d928c Mon Sep 17 00:00:00 2001 From: shudson Date: Tue, 21 Oct 2025 19:06:28 -0500 Subject: [PATCH 9/9] Use libE branch --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 33da58b6..f2319f0f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -22,7 +22,7 @@ classifiers = [ 'Programming Language :: Python :: 3.12', ] dependencies = [ - 'libensemble >= 1.3.0', + 'libensemble @ git+https://github.com/Libensemble/libensemble.git@experimental/jlnav_plus_shuds_asktell', 'jinja2', 'pandas', 'mpi4py',