diff --git a/scripts/imaging/features/advanced/double_einstein_ring/modeling.py b/scripts/imaging/features/advanced/double_einstein_ring/modeling.py index d5cdc01ea..c191e171d 100644 --- a/scripts/imaging/features/advanced/double_einstein_ring/modeling.py +++ b/scripts/imaging/features/advanced/double_einstein_ring/modeling.py @@ -265,8 +265,11 @@ images for different source planes can require all the additional data to be stored in VRAM. This will at least double the VRAM requirements compared to a single lens plane model, but often more than this. -Given VRAM use is an important consideration, we print out the estimated VRAM required for this +Given VRAM use is an important consideration, we print out the estimated VRAM required for this model-fit and advise you do this for your own double source plane lens model-fits. + +The method below prints the VRAM usage estimate for the analysis and model with the specified batch size, +it takes about 20-30 seconds to run so you may want to comment it out once you are familiar with your GPU's VRAM limits. """ analysis.print_vram_use(model=model, batch_size=search.batch_size) diff --git a/scripts/imaging/features/advanced/mass_stellar_dark/modeling.py b/scripts/imaging/features/advanced/mass_stellar_dark/modeling.py index 69424c2ea..23dad442e 100644 --- a/scripts/imaging/features/advanced/mass_stellar_dark/modeling.py +++ b/scripts/imaging/features/advanced/mass_stellar_dark/modeling.py @@ -231,8 +231,11 @@ Deflection angle calculations of stellar mass models and dark matter mass models can use techniques whichs store more data in VRAM than other methods. -Given VRAM use is an important consideration, we print out the estimated VRAM required for this +Given VRAM use is an important consideration, we print out the estimated VRAM required for this model-fit and advise you do this for your own double source plane lens model-fits. 
+ +The method below prints the VRAM usage estimate for the analysis and model with the specified batch size, +it takes about 20-30 seconds to run so you may want to comment it out once you are familiar with your GPU's VRAM limits. """ analysis.print_vram_use(model=model, batch_size=search.batch_size) diff --git a/scripts/imaging/features/pixelization/delaunay.py b/scripts/imaging/features/pixelization/delaunay.py index 9de324afe..30e2ab119 100644 --- a/scripts/imaging/features/pixelization/delaunay.py +++ b/scripts/imaging/features/pixelization/delaunay.py @@ -421,6 +421,9 @@ and how it depends on image resolution, number of source pixels and batch size. This is true for the Delaunay mesh, therefore we print out the estimated VRAM required for this model-fit. + +The method below prints the VRAM usage estimate for the analysis and model with the specified batch size, +it takes about 20-30 seconds to run so you may want to comment it out once you are familiar with your GPU's VRAM limits. """ analysis_1.print_vram_use(model=model_1, batch_size=search_1.batch_size) diff --git a/scripts/imaging/features/pixelization/modeling.py b/scripts/imaging/features/pixelization/modeling.py index c47afb18a..1a2ae8281 100644 --- a/scripts/imaging/features/pixelization/modeling.py +++ b/scripts/imaging/features/pixelization/modeling.py @@ -384,8 +384,11 @@ This is why the `batch_size` above is 20, lower than other examples, because reducing the batch size ensures a more modest amount of VRAM is used. If you have a GPU with more VRAM, increasing the batch size will lead to faster run times. -Given VRAM use is an important consideration, we print out the estimated VRAM required for this +Given VRAM use is an important consideration, we print out the estimated VRAM required for this model-fit and advise you do this for your own pixelization model-fits. 
+ +The method below prints the VRAM usage estimate for the analysis and model with the specified batch size, +it takes about 20-30 seconds to run so you may want to comment it out once you are familiar with your GPU's VRAM limits. """ analysis.print_vram_use(model=model, batch_size=search.batch_size) diff --git a/scripts/interferometer/features/pixelization/delaunay.py b/scripts/interferometer/features/pixelization/delaunay.py index 8b76422a9..ae0c59c65 100644 --- a/scripts/interferometer/features/pixelization/delaunay.py +++ b/scripts/interferometer/features/pixelization/delaunay.py @@ -323,6 +323,9 @@ and how it depends on image resolution, number of source pixels and batch size. This is true for the Delaunay mesh, therefore we print out the estimated VRAM required for this model-fit. + +The method below prints the VRAM usage estimate for the analysis and model with the specified batch size, +it takes about 20-30 seconds to run so you may want to comment it out once you are familiar with your GPU's VRAM limits. """ analysis_1.print_vram_use(model=model_1, batch_size=search_1.batch_size) diff --git a/scripts/interferometer/features/pixelization/modeling.py b/scripts/interferometer/features/pixelization/modeling.py index ba5d9de64..f6d4531e9 100644 --- a/scripts/interferometer/features/pixelization/modeling.py +++ b/scripts/interferometer/features/pixelization/modeling.py @@ -399,8 +399,11 @@ This is because the sparse operator compresses all the visibility information into a matrix whose size depends only on the number of pixels in the real-space mask. VRAM use is therefore mostly driven by how many pixels are in the real space mask. -VRAM does scale with batch size though, and for high resoluiton datasets may require you to reduce from the value of +VRAM does scale with batch size though, and for high resolution datasets may require you to reduce from the value of 20 set above if your GPU does not have too much VRAM (e.g. < 4GB). 
+ +The method below prints the VRAM usage estimate for the analysis and model with the specified batch size, +it takes about 20-30 seconds to run so you may want to comment it out once you are familiar with your GPU's VRAM limits. """ analysis.print_vram_use(model=model, batch_size=search.batch_size) diff --git a/scripts/interferometer/modeling.py b/scripts/interferometer/modeling.py index 5d1771950..4903d1308 100644 --- a/scripts/interferometer/modeling.py +++ b/scripts/interferometer/modeling.py @@ -474,7 +474,7 @@ ) aplt.subplot_fit_interferometer(fit=result.max_log_likelihood_fit) -aplt.subplot_fit_dirty_images(fit=fit) +aplt.subplot_fit_dirty_images(fit=result.max_log_likelihood_fit) """ The result contains the full posterior information of our non-linear search, including all parameter samples, diff --git a/scripts/multi/modeling.py b/scripts/multi/modeling.py index 61f125ef5..a3f878419 100644 --- a/scripts/multi/modeling.py +++ b/scripts/multi/modeling.py @@ -297,8 +297,11 @@ When multiple datasets are fitted simultaneously, as in this example, VRAM usage increases with each dataset, as their data structures must all be stored in VRAM. -Given VRAM use is an important consideration, we print out the estimated VRAM required for this +Given VRAM use is an important consideration, we print out the estimated VRAM required for this model-fit and advise you do this for your own pixelization model-fits. + +The method below prints the VRAM usage estimate for the analysis and model with the specified batch size, +it takes about 20-30 seconds to run so you may want to comment it out once you are familiar with your GPU's VRAM limits. 
""" factor_graph.print_vram_use( model=factor_graph.global_prior_model, batch_size=search.batch_size diff --git a/scripts/point_source/modeling.py b/scripts/point_source/modeling.py index f0231f8fc..031e77d07 100644 --- a/scripts/point_source/modeling.py +++ b/scripts/point_source/modeling.py @@ -368,8 +368,11 @@ chosen batch size is comfortably below their GPU’s total VRAM. For a point solver with an image-plane chi squared and one set of positions with a single plane VRAM use is relatively -low (~0.1GB). For models with more planes and datasets with more multiple images it can be much higher (> 1GB going +low (~0.1GB). For models with more planes and datasets with more multiple images it can be much higher (> 1GB going beyond 10GB). + +The method below prints the VRAM usage estimate for the analysis and model with the specified batch size, +it takes about 20-30 seconds to run so you may want to comment it out once you are familiar with your GPU's VRAM limits. """ analysis.print_vram_use(model=model, batch_size=search.batch_size)